diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index e7d53e6..ee0f843 100755
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,22 +1,22 @@
lockVersion: 2.0.0
id: 7ebb3741-2ac6-4c7d-96db-cce06c3d2bb5
management:
- docChecksum: 2789f397868dd60c695104a366325f6b
+ docChecksum: 75addedbe555f7fcf2674ec734755162
docVersion: 2.6.13
speakeasyVersion: internal
- generationVersion: 2.230.3
- releaseVersion: 0.2.0
- configChecksum: 08e534281040ac627259dfda2e6cf28e
+ generationVersion: 2.237.3
+ releaseVersion: 0.0.2
+ configChecksum: efc51e2fcd324e5ebee2b33bd25a9d58
features:
terraform:
constsAndDefaults: 0.1.2
- core: 3.8.1
+ core: 3.8.4
deprecations: 2.81.1
- globalSecurity: 2.81.2
+ globalSecurity: 2.81.3
globalServerURLs: 2.82.1
groups: 2.81.2
inputOutputModels: 2.83.0
- methodSecurity: 2.82.0
+ methodSecurity: 2.82.1
nameOverrides: 2.81.1
typeOverrides: 2.81.1
unions: 2.81.7
diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml
index a483919..93f229d 100755
--- a/.speakeasy/gen.yaml
+++ b/.speakeasy/gen.yaml
@@ -8,7 +8,7 @@ generation:
fixes:
nameResolutionDec2023: true
terraform:
- version: 0.2.0
+ version: 0.0.2
author: etleap
imports:
option: openapi
diff --git a/README.md b/README.md
index 04cd58a..8a5d35a 100644
--- a/README.md
+++ b/README.md
@@ -59,7 +59,7 @@ terraform {
required_providers {
etleap = {
source = "etleap/etleap"
- version = "0.2.0"
+ version = "0.0.2"
}
}
}
diff --git a/docs/data-sources/dbt_schedule.md b/docs/data-sources/dbt_schedule.md
new file mode 100644
index 0000000..8f8b5e0
--- /dev/null
+++ b/docs/data-sources/dbt_schedule.md
@@ -0,0 +1,53 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_dbt_schedule Data Source - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ DbtSchedule DataSource
+---
+
+# etleap_dbt_schedule (Data Source)
+
+DbtSchedule DataSource
+
+## Example Usage
+
+```terraform
+data "etleap_dbt_schedule" "my_dbtschedule" {
+ id = "cc65bc92-8154-4552-a30c-048b8c2b5ed5"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `id` (String) The id of the dbt schedule
+
+### Read-Only
+
+- `connection_id` (String) The [connection](https://docs.etleap.com/docs/api-v2/edbec13814bbc-connection) where the dbt build runs. The only supported connections are Redshift, Snowflake or Databricks Delta Lake destinations.
+- `create_date` (String)
+- `cron` (String) The cron expression that defines triggers for this schedule. The maximum supported cron schedule precision is 1 minute.
+- `current_activity` (String) must be one of ["LOADING", "BUILDING"]
+- `last_dbt_build_date` (String) The last time that a successful dbt build started.
+- `last_dbt_run_time` (Number) The duration of the last successful dbt build.
+- `name` (String) The name of the dbt schedule.
+- `owner` (Attributes) (see [below for nested schema](#nestedatt--owner))
+- `paused` (Boolean) `true` if the schedule is paused.
+- `selector` (String) The selector this schedule runs.
+- `skip_build_if_no_new_data` (Boolean) Whether the dbt build is skipped if no new data has been ingested for any of the pipelines in the table above.
+- `target_schema` (String) The target schema for the dbt build. See [here](https://docs.getdbt.com/docs/build/custom-schemas) for details on how it's used.
+
+
+### Nested Schema for `owner`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
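+A minimal usage sketch, assuming the data source address from the example above; the attribute names come from the schema:
+
+```terraform
+# Sketch: expose two of the schedule's read-only fields as outputs
+output "dbt_schedule_cron" {
+  # The cron expression that triggers this schedule (see `cron` above)
+  value = data.etleap_dbt_schedule.my_dbtschedule.cron
+}
+
+output "dbt_schedule_paused" {
+  # `true` if the schedule is paused (see `paused` above)
+  value = data.etleap_dbt_schedule.my_dbtschedule.paused
+}
+```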
+
diff --git a/docs/data-sources/model.md b/docs/data-sources/model.md
new file mode 100644
index 0000000..df77956
--- /dev/null
+++ b/docs/data-sources/model.md
@@ -0,0 +1,195 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_model Data Source - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ Model DataSource
+---
+
+# etleap_model (Data Source)
+
+Model DataSource
+
+## Example Usage
+
+```terraform
+data "etleap_model" "my_model" {
+ id = "f0cf07c6-6222-4082-90d5-a28144bbc66b"
+}
+```
+
+
+## Schema
+
+### Read-Only
+
+- `create_date` (String) The date and time when the model was created.
+- `dependencies` (Attributes List) (see [below for nested schema](#nestedatt--dependencies))
+- `id` (String) The ID of this resource.
+- `last_update_duration` (Number) How long the latest update took to complete, in milliseconds, or the duration of the current update if one is in progress.
+- `last_update_time` (String) The date and time of the latest successful update for this model.
+- `name` (String)
+- `owner` (Attributes) (see [below for nested schema](#nestedatt--owner))
+- `paused` (Boolean)
+- `query_and_triggers` (Attributes) (see [below for nested schema](#nestedatt--query_and_triggers))
+- `shares` (List of String) An array of users' emails that the model is shared with. Once shared, a model cannot be unshared, and future calls to `PATCH` can only add to this list.
+- `update_schedule` (Attributes) How often this model should update. Etleap will periodically update the model table in your warehouse according to this schedule. See [the Model Updates documentation](https://docs.etleap.com/docs/documentation/ZG9jOjI0MzU2NDY3-introduction-to-models#model-updates) for more information. (see [below for nested schema](#nestedatt--update_schedule))
+- `warehouse` (Attributes) (see [below for nested schema](#nestedatt--warehouse))
+
+
+### Nested Schema for `dependencies`
+
+Read-Only:
+
+- `id` (String) The unique identifier of the pipeline or model.
+- `name` (String) The name of the pipeline or model.
+- `type` (String) must be one of ["PIPELINE", "MODEL"]
+
+
+
+### Nested Schema for `owner`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
+
+### Nested Schema for `query_and_triggers`
+
+Read-Only:
+
+- `query` (String) The SQL query used to build this model. To specify dependencies on pipelines or other models, replace the schema and table name of the dependency with the id of the dependency enclosed in `{{` and `}}`. The dependency must load data into the same Etleap connection as the one given in `warehouse.connectionId` for this model.
+
+**For example:**
+Say there is a pipeline with the id `abcd1234` that loads data to the table "schema"."my_table". To create a model in Etleap that has a dependency on this pipeline, the following query:
+
+```sql
+SELECT col1, col2 FROM "schema"."my_table";
+```
+
+becomes:
+```sql
+SELECT col1, col2 FROM {{abcd1234}};
+```
+
+[See the Model documentation](https://docs.etleap.com/docs/documentation/ZG9jOjI0MzU2NDY3-introduction-to-models#model-dependencies) for more information on Model dependencies.
+- `triggers` (List of String) A list of model dependency ids. An update will be automatically triggered in this model if any of the dependencies listed here get new data. Any ids given here must be present as dependencies in the `query`.
+
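+A minimal sketch of reading these values back, assuming the data source address from the example above:
+
+```terraform
+# Sketch: expose the model's SQL and its trigger dependencies
+output "model_query" {
+  value = data.etleap_model.my_model.query_and_triggers.query
+}
+
+output "model_triggers" {
+  # Each id listed here must also appear as a `{{id}}` dependency in the query
+  value = data.etleap_model.my_model.query_and_triggers.triggers
+}
+```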
+
+
+### Nested Schema for `update_schedule`
+
+Read-Only:
+
+- `daily` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--daily))
+- `hourly` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--hourly))
+- `monthly` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--monthly))
+- `never` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--never))
+- `weekly` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--weekly))
+
+
+### Nested Schema for `update_schedule.daily`
+
+Read-Only:
+
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["DAILY"]
+
+
+
+### Nested Schema for `update_schedule.hourly`
+
+Read-Only:
+
+- `mode` (String) must be one of ["HOURLY"]
+
+
+
+### Nested Schema for `update_schedule.monthly`
+
+Read-Only:
+
+- `day_of_month` (Number) Day of the month this schedule should trigger at (in UTC).
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["MONTHLY"]
+
+
+
+### Nested Schema for `update_schedule.never`
+
+Read-Only:
+
+- `mode` (String) must be one of ["NEVER"]
+
+
+
+### Nested Schema for `update_schedule.weekly`
+
+Read-Only:
+
+- `day_of_week` (Number) The day of the week this schedule should trigger at (in UTC).
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["WEEKLY"]
+
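+A hedged sketch for `update_schedule`, assuming only the block matching the schedule's mode is populated:
+
+```terraform
+# Sketch: read the daily trigger hour; expected to be null under other modes (assumption)
+output "model_update_hour" {
+  value = data.etleap_model.my_model.update_schedule.daily.hour_of_day
+}
+```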
+
+
+
+### Nested Schema for `warehouse`
+
+Read-Only:
+
+- `redshift` (Attributes) (see [below for nested schema](#nestedatt--warehouse--redshift))
+- `snowflake` (Attributes) (see [below for nested schema](#nestedatt--warehouse--snowflake))
+
+
+### Nested Schema for `warehouse.redshift`
+
+Read-Only:
+
+- `connection_id` (String)
+- `distribution_style` (Attributes) (see [below for nested schema](#nestedatt--warehouse--redshift--distribution_style))
+- `materialized_view` (Boolean)
+- `pending_renamed_table` (String) Only set when a table rename was triggered but is not complete yet.
+- `schema` (String)
+- `sort_columns` (List of String) The sort columns to use.
+- `table` (String)
+- `type` (String) must be one of ["REDSHIFT"]
+- `wait_for_update_preparation` (Boolean)
+
+
+### Nested Schema for `warehouse.redshift.distribution_style`
+
+Read-Only:
+
+- `distribution_style_key` (Attributes) (see [below for nested schema](#nestedatt--warehouse--redshift--distribution_style--distribution_style_key))
+- `one` (String) must be one of ["ALL", "AUTO", "EVEN"]
+
+
+### Nested Schema for `warehouse.redshift.distribution_style.distribution_style_key`
+
+Read-Only:
+
+- `column` (String)
+- `type` (String) must be one of ["KEY"]
+
+
+
+
+
+### Nested Schema for `warehouse.snowflake`
+
+Read-Only:
+
+- `connection_id` (String)
+- `materialized_view` (Boolean)
+- `pending_renamed_table` (String) Only set when a table rename was triggered but is not complete yet.
+- `schema` (String)
+- `table` (String)
+- `type` (String) must be one of ["SNOWFLAKE"]
+- `wait_for_update_preparation` (Boolean)
+
+
diff --git a/docs/data-sources/pipeline.md b/docs/data-sources/pipeline.md
new file mode 100644
index 0000000..6d1a082
--- /dev/null
+++ b/docs/data-sources/pipeline.md
@@ -0,0 +1,1513 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_pipeline Data Source - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ Pipeline DataSource
+---
+
+# etleap_pipeline (Data Source)
+
+Pipeline DataSource
+
+## Example Usage
+
+```terraform
+data "etleap_pipeline" "my_pipeline" {
+ id = "6910ca83-a490-498c-962b-97419b360edc"
+}
+```
+
+
+## Schema
+
+### Read-Only
+
+- `create_date` (String) The date and time when the pipeline was created.
+- `destinations` (Attributes List) A pipeline may have multiple destinations if it is in the process of being migrated from one to another. (see [below for nested schema](#nestedatt--destinations))
+- `id` (String) The ID of this resource.
+- `last_refresh_finish_date` (String) The date and time when the last refresh finished. `null` if the pipeline was never refreshed.
+- `last_refresh_start_date` (String) The date and time when the last refresh was started. `null` if the pipeline was never refreshed.
+- `latency` (Number) The end-to-end latency in seconds for this pipeline. Not `null` only if the pipeline is running (not paused or stopped) and the initial backfill has finished. See the documentation for more details.
+- `latest_script_version` (Number) Valid script versions are whole numbers and range from 1 to this number.
+- `name` (String)
+- `owner` (Attributes) (see [below for nested schema](#nestedatt--owner))
+- `parsing_error_settings` (Attributes) (see [below for nested schema](#nestedatt--parsing_error_settings))
+- `paused` (Boolean) If the pipeline is paused. Defaults to `false`.
+- `pipeline_mode` (String) The pipeline mode refers to how the pipeline fetches data changes from the source and how those changes are applied to the destination table. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE", "QUERY"]
+- `refresh_schedule` (Attributes) A pipeline refresh processes all data in your source from the beginning to re-establish consistency with your destination. The pipeline refresh schedule defines when Etleap should automatically refresh the pipeline. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--refresh_schedule))
+- `shares` (List of String) A list of users' emails this pipeline is shared with.
+
+A pipeline cannot be unshared, and future calls to `PATCH` can only add to this list.
+- `source` (Attributes) (see [below for nested schema](#nestedatt--source))
+- `stop_reason` (String) Describes the reason a pipeline has stopped. `null` if the pipeline is currently running. must be one of ["PAUSED", "PARSING_ERRORS", "SCHEMA_CHANGES", "REDSHIFT_RESIZE", "REDSHIFT_MAINTENANCE", "SOURCE_CONNECTION_DOWN", "DESTINATION_CONNECTION_DOWN", "PERMANENTLY_STOPPED", "SOURCE_BROKEN", "QUOTA_REACHED", "SOURCE_INACTIVE", "DESTINATION_INACTIVE", "PIPELINE_MODE_CHANGE"]
+- `update_schedule` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. When undefined, the pipeline will default to the schedule set on the source connection. (see [below for nested schema](#nestedatt--update_schedule))
+
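+A minimal monitoring sketch, assuming the data source address from the example above:
+
+```terraform
+# Sketch: surface pipeline health fields described in the schema above
+output "pipeline_latency_seconds" {
+  # End-to-end latency in seconds; null while paused/stopped or before the initial backfill finishes
+  value = data.etleap_pipeline.my_pipeline.latency
+}
+
+output "pipeline_stop_reason" {
+  # Null while the pipeline is running
+  value = data.etleap_pipeline.my_pipeline.stop_reason
+}
+```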
+
+### Nested Schema for `destinations`
+
+Read-Only:
+
+- `current_version` (Number) The version of the pipeline that is currently writing to the output table.
+- `destination` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination))
+- `parsing_errors` (Attributes) Parsing errors that occur during the transformation of the pipeline. (see [below for nested schema](#nestedatt--destinations--parsing_errors))
+- `refresh_version` (Number) The version of the pipeline that is currently writing to the temporary refresh table. Only specified if there's currently a refresh in progress.
+- `retention_data` (Attributes) Etleap can remove old rows from your destination. This is a summary of the data retention. (see [below for nested schema](#nestedatt--destinations--retention_data))
+- `schema_change_activity` (Attributes List) Array of schema change objects. (see [below for nested schema](#nestedatt--destinations--schema_change_activity))
+
+
+### Nested Schema for `destinations.destination`
+
+Read-Only:
+
+- `delta_lake` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--delta_lake))
+- `redshift` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--redshift))
+- `s3_data_lake` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--s3_data_lake))
+- `snowflake` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--snowflake))
+
+
+### Nested Schema for `destinations.destination.delta_lake`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination.
+- `pre10_dot2_runtime_support` (Boolean) This setting disables column mapping on the tables created by this pipeline.
+
+When enabled, this pipeline will create Delta Lake tables that can be read by Databricks clusters with runtime versions before 10.2.
+
+However, without column mapping, native schema changes are not supported and will cause the table's underlying Parquet files to be rewritten, which can be slow. Schema changes will also not preserve column constraints such as `NOT NULL` on the destination tables.
+Default: false
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`.
+- `schema` (String) The schema in the destination that the tables will be created in.
+- `table` (String)
+- `type` (String)
+> Delta Lake connections are currently in Beta which means that they are subject to non-backwards-compatible and breaking changes.
+must be one of ["DELTA_LAKE"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
+
+
+### Nested Schema for `destinations.destination.redshift`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `compress_columns` (Boolean) Whether columns should be compressed. Defaults to `true`.
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `distribution_style` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--redshift--distribution_style))
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`.
+- `schema` (String) The schema in the destination that the tables will be created in. If this is not specified or set to `null` then the schema specified on the connection is used.
+- `sort_columns` (List of String) The sort columns to use.
+- `table` (String)
+- `truncate_strings` (Boolean) Truncate strings to 64K characters, the max allowed by Redshift in a single column. Defaults to `false`.
+- `type` (String) must be one of ["REDSHIFT"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
+
+### Nested Schema for `destinations.destination.redshift.distribution_style`
+
+Read-Only:
+
+- `distribution_style_key` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--redshift--distribution_style--distribution_style_key))
+- `one` (String) must be one of ["ALL", "AUTO", "EVEN"]
+
+
+### Nested Schema for `destinations.destination.redshift.distribution_style.distribution_style_key`
+
+Read-Only:
+
+- `column` (String)
+- `type` (String) must be one of ["KEY"]
+
+
+
+
+
+### Nested Schema for `destinations.destination.s3_data_lake`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `generate_snapshots` (Boolean) Defaults to `false`.
+- `output_format` (String) Format for output files. Defaults to `PARQUET`. For Glue-enabled destinations, only `PARQUET` is a valid format. must be one of ["PARQUET", "CSV"]; Default: "PARQUET"
+- `path_prefix` (String) The S3 path prefix to use for this pipeline. The data key in the destination bucket starts with `{connection.pathPrefix}/{pathPrefix}/v{version.pipeline}/` (see the example after this list).
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `type` (String) must be one of ["S3_DATA_LAKE"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
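+To illustrate the `path_prefix` key layout above with hypothetical values: a connection `pathPrefix` of `etleap`, a pipeline `path_prefix` of `orders`, and pipeline version 2 yield data keys starting with `etleap/orders/v2/`.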
+
+
+### Nested Schema for `destinations.destination.snowflake`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `clustering_keys` (List of String) Keys to cluster the table on. If unspecified, the table will use "automatic clustering".
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`.
+- `schema` (String) The schema in the destination that the tables will be created in. If this is not specified or set to `null` then the schema specified on the connection is used.
+- `table` (String)
+- `type` (String) must be one of ["SNOWFLAKE"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
+
+
+
+### Nested Schema for `destinations.parsing_errors`
+
+Read-Only:
+
+- `operation_errors_by_operation` (Attributes List) (see [below for nested schema](#nestedatt--destinations--parsing_errors--operation_errors_by_operation))
+- `parsing_errors_per_day` (Attributes List) (see [below for nested schema](#nestedatt--destinations--parsing_errors--parsing_errors_per_day))
+- `type_errors_by_column` (Attributes List) (see [below for nested schema](#nestedatt--destinations--parsing_errors--type_errors_by_column))
+
+
+### Nested Schema for `destinations.parsing_errors.operation_errors_by_operation`
+
+Read-Only:
+
+- `operation_description` (String)
+- `operation_index` (Number) Index of step in the script of this pipeline that caused this error.
+- `row_count` (Number)
+
+
+
+### Nested Schema for `destinations.parsing_errors.parsing_errors_per_day`
+
+Read-Only:
+
+- `day` (String) Format of the timestamp: 'yyyy-MM-dd'
+- `error_type` (String) must be one of ["TYPE", "OPERATION"]
+- `row_count` (Number)
+
+
+
+### Nested Schema for `destinations.parsing_errors.type_errors_by_column`
+
+Read-Only:
+
+- `column_name` (String)
+- `row_count` (Number)
+- `type` (String)
+
+
+
+
+### Nested Schema for `destinations.retention_data`
+
+Read-Only:
+
+- `retention_by_day` (Attributes) (see [below for nested schema](#nestedatt--destinations--retention_data--retention_by_day))
+- `retention_policy` (Attributes) Policy for the automatic deletion of rows in the destination. (see [below for nested schema](#nestedatt--destinations--retention_data--retention_policy))
+
+
+### Nested Schema for `destinations.retention_data.retention_by_day`
+
+Read-Only:
+
+- `rows_currently_in_warehouse` (Attributes List) (see [below for nested schema](#nestedatt--destinations--retention_data--retention_by_day--rows_currently_in_warehouse))
+- `rows_removed_from_warehouse` (Attributes List) (see [below for nested schema](#nestedatt--destinations--retention_data--retention_by_day--rows_removed_from_warehouse))
+
+
+### Nested Schema for `destinations.retention_data.retention_by_day.rows_currently_in_warehouse`
+
+Read-Only:
+
+- `date` (String) Format of the timestamp: 'yyyy-MM-dd'
+- `row_count` (Number)
+
+
+
+### Nested Schema for `destinations.retention_data.retention_by_day.rows_removed_from_warehouse`
+
+Read-Only:
+
+- `date` (String) Format of the timestamp: 'yyyy-MM-dd'
+- `row_count` (Number)
+
+
+
+
+### Nested Schema for `destinations.retention_data.retention_policy`
+
+Read-Only:
+
+- `column` (String) Name of the column that is used to calculate the interval. Must be a `date` or a `datetime` column.
+- `period` (Number) Number of days before a row gets removed.
+
+
+
+
+### Nested Schema for `destinations.schema_change_activity`
+
+Read-Only:
+
+- `date_time` (String) The date and time of the schema change. `null` if schema change has not yet been applied.
+- `schema_change_description` (String)
+
+
+
+
+### Nested Schema for `owner`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
+
+### Nested Schema for `parsing_error_settings`
+
+Read-Only:
+
+- `action` (String) Whether Etleap should STOP the pipeline or NOTIFY once the `threshold` is reached. must be one of ["STOP", "NOTIFY"]
+- `threshold` (Number) The parsing error threshold, in percentage points, for the `action` to be triggered.
+
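+For example, with hypothetical values: an `action` of `STOP` and a `threshold` of `5` stops the pipeline once parsing errors exceed 5 percentage points.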
+
+
+### Nested Schema for `refresh_schedule`
+
+Read-Only:
+
+- `daily` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--daily))
+- `hourly` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--hourly))
+- `monthly` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--monthly))
+- `never` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--never))
+- `weekly` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--weekly))
+
+
+### Nested Schema for `refresh_schedule.daily`
+
+Read-Only:
+
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["DAILY"]
+
+
+
+### Nested Schema for `refresh_schedule.hourly`
+
+Read-Only:
+
+- `mode` (String) must be one of ["HOURLY"]
+
+
+
+### Nested Schema for `refresh_schedule.monthly`
+
+Read-Only:
+
+- `day_of_month` (Number) Day of the month this schedule should trigger at (in UTC).
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["MONTHLY"]
+
+
+
+### Nested Schema for `refresh_schedule.never`
+
+Read-Only:
+
+- `mode` (String) must be one of ["NEVER"]
+
+
+
+### Nested Schema for `refresh_schedule.weekly`
+
+Read-Only:
+
+- `day_of_week` (Number) The day of the week this schedule should trigger at (in UTC).
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["WEEKLY"]
+
+
+
+
+### Nested Schema for `source`
+
+Read-Only:
+
+- `active_campaign` (Attributes) (see [below for nested schema](#nestedatt--source--active_campaign))
+- `bigquery` (Attributes) (see [below for nested schema](#nestedatt--source--bigquery))
+- `bing_ads` (Attributes) (see [below for nested schema](#nestedatt--source--bing_ads))
+- `blackline` (Attributes) (see [below for nested schema](#nestedatt--source--blackline))
+- `criteo` (Attributes) (see [below for nested schema](#nestedatt--source--criteo))
+- `db2` (Attributes) (see [below for nested schema](#nestedatt--source--db2))
+- `db2_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--db2_sharded))
+- `delta_lake` (Attributes) (see [below for nested schema](#nestedatt--source--delta_lake))
+- `elasticsearch` (Attributes) (see [below for nested schema](#nestedatt--source--elasticsearch))
+- `elluminate` (Attributes) (see [below for nested schema](#nestedatt--source--elluminate))
+- `eloqua` (Attributes) (see [below for nested schema](#nestedatt--source--eloqua))
+- `facebook_ads` (Attributes) (see [below for nested schema](#nestedatt--source--facebook_ads))
+- `fifteen_five` (Attributes) (see [below for nested schema](#nestedatt--source--fifteen_five))
+- `freshworks` (Attributes) (see [below for nested schema](#nestedatt--source--freshworks))
+- `ftp` (Attributes) (see [below for nested schema](#nestedatt--source--ftp))
+- `gong` (Attributes) (see [below for nested schema](#nestedatt--source--gong))
+- `google_ads` (Attributes) (see [below for nested schema](#nestedatt--source--google_ads))
+- `google_analytics` (Attributes) (see [below for nested schema](#nestedatt--source--google_analytics))
+- `google_analytics_ga4` (Attributes) (see [below for nested schema](#nestedatt--source--google_analytics_ga4))
+- `google_cloud_storage` (Attributes) (see [below for nested schema](#nestedatt--source--google_cloud_storage))
+- `google_sheets` (Attributes) (see [below for nested schema](#nestedatt--source--google_sheets))
+- `hubspot` (Attributes) (see [below for nested schema](#nestedatt--source--hubspot))
+- `impact_radius` (Attributes) (see [below for nested schema](#nestedatt--source--impact_radius))
+- `intercom` (Attributes) (see [below for nested schema](#nestedatt--source--intercom))
+- `jira` (Attributes) (see [below for nested schema](#nestedatt--source--jira))
+- `jira_align` (Attributes) (see [below for nested schema](#nestedatt--source--jira_align))
+- `kafka` (Attributes) (see [below for nested schema](#nestedatt--source--kafka))
+- `kustomer` (Attributes) (see [below for nested schema](#nestedatt--source--kustomer))
+- `ldap` (Attributes) (see [below for nested schema](#nestedatt--source--ldap))
+- `ldap_virtual_list_view` (Attributes) (see [below for nested schema](#nestedatt--source--ldap_virtual_list_view))
+- `linked_in_ads` (Attributes) (see [below for nested schema](#nestedatt--source--linked_in_ads))
+- `marketo` (Attributes) (see [below for nested schema](#nestedatt--source--marketo))
+- `mixpanel` (Attributes) (see [below for nested schema](#nestedatt--source--mixpanel))
+- `mongodb` (Attributes) (see [below for nested schema](#nestedatt--source--mongodb))
+- `mysql` (Attributes) (see [below for nested schema](#nestedatt--source--mysql))
+- `mysql_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--mysql_sharded))
+- `netsuite` (Attributes) (see [below for nested schema](#nestedatt--source--netsuite))
+- `netsuite_v2` (Attributes) (see [below for nested schema](#nestedatt--source--netsuite_v2))
+- `oracle` (Attributes) (see [below for nested schema](#nestedatt--source--oracle))
+- `oracle_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--oracle_sharded))
+- `outlook` (Attributes) (see [below for nested schema](#nestedatt--source--outlook))
+- `outreach` (Attributes) (see [below for nested schema](#nestedatt--source--outreach))
+- `pinterest_ads` (Attributes) (see [below for nested schema](#nestedatt--source--pinterest_ads))
+- `postgres` (Attributes) (see [below for nested schema](#nestedatt--source--postgres))
+- `postgres_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--postgres_sharded))
+- `quora_ads` (Attributes) (see [below for nested schema](#nestedatt--source--quora_ads))
+- `rave_medidata` (Attributes) (see [below for nested schema](#nestedatt--source--rave_medidata))
+- `recurly` (Attributes) (see [below for nested schema](#nestedatt--source--recurly))
+- `redshift` (Attributes) (see [below for nested schema](#nestedatt--source--redshift))
+- `redshift_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--redshift_sharded))
+- `s3_input` (Attributes) (see [below for nested schema](#nestedatt--source--s3_input))
+- `s3_legacy` (Attributes) (see [below for nested schema](#nestedatt--source--s3_legacy))
+- `salesforce` (Attributes) (see [below for nested schema](#nestedatt--source--salesforce))
+- `salesforce_marketing_cloud` (Attributes) (see [below for nested schema](#nestedatt--source--salesforce_marketing_cloud))
+- `sap_hana` (Attributes) (see [below for nested schema](#nestedatt--source--sap_hana))
+- `sap_hana_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--sap_hana_sharded))
+- `seismic` (Attributes) (see [below for nested schema](#nestedatt--source--seismic))
+- `sftp` (Attributes) (see [below for nested schema](#nestedatt--source--sftp))
+- `shopify` (Attributes) (see [below for nested schema](#nestedatt--source--shopify))
+- `skyward` (Attributes) (see [below for nested schema](#nestedatt--source--skyward))
+- `snapchat_ads` (Attributes) (see [below for nested schema](#nestedatt--source--snapchat_ads))
+- `snowflake` (Attributes) (see [below for nested schema](#nestedatt--source--snowflake))
+- `snowflake_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--snowflake_sharded))
+- `sql_server` (Attributes) (see [below for nested schema](#nestedatt--source--sql_server))
+- `sql_server_sharded` (Attributes) (see [below for nested schema](#nestedatt--source--sql_server_sharded))
+- `square` (Attributes) (see [below for nested schema](#nestedatt--source--square))
+- `streaming` (Attributes) (see [below for nested schema](#nestedatt--source--streaming))
+- `stripe` (Attributes) (see [below for nested schema](#nestedatt--source--stripe))
+- `sumtotal` (Attributes) (see [below for nested schema](#nestedatt--source--sumtotal))
+- `the_trade_desk` (Attributes) (see [below for nested schema](#nestedatt--source--the_trade_desk))
+- `tik_tok_ads` (Attributes) (see [below for nested schema](#nestedatt--source--tik_tok_ads))
+- `twilio` (Attributes) (see [below for nested schema](#nestedatt--source--twilio))
+- `twitter_ads` (Attributes) (see [below for nested schema](#nestedatt--source--twitter_ads))
+- `user_defined_api` (Attributes) (see [below for nested schema](#nestedatt--source--user_defined_api))
+- `uservoice` (Attributes) (see [below for nested schema](#nestedatt--source--uservoice))
+- `veeva` (Attributes) (see [below for nested schema](#nestedatt--source--veeva))
+- `verizon_media_dsp` (Attributes) (see [below for nested schema](#nestedatt--source--verizon_media_dsp))
+- `workday_report` (Attributes) (see [below for nested schema](#nestedatt--source--workday_report))
+- `workfront` (Attributes) (see [below for nested schema](#nestedatt--source--workfront))
+- `zendesk` (Attributes) (see [below for nested schema](#nestedatt--source--zendesk))
+- `zoom_phone` (Attributes) (see [below for nested schema](#nestedatt--source--zoom_phone))
+- `zuora` (Attributes) (see [below for nested schema](#nestedatt--source--zuora))
+
+
+### Nested Schema for `source.active_campaign`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The ActiveCampaign resource. Example: Contacts, Custom Fields and Custom Values
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["ACTIVE_CAMPAIGN"]
+
+
+
+### Nested Schema for `source.bigquery`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `dataset` (String) Name of dataset in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. (See the example after this list.)
+- `type` (String) must be one of ["BIGQUERY"]
+
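+To illustrate `table_name_filter` with a hypothetical pattern: `events_\d{8}` would match daily partitions such as `events_20240101`, all of which must share the same table schema.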
+
+
+### Nested Schema for `source.bing_ads`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The report type.
+- `fields` (List of String) The field names.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["BING_ADS"]
+
+
+
+### Nested Schema for `source.blackline`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Blackline report name. Example: Account Details Extract Template
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["BLACKLINE"]
+
+
+
+### Nested Schema for `source.criteo`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `currency` (List of String) Specify the report `currency` if and only if the entity is 'report_placement', 'report_statistics' or 'report_transactions'. Example values: [USD, EUR]
+- `dimensions` (List of String) Specify the report `dimension` if and only if the entity is 'report_placement' or 'report_statistics'. Example values: [Day, advertiserId, adsetId]
+- `entity` (String) The Criteo resource. Example: ad_set, advertiser, audience, campaign, report_placement, report_statistics, and report_transactions.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String) Specify the report `metric` if and only if the entity is 'report_placement' or 'report_statistics'. Example values: [clicks, displays]
+- `timezone` (List of String) Specify the report `timezone` if and only if the entity is 'report_placement' or 'report_transactions'. Example values: [UTC, ETC/GMT-3]
+- `type` (String) must be one of ["CRITEO"]
+
+
+
+### Nested Schema for `source.db2`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["DB2"]
+
+
+
+### Nested Schema for `source.db2_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["DB2_SHARDED"]
+
+
+
+### Nested Schema for `source.delta_lake`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["DELTA_LAKE"]
+
+
+
+### Nested Schema for `source.elasticsearch`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The index name.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["ELASTICSEARCH"]
+
+
+
+### Nested Schema for `source.elluminate`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `domain_name` (List of String) The Domain that you want to extract from. If no domain is specified, Etleap will extract data from all of the schema's domains.
+- `entity` (String) The Elluminate study name.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `schema_name` (String) The Schema that you want to extract from.
+- `type` (String) must be one of ["ELLUMINATE"]
+
+
+
+### Nested Schema for `source.eloqua`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Eloqua resource, spelled as it is shown in the Eloqua UI. Each ActivityType is a different entity and is spelled without spaces, like `EmailClickthrough` and `EmailSend`.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["ELOQUA"]
+
+
+
+### Nested Schema for `source.facebook_ads`
+
+Read-Only:
+
+- `breakdowns` (List of String) The breakdown fields. The first one must be `date_start`. See the [Facebook Documentation on Breakdowns](https://developers.facebook.com/docs/marketing-api/insights/breakdowns/v16.0#insights-api-breakdowns).
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The aggregation level of the Facebook report. Example values: [Insights by Ad, Insights by Adset, Insights by Campaign, Insights by Account]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["FACEBOOK_ADS"]
+
+
+
+### Nested Schema for `source.fifteen_five`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The 15Five entity. Example values: [answer, report, department, feature-status, group-type, group, high-five, objective_objective_id_history, objective, attribute_value, attribute, priority, question, security-audit, vacation, user]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["FIFTEEN_FIVE"]
+
+
+
+### Nested Schema for `source.freshworks`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["FRESHWORKS"]
+
+
+
+### Nested Schema for `source.ftp`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here.
+- `type` (String) must be one of ["FTP"]
+
+
+
+### Nested Schema for `source.gong`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Gong resource. Example values: [Answered Scorecards, Call Transcripts, Calls, Calls Extensive, Folders, Interactions, Scorecards, Users, Users Activity, Users Extensive, Workspaces]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["GONG"]
+
+
+
+### Nested Schema for `source.google_ads`
+
+Read-Only:
+
+- `attributed_resources` (List of String) Specify the report `attributed resources`. Example values: [campaign_budget.id, campaign_budget.name, bidding_strategy.type]
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Google Ads report type, capitalized and spelled with spaces between words.
+- `fields` (List of String) Specify the report `fields`. Example values: [campaign.resource_name, campaign.campaign_budget, campaign.advertising_channel_type]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String) Specify the report `metrics`. Example values: [metrics.clicks, metrics.all_conversions, metrics.average_cost]
+- `segments` (List of String) Specify the report `segmentation` groups. Example values: [segments.date, segments.click_type, segments.geo_target_county]
+- `type` (String) must be one of ["GOOGLE_ADS"]
+
+
+
+### Nested Schema for `source.google_analytics`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `dimensions` (List of String)
+- `entity` (String) The full name of the site in Google Analytics
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `max_accuracy_start_date` (String) Format of the timestamp: 'yyyy-MM-dd'.
+- `metrics` (List of String)
+- `segment` (String)
+- `service` (String)
+| Service | Description |
+| - | - |
+| `REPORTING` | Gives you access to Google Analytics data, including segments. |
+| `MULTI_CHANNEL_FUNNELS` | Get conversion path data which shows user interactions with various traffic sources. |
+must be one of ["REPORTING", "MULTI_CHANNEL_FUNNELS"]
+- `type` (String) must be one of ["GOOGLE_ANALYTICS"]
+
+
+
+### Nested Schema for `source.google_analytics_ga4`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `dimensions` (List of String) Dimensions are attributes for your data. Example values: [date, browser].
+- `entity` (String) The Google Analytics GA4 resource. Provide the ID of the GA4 resource. You can find out how to retrieve the ID of your resource here.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String) Metrics represent quantitative measurements calculated by Google Analytics. Example values: [active1DayUsers, conversions]
+- `type` (String) must be one of ["GOOGLE_ANALYTICS_GA4"]
+
+
+
+### Nested Schema for `source.google_cloud_storage`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here.
+- `type` (String) must be one of ["GOOGLE_CLOUD_STORAGE"]
+
+
+
+### Nested Schema for `source.google_sheets`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) Google Sheets entities are in the form `SpreadsheetID/SheetID`. You can find both values by clicking on the sheet (tab) you want and looking at the URL: docs.google.com/spreadsheets/d/`1pRAGMSRpEEG31kbtG2qcpr-HDeDfvafp_v00`/edit#gid=`642381756`
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["GOOGLE_SHEETS"]
+
+
+
+### Nested Schema for `source.hubspot`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Hubspot entity. Example values: [Campaigns, Contacts, Email Events, Engagements, Deals, Owners, Deal Pipelines, Companies, Marketing Emails, Pages, Landing Pages Analytics]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["HUBSPOT"]
+
+
+
+### Nested Schema for `source.impact_radius`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Impact Radius entity, spelled the same way as in the UI.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["IMPACT_RADIUS"]
+
+
+
+### Nested Schema for `source.intercom`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Intercom entity. Example values: [User, Lead, Contact, Company, Admin, Tag, Segment, Note, Event, Counts, Conversation Counts, Admin Conversation Counts, User Tags Counts, User Segments Counts, Company Tags Counts, Company Segments Counts, Conversation, Conversation Parts, Conversation Tags, Subscription]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["INTERCOM"]
+
+
+
+### Nested Schema for `source.jira`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The JIRA entity. Example values: [Issues, Issue Links, Issue Types, Changelog, Comments, Worklogs, Fields, Groups, Group Members, Priorities, Projects, Resolutions, Statuses, Status Categories, Users, Multiple Choice Field]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["JIRA"]
+
+
+
+### Nested Schema for `source.jira_align`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The JIRA Align entity. Spelled without spaces except for the Audit Logs. Example values: [Capabilities, Cities, Customers, Defects, Epics, Epics Audit Logs, Features, Features Audit Logs, Goals, Ideas, Iterations, KeyResults, Milestones, Milestones Audit Logs, Objectives, Objectives Audit Logs, Portfolios, Products, Programs, Regions, ReleaseVehicles, Releases, Snapshots, Stories, Tasks, Teams, Themes, Users, ValueStreams]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["JIRA_ALIGN"]
+
+
+
+### Nested Schema for `source.kafka`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) You can ingest data from Kafka topics.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["KAFKA"]
+
+
+
+### Nested Schema for `source.kustomer`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Kustomer entity. Example values: [AUDIT_LOGS, BRANDS, BUSINESS_SCHEDULES, CARDS, COMPANIES, CONVERSATION_ATTACHMENTS, CONVERSATION_EVENTS, CONVERSATION_FORWARDS, CONVERSATION_TIMES, CONVERSATIONS, CUSTOM_ATTRIBUTE_METADATA, CUSTOMER_DRAFTS, CUSTOMER_MERGES, CUSTOMERS, KOBJECTS, KLASSES, MESSAGES, NOTES, NOTIFICATIONS, OUTBOUND_ACCOUNTS, QUEUES, SLAS, SATISFACTIONS, SHORTCUTS, SNOOZES, SPAM_SENDERS, TEAM_ROUTING_SETTINGS, TEAMS, USERS]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["KUSTOMER"]
+
+
+
+### Nested Schema for `source.ldap`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["LDAP"]
+
+
+
+### Nested Schema for `source.ldap_virtual_list_view`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["LDAP_VIRTUAL_LIST_VIEW"]
+
+
+
+### Nested Schema for `source.linked_in_ads`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The LinkedIn resource. Example values: [ACCOUNTS, ACCOUNT_USERS, AD_ANALYTICS, CAMPAIGNS, CAMPAIGN_GROUPS, CONVERSIONS, INSIGHT_TAG_DOMAINS]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String) Specify the report `metrics` if and only if the entity is 'AD_ANALYTICS'. Example values: [dateRange, pivotValues, clicks]
+- `pivots` (List of String) Specify the report `pivots` groups if and only if the entity is 'AD_ANALYTICS'. Example values: [ACCOUNT, CAMPAIGN, COMPANY]
+- `type` (String) must be one of ["LINKED_IN_ADS"]
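+
+A sketch (not generated documentation) of how these fields combine when written on an `etleap_pipeline` resource, with other required pipeline arguments omitted; the metric and pivot values are taken from the examples above:
+
+```terraform
+source = {
+  linked_in_ads = {
+    connection_id = "...my_connection_id..."
+    # `metrics` and `pivots` are only valid because `entity` is 'AD_ANALYTICS'.
+    entity  = "AD_ANALYTICS"
+    metrics = ["dateRange", "pivotValues", "clicks"]
+    pivots  = ["CAMPAIGN"]
+    type    = "LINKED_IN_ADS"
+  }
+}
+```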
+
+
+
+### Nested Schema for `source.marketo`
+
+Read-Only:
+
+- `activity_types` (List of String) Specify `activityTypes` if and only if the entity is 'Activities'
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Marketo entity type. Example values: [Leads, Activities, Campaigns, Programs, Tags]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["MARKETO"]
+
+
+
+### Nested Schema for `source.mixpanel`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) 'Raw Data' is the only entity available for Mixpanel.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["MIXPANEL"]
+
+
+
+### Nested Schema for `source.mongodb`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["MONGODB"]
+
+
+
+### Nested Schema for `source.mysql`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `database` (String) Name of the database in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["MYSQL"]
+
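+A sketch (not part of the generated schema) of how these constraints combine when the same fields are written on an `etleap_pipeline` resource; the values are illustrative, other required pipeline arguments are omitted, and exactly one of `table` or `table_name_filter` may be set:
+
+```terraform
+source = {
+  mysql = {
+    connection_id = "...my_connection_id..."
+    database      = "orders_db" # hypothetical database name
+    # Either `table` or `table_name_filter`, never both:
+    table = "orders"
+    # Key columns in order; an empty array if the table has no primary key:
+    primary_key_columns = ["order_id"]
+    type = "MYSQL"
+  }
+}
+```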
+
+
+### Nested Schema for `source.mysql_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `database` (String) Name of the database in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["MYSQL_SHARDED"]
+
+
+
+### Nested Schema for `source.netsuite`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Netsuite entity. Spelled capitalized without spaces unless you have defined a custom entity in Netsuite with a different capitalization.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["NETSUITE"]
+
+
+
+### Nested Schema for `source.netsuite_v2`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Netsuite entity. Spelled capitalized with spaces.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["NETSUITE_V2"]
+
+
+
+### Nested Schema for `source.oracle`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["ORACLE"]
+
+
+
+### Nested Schema for `source.oracle_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["ORACLE_SHARDED"]
+
+
+
+### Nested Schema for `source.outlook`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Outlook entity. Example values: [Messages, Events].
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["OUTLOOK"]
+
+
+
+### Nested Schema for `source.outreach`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["OUTREACH"]
+
+
+
+### Nested Schema for `source.pinterest_ads`
+
+Read-Only:
+
+- `columns` (List of String) Specify the report `metrics` if and only if the entity is 'reports'. Example values: [SPEND_IN_MICRO_DOLLAR, PAID_IMPRESSION, CPC_IN_MICRO_DOLLAR]
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Pinterest Ads resource. Example values: [ad_accounts, ad_groups, ads, campaigns, reports]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `level` (List of String) Specify the report `data level` if and only if the entity is 'reports'. Example values: [ADVERTISER, CAMPAIGN, AD_GROUP]
+- `targeting_types` (List of String) Specify the report `targeting types` if and only if the entity is 'reports'. Example values: [KEYWORD, APPTYPE, LOCATION]
+- `type` (String) must be one of ["PINTEREST_ADS"]
+
+
+
+### Nested Schema for `source.postgres`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["POSTGRES"]
+
+
+
+### Nested Schema for `source.postgres_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["POSTGRES_SHARDED"]
+
+
+
+### Nested Schema for `source.quora_ads`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The level of aggregation for your Quora Ads data. Example values: [Account, Campaign, Ad Set, Ad]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["QUORA_ADS"]
+
+
+
+### Nested Schema for `source.rave_medidata`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Rave Medidata entity. Example values: [dataset, study, @-@]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["RAVE_MEDIDATA"]
+
+
+
+### Nested Schema for `source.recurly`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Recurly entity. Example values: [Account, Account Acquisition, Line Item, Coupon, Coupon Redemption, Credit Payment, Invoice, Measured Unit, Plan, Plan Add-On, Subscription, Transaction]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["RECURLY"]
+
+
+
+### Nested Schema for `source.redshift`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["REDSHIFT"]
+
+
+
+### Nested Schema for `source.redshift_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["REDSHIFT_SHARDED"]
+
+
+
+### Nested Schema for `source.s3_input`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified.
+- `files_can_change` (Boolean) Etleap can check whether files that were already processed have changed. If a file has changed, Etleap fetches the new file, removes the old file's data from the destination, and adds the changed data.
+This can only be enabled when `newFileBehavior` is set to `APPEND`. Defaults to `false`.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here.
+- `triggered_by_event` (Boolean) Whether this source should be triggered by a `Batch Added` event (`true`) or Etleap should inspect the source to find new files to process (`false`). Defaults to `false`.
+- `type` (String) must be one of ["S3_INPUT"]
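+
+A hedged sketch in resource form (other required arguments omitted; the paths and dates are hypothetical) of an `s3_input` source that appends new files and reprocesses changed ones:
+
+```terraform
+source = {
+  s3_input = {
+    connection_id = "...my_connection_id..."
+    # `fileNameFilter` or `paths` must be specified; with a filter, give exactly one folder path.
+    file_name_filter  = ".*\\.csv"
+    paths             = ["exports/daily"]
+    new_file_behavior = "APPEND"
+    files_can_change  = true # only allowed while `new_file_behavior` is APPEND
+    low_watermark     = "2024-01-01"
+    type              = "S3_INPUT"
+  }
+}
+```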
+
+
+
+### Nested Schema for `source.s3_legacy`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here.
+- `type` (String) must be one of ["S3_LEGACY"]
+
+
+
+### Nested Schema for `source.salesforce`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Salesforce table. Spelled capitalized without spaces, unless it is a custom table like `My_Table__c`.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SALESFORCE"]
+
+
+
+### Nested Schema for `source.salesforce_marketing_cloud`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Salesforce Marketing Cloud entity. Example Values: [Bounce Event, Campaign, Click Event, Content Area, Data Extension, Data Extension Object, Email, Folders, List Subscriber, Lists, Open Event, Send, Sent Event, Subscribers, Unsub Event]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SALESFORCE_MARKETING_CLOUD"]
+
+
+
+### Nested Schema for `source.sap_hana`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["SAP_HANA"]
+
+
+
+### Nested Schema for `source.sap_hana_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["SAP_HANA_SHARDED"]
+
+
+
+### Nested Schema for `source.seismic`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Seismic entity. Example values: [Livesend Links, Livesend Link Contents, Livesend Link Members, Livesend Page Views, Users, User Activity]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SEISMIC"]
+
+
+
+### Nested Schema for `source.sftp`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here.
+- `type` (String) must be one of ["SFTP"]
+
+
+
+### Nested Schema for `source.shopify`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Shopify entity. Spelled with spaces and only the first word capitalized. Nested JSON objects are selected by appending the field name. For example, `Orders fulfillments line items` has the lineItems field from the `Order fulfillments` entity. Start creating a pipeline in the Etleap UI for the full list of entities.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SHOPIFY"]
+
+
+
+### Nested Schema for `source.skyward`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Skyward entity. Spelled capitalized with spaces. Example Values: [Academic Sessions, Categories, Classes, Courses, Demographics, Enrollments, Grading Periods, Line Items, Orgs, Results, Schools, Students, Teachers, Terms, Users]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SKYWARD"]
+
+
+
+### Nested Schema for `source.snapchat_ads`
+
+Read-Only:
+
+- `additional_metrics` (List of String) Specify the report `additional metrics` if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [android_installs, attachment_avg_view_time_millis, attachment_frequency]
+- `breakdown` (String) Specify the report `breakdown` if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [ad, adsquad, campaign]
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Snapchat Ads entity. Example values: [ad, adaccount, ad_account_report_hourly, ad_account_report_daily]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String) Specify the report `metrics` if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [impressions, swipes, screen_time_millis]
+- `report_dimension` (List of String) Specify the report `dimension` groups if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [country, region, gender]
+- `type` (String) must be one of ["SNAPCHAT_ADS"]
+
+
+
+### Nested Schema for `source.snowflake`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["SNOWFLAKE"]
+
+
+
+### Nested Schema for `source.snowflake_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["SNOWFLAKE_SHARDED"]
+
+
+
+### Nested Schema for `source.sql_server`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["SQL_SERVER"]
+
+
+
+### Nested Schema for `source.sql_server_sharded`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array.
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both.
+- `type` (String) must be one of ["SQL_SERVER_SHARDED"]
+
+
+
+### Nested Schema for `source.square`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Square entity. Example values: [Catalog, Customers, Loyalty Accounts, Loyalty Events, Loyalty Rewards, Orders, Refunds]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SQUARE"]
+
+
+
+### Nested Schema for `source.streaming`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here.
+- `type` (String) must be one of ["STREAMING"]
+
+
+
+### Nested Schema for `source.stripe`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Stripe entity. Example values: [Subscriptions, Invoice, InvoiceItems, Events]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["STRIPE"]
+
+
+
+### Nested Schema for `source.sumtotal`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The SumTotal entity. Example values: [Activities, Audiences, Competencies, Domains, Grades, Jobs, Organizations, Skills, Social, Topics, User Activities, User Activities Progress, User Courses, Users]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["SUMTOTAL"]
+
+
+
+### Nested Schema for `source.the_trade_desk`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["THE_TRADE_DESK"]
+
+
+
+### Nested Schema for `source.tik_tok_ads`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `data_level` (String) Specify the report `data level` if and only if the entity is 'REPORT'. Example values: [AUCTION_AD, AUCTION_CAMPAIGN, RESERVATION_AD]
+- `dimensions` (List of String) Specify the report `dimension` groups if and only if the entity is 'REPORT'. Example values: [start_time_day, start_time_hour, campaign_id]
+- `entity` (String) The TikTok Ads resource. Example values: [AD, ADGROUP, ADVERTISER, CAMPAIGN, REPORT]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String) Specify the report `metrics` if and only if the entity is 'REPORT'. Example values: [ad_name, clicks, conversion]
+- `type` (String) must be one of ["TIK_TOK_ADS"]
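+
+A sketch of a report-level source in resource form (other required arguments omitted; the dimension and metric values are illustrative): `data_level`, `dimensions`, and `metrics` are set only because `entity` is 'REPORT':
+
+```terraform
+source = {
+  tik_tok_ads = {
+    connection_id = "...my_connection_id..."
+    entity        = "REPORT"
+    data_level    = "AUCTION_CAMPAIGN"
+    dimensions    = ["start_time_day", "campaign_id"]
+    metrics       = ["clicks", "conversion"]
+    type          = "TIK_TOK_ADS"
+  }
+}
+```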
+
+
+
+### Nested Schema for `source.twilio`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Twilio entity. Example values: [Calls, Calls summary, Messages, Usage records]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["TWILIO"]
+
+
+
+### Nested Schema for `source.twitter_ads`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Twitter entity. Example values: [Account, Campaign, Funding Instrument, Line Item, Media Creative, Promoted Tweet, Followers, Tweets Likes, Tweets Quotes, Retweets, Recent Mentions, Tweets, Account Report, Campaign Report, Funding Instrument Report, Line Item Report, Media Creative Report, Promoted Tweet Report]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["TWITTER_ADS"]
+
+
+
+### Nested Schema for `source.user_defined_api`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The User-Defined API entity.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["USER_DEFINED_API"]
+
+
+
+### Nested Schema for `source.uservoice`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The UserVoice entity. Example values: [Category, Comment, Feature, Feature Status, Feedback Record, Forum, Forum Invitation, Internal Status, Label, NPS Rating, Note, Permission, Product Area, Score, Segment, Segmented Values, Status, Status Updates, Suggestion, Suggestion Activity Entry, Supporter, Supporter Message, Team, User, VSTS Work Item]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["USERVOICE"]
+
+
+
+### Nested Schema for `source.veeva`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Veeva Entity name. Example values: [APQR, APQR Item, Action, Activity, Admin Link, Admin Section, Admin Section Controller Code, Answer Library Design, Application Context Selector, Application License Model, Application License Model Field, Application Manifest, Application Provisioner, Application Role]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["VEEVA"]
+
+
+
+### Nested Schema for `source.verizon_media_dsp`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `dimensions` (List of String)
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `metrics` (List of String)
+- `type` (String) must be one of ["VERIZON_MEDIA_DSP"]
+
+
+
+### Nested Schema for `source.workday_report`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Workday Report name. Spelled the same as in the Workday UI, but with all spaces replaced with underscores.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["WORKDAY_REPORT"]
+
+
+
+### Nested Schema for `source.workfront`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Workfront entity. Spelled capitalized without spaces. For the full list, start creating a pipeline in the Etleap UI.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["WORKFRONT"]
+
+
+
+### Nested Schema for `source.zendesk`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Zendesk entity. Example values: [Group Memberships, Groups, Macros, Organizations, Satisfaction Ratings, SLA Policies, Tags, Ticket Audits, Ticket Comments, Ticket Fields, Ticket Forms, Tickets, Ticket Metrics, Users]
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["ZENDESK"]
+
+
+
+### Nested Schema for `source.zoom_phone`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String)
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["ZOOM_PHONE"]
+
+
+
+### Nested Schema for `source.zuora`
+
+Read-Only:
+
+- `connection_id` (String) The universally unique identifier for the source.
+- `entity` (String) The Zuora entity. Spelled capitalized with spaces. For the full list, start creating a pipeline in the Etleap UI.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`.
+- `type` (String) must be one of ["ZUORA"]
+
+
+
+
+### Nested Schema for `update_schedule`
+
+Read-Only:
+
+- `daily` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--daily))
+- `hourly` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--hourly))
+- `interval` (Attributes) Specify how long to wait after each extraction before polling for new data. When undefined, the pipeline will default to the schedule set on the source connection. (see [below for nested schema](#nestedatt--update_schedule--interval))
+- `monthly` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--monthly))
+- `weekly` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--weekly))
+
+
+### Nested Schema for `update_schedule.daily`
+
+Read-Only:
+
+- `hour_of_day` (Number) Hour of day the pipeline update should be started at (in UTC).
+- `mode` (String) must be one of ["DAILY"]
+
+
+
+### Nested Schema for `update_schedule.hourly`
+
+Read-Only:
+
+- `mode` (String) must be one of ["HOURLY"]
+
+
+
+### Nested Schema for `update_schedule.interval`
+
+Read-Only:
+
+- `interval_minutes` (Number) Time to wait before new data is pulled (in minutes).
+- `mode` (String) must be one of ["INTERVAL"]
+
+
+
+### Nested Schema for `update_schedule.monthly`
+
+Read-Only:
+
+- `day_of_month` (Number)
+- `hour_of_day` (Number) Hour of day the pipeline update should be started at (in UTC).
+- `mode` (String) must be one of ["MONTHLY"]
+
+
+
+### Nested Schema for `update_schedule.weekly`
+
+Read-Only:
+
+- `day_of_week` (Number)
+- `hour_of_day` (Number) Hour of day the pipeline update should be started at (in UTC).
+- `mode` (String) must be one of ["WEEKLY"]
+
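+These schedule attributes are read-only on the data source; sketched below is the equivalent writable form on an `etleap_pipeline` resource (assuming it accepts the same nested one-of shape), polling for new data 30 minutes after each extraction finishes:
+
+```terraform
+update_schedule = {
+  interval = {
+    interval_minutes = 30
+    mode             = "INTERVAL"
+  }
+}
+```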
+
diff --git a/docs/data-sources/team.md b/docs/data-sources/team.md
new file mode 100644
index 0000000..57f6779
--- /dev/null
+++ b/docs/data-sources/team.md
@@ -0,0 +1,42 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_team Data Source - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ Team DataSource
+---
+
+# etleap_team (Data Source)
+
+Team DataSource
+
+## Example Usage
+
+```terraform
+data "etleap_team" "my_team" {
+ id = "1d66b24b-f8cf-4b72-961b-24a4a5fe09a5"
+}
+```
+
+
+## Schema
+
+### Read-Only
+
+- `create_date` (String) The date and time when the team was created.
+- `description` (String)
+- `id` (String) The ID of this resource.
+- `members` (Attributes List) (see [below for nested schema](#nestedatt--members))
+- `name` (String)
+
+
+### Nested Schema for `members`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..c8356fe
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,37 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap Provider"
+subcategory: ""
+description: |-
+ Etleap API v2: Etleap API v2
+---
+
+# etleap Provider
+
+Etleap API v2: Etleap API v2
+
+## Example Usage
+
+```terraform
+terraform {
+ required_providers {
+ etleap = {
+ source = "etleap/etleap"
+ version = "0.0.2"
+ }
+ }
+}
+
+provider "etleap" {
+ # Configuration options
+}
+```
+
+
+## Schema
+
+### Optional
+
+- `password` (String, Sensitive)
+- `server_url` (String) Server URL (defaults to https://api.etleap.com/api/v2)
+- `username` (String, Sensitive)
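+
+A minimal sketch of supplying the optional credentials (the variable names are hypothetical; prefer variables or environment configuration over literals):
+
+```terraform
+provider "etleap" {
+  username = var.etleap_username
+  password = var.etleap_password
+  # `server_url` can usually be omitted; it defaults to https://api.etleap.com/api/v2.
+}
+```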
diff --git a/docs/resources/dbt_schedule.md b/docs/resources/dbt_schedule.md
new file mode 100644
index 0000000..536fda7
--- /dev/null
+++ b/docs/resources/dbt_schedule.md
@@ -0,0 +1,62 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_dbt_schedule Resource - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ DbtSchedule Resource
+---
+
+# etleap_dbt_schedule (Resource)
+
+DbtSchedule Resource
+
+## Example Usage
+
+```terraform
+resource "etleap_dbt_schedule" "my_dbtschedule" {
+ connection_id = "...my_connection_id..."
+ cron = "...my_cron..."
+ name = "Henry Macejkovic"
+ paused = true
+ selector = "...my_selector..."
+ skip_build_if_no_new_data = false
+ target_schema = "...my_target_schema..."
+}
+```
+
+
+## Schema
+
+### Required
+
+- `connection_id` (String) The [connection](https://docs.etleap.com/docs/api-v2/edbec13814bbc-connection) where the dbt build runs. The only supported connections are Redshift, Snowflake or Databricks Delta Lake destinations. Requires replacement if changed.
+- `cron` (String) The cron expression that defines triggers for this schedule. The maximum supported cron schedule precision is 1 minute.
+- `name` (String) The name of the dbt schedule.
+- `selector` (String) The selector (from `selectors.yaml`) to run the build for. Requires replacement if changed.
+- `target_schema` (String) The target schema for the dbt build. See [here](https://docs.getdbt.com/docs/build/custom-schemas) for details on how it's used.
+
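+As an illustration of the `cron` field (the schedule value is arbitrary), the following runs the dbt build every 30 minutes, well within the 1-minute precision limit:
+
+```terraform
+resource "etleap_dbt_schedule" "half_hourly" {
+  connection_id = "...my_connection_id..."
+  cron          = "*/30 * * * *" # minute-level precision is the maximum supported
+  name          = "Half-hourly build"
+  selector      = "...my_selector..."
+  target_schema = "...my_target_schema..."
+}
+```
+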
+### Optional
+
+- `paused` (Boolean) `true` if the schedule should start as paused; defaults to `false`. Default: false
+- `skip_build_if_no_new_data` (Boolean) Whether the dbt build is skipped if no new data has been ingested for any of the pipelines this schedule depends on. Default: false
+
+### Read-Only
+
+- `create_date` (String)
+- `current_activity` (String) must be one of ["LOADING", "BUILDING"]
+- `id` (String) The id of the dbt schedule
+- `last_dbt_build_date` (String) The last time that a successful dbt build started.
+- `last_dbt_run_time` (Number) The duration of the last successful dbt build.
+- `owner` (Attributes) (see [below for nested schema](#nestedatt--owner))
+
+
+### Nested Schema for `owner`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
diff --git a/docs/resources/model.md b/docs/resources/model.md
new file mode 100644
index 0000000..9d5828b
--- /dev/null
+++ b/docs/resources/model.md
@@ -0,0 +1,239 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_model Resource - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ Model Resource
+---
+
+# etleap_model (Resource)
+
+Model Resource
+
+## Example Usage
+
+```terraform
+resource "etleap_model" "my_model" {
+ deletion_of_export_products = true
+ name = "Dewey Koch"
+ query_and_triggers = {
+ query = "...my_query..."
+ triggers = [
+ "...",
+ ]
+ }
+ update_schedule = {
+ daily = {
+ hour_of_day = 4
+ mode = "DAILY"
+ }
+ }
+ warehouse = {
+ redshift = {
+ connection_id = "...my_connection_id..."
+ distribution_style = {
+ one = "ALL"
+ }
+ materialized_view = true
+ pending_renamed_table = "...my_pending_renamed_table..."
+ schema = "...my_schema..."
+ sort_columns = [
+ "...",
+ ]
+ table = "...my_table..."
+ type = "REDSHIFT"
+ wait_for_update_preparation = false
+ }
+ }
+}
+```
+
+
+## Schema
+
+### Required
+
+- `deletion_of_export_products` (Boolean) Specifies whether the table in the destination created by this model should be deleted.
+- `name` (String)
+- `query_and_triggers` (Attributes) (see [below for nested schema](#nestedatt--query_and_triggers))
+- `update_schedule` (Attributes) How often this model should update. Etleap will periodically update the model table in your warehouse according to this schedule. See [the Model Updates documentation](https://docs.etleap.com/docs/documentation/ZG9jOjI0MzU2NDY3-introduction-to-models#model-updates) for more information. (see [below for nested schema](#nestedatt--update_schedule))
+- `warehouse` (Attributes) (see [below for nested schema](#nestedatt--warehouse))
+
+### Optional
+
+- `shares` (List of String) An array of users' emails to share the model with. Once shared, a model cannot be unshared, and future calls to `PATCH` can only add to this list.
+
+### Read-Only
+
+- `create_date` (String) The date and time when the model was created.
+- `dependencies` (Attributes List) (see [below for nested schema](#nestedatt--dependencies))
+- `id` (String) The unique identifier of the model.
+- `last_update_duration` (Number) How long the latest update took to complete, in milliseconds, or the duration of the current update if one is in progress.
+- `last_update_time` (String) The date and time of the latest successful update for this model.
+- `owner` (Attributes) (see [below for nested schema](#nestedatt--owner))
+- `paused` (Boolean)
+
+
+### Nested Schema for `query_and_triggers`
+
+Required:
+
+- `query` (String) The SQL query used to build this model. To specify dependencies on pipelines or other models, replace the schema and table name of the dependency with the id of the dependency enclosed in `{{` and `}}`. The dependency must load data into the same Etleap connection as the one given in `warehouse.connectionId` for this model.
+
+**For Example**
+Say there is a pipeline with the id `abcd1234` that loads data into the table "schema"."my_table". To create a model in Etleap that depends on this pipeline, the following query:
+
+```sql
+SELECT col1, col2 FROM "schema"."my_table";
+```
+
+becomes:
+```sql
+SELECT col1, col2 FROM {{abcd1234}};
+```
+
+[See the Model documentation](https://docs.etleap.com/docs/documentation/ZG9jOjI0MzU2NDY3-introduction-to-models#model-dependencies) for more information on Model dependencies.
+- `triggers` (List of String) A list of model dependency ids. An update will be automatically triggered in this model if any of the dependencies listed here get new data. Any ids given here must be present as dependencies in the `query`.
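+
+Putting the two together, a sketch of a `query_and_triggers` block for the hypothetical pipeline id `abcd1234` used above; the trigger id must also appear as a `{{...}}` dependency in the query:
+
+```terraform
+query_and_triggers = {
+  query    = "SELECT col1, col2 FROM {{abcd1234}};"
+  triggers = ["abcd1234"]
+}
+```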
+
+
+
+### Nested Schema for `update_schedule`
+
+Optional:
+
+- `daily` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--daily))
+- `hourly` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--hourly))
+- `monthly` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--monthly))
+- `never` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--never))
+- `weekly` (Attributes) (see [below for nested schema](#nestedatt--update_schedule--weekly))
+
+
+### Nested Schema for `update_schedule.daily`
+
+Optional:
+
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC). Not Null
+- `mode` (String) Not Null; must be one of ["DAILY"]
+
+
+
+### Nested Schema for `update_schedule.hourly`
+
+Optional:
+
+- `mode` (String) Not Null; must be one of ["HOURLY"]
+
+
+
+### Nested Schema for `update_schedule.monthly`
+
+Optional:
+
+- `day_of_month` (Number) Day of the month this schedule should trigger at (in UTC). Not Null
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC). Not Null
+- `mode` (String) Not Null; must be one of ["MONTHLY"]
+
+
+
+### Nested Schema for `update_schedule.never`
+
+Optional:
+
+- `mode` (String) Not Null; must be one of ["NEVER"]
+
+
+
+### Nested Schema for `update_schedule.weekly`
+
+Optional:
+
+- `day_of_week` (Number) The day of the week this schedule should trigger at (in UTC). Not Null
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC). Not Null
+- `mode` (String) Not Null; must be one of ["WEEKLY"]
+
+
+
+
+### Nested Schema for `warehouse`
+
+Optional:
+
+- `redshift` (Attributes) (see [below for nested schema](#nestedatt--warehouse--redshift))
+- `snowflake` (Attributes) (see [below for nested schema](#nestedatt--warehouse--snowflake))
+
+
+### Nested Schema for `warehouse.redshift`
+
+Optional:
+
+- `connection_id` (String) Requires replacement if changed. ; Not Null
+- `distribution_style` (Attributes) Not Null (see [below for nested schema](#nestedatt--warehouse--redshift--distribution_style))
+- `materialized_view` (Boolean) Requires replacement if changed. ; Not Null
+- `schema` (String) Requires replacement if changed.
+- `sort_columns` (List of String) The sort columns to use.
+- `table` (String) Not Null
+- `type` (String) Not Null; must be one of ["REDSHIFT"]
+- `wait_for_update_preparation` (Boolean) Requires replacement if changed. ; Not Null
+
+Read-Only:
+
+- `pending_renamed_table` (String) Only set when a table rename was triggered but is not complete yet.
+
+
+### Nested Schema for `warehouse.redshift.distribution_style`
+
+Optional:
+
+- `distribution_style_key` (Attributes) (see [below for nested schema](#nestedatt--warehouse--redshift--distribution_style--distribution_style_key))
+- `one` (String) must be one of ["ALL", "AUTO", "EVEN"]
+
+
+### Nested Schema for `warehouse.redshift.distribution_style.one`
+
+Optional:
+
+- `column` (String) Not Null
+- `type` (String) Not Null; must be one of ["KEY"]
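+
+For illustration (the column name is hypothetical), key distribution is expressed through the nested `distribution_style_key` attribute, while `one` takes a plain string for the "ALL", "AUTO", and "EVEN" styles:
+
+```terraform
+distribution_style = {
+  distribution_style_key = {
+    column = "customer_id"
+    type   = "KEY"
+  }
+}
+```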
+
+
+
+
+
+### Nested Schema for `warehouse.snowflake`
+
+Optional:
+
+- `connection_id` (String) Requires replacement if changed. ; Not Null
+- `materialized_view` (Boolean) Requires replacement if changed. ; Not Null
+- `schema` (String) Requires replacement if changed.
+- `table` (String) Not Null
+- `type` (String) Not Null; must be one of ["SNOWFLAKE"]
+- `wait_for_update_preparation` (Boolean) Requires replacement if changed. ; Not Null
+
+Read-Only:
+
+- `pending_renamed_table` (String) Only set when a table rename was triggered but is not complete yet.
+
+
+
+
+### Nested Schema for `dependencies`
+
+Read-Only:
+
+- `id` (String) The unique identifier of the pipeline or model.
+- `name` (String) The name of the pipeline or model.
+- `type` (String) must be one of ["PIPELINE", "MODEL"]
+
+
+
+### Nested Schema for `owner`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
diff --git a/docs/resources/pipeline.md b/docs/resources/pipeline.md
new file mode 100644
index 0000000..d11b367
--- /dev/null
+++ b/docs/resources/pipeline.md
@@ -0,0 +1,1840 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_pipeline Resource - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ Pipeline Resource
+---
+
+# etleap_pipeline (Resource)
+
+Pipeline Resource
+
+## Example Usage
+
+```terraform
+resource "etleap_pipeline" "my_pipeline" {
+ deletion_of_export_products = false
+ destination = {
+ delta_lake = {
+ automatic_schema_changes = true
+ connection_id = "...my_connection_id..."
+ last_updated_column = "...my_last_updated_column..."
+ pre10_dot2_runtime_support = true
+ primary_key = [
+ "...",
+ ]
+ retain_history = true
+ schema = "...my_schema..."
+ table = "...my_table..."
+ type = "DELTA_LAKE"
+ wait_for_quality_check = false
+ }
+ }
+ name = "Felipe Block"
+ paused = true
+ script = {
+ legacy_script = {
+ legacy_script = "...my_legacy_script..."
+ }
+ }
+ source = {
+ active_campaign = {
+ connection_id = "...my_connection_id..."
+ entity = "Contact"
+ latency_threshold = 3
+ type = "ACTIVE_CAMPAIGN"
+ }
+ }
+}
+```
+
+
+## Schema
+
+### Required
+
+- `deletion_of_export_products` (Boolean) Specifies whether any remaining export products in the destination created by this pipeline should be deleted. For REDSHIFT and SNOWFLAKE destinations this means tables, and for S3 DATA LAKE destinations this means data output to S3 as well as any tables created in Glue.
+- `destination` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination))
+- `name` (String)
+- `source` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source))
+
+### Optional
+
+- `parsing_error_settings` (Attributes) (see [below for nested schema](#nestedatt--parsing_error_settings))
+- `paused` (Boolean) If the pipeline is paused. Defaults to `false`.
+- `script` (Attributes) Whenever a script is required, we accept and/or return two types of scripts: a Script or Legacy Script. We return a Script object if all transforms specified in that script are supported by this API. Otherwise it will return a Legacy Script. Either Script or Legacy Script can be used when adding a script to a pipeline. Requires replacement if changed. (see [below for nested schema](#nestedatt--script))
+- `shares` (List of String) An array of user emails to share the pipeline with (see the sketch below).
+
+Once shared, a pipeline cannot be unshared. Future calls to `PATCH` on a pipeline can only add to this list.
+
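+A minimal sketch of sharing a pipeline (email addresses are placeholders; entries can only ever be added to this list):
+
+```terraform
+resource "etleap_pipeline" "shared" {
+  # ... required attributes as in the example above ...
+  shares = [
+    "analyst@example.com",
+    "engineer@example.com",
+  ]
+}
+```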
+### Read-Only
+
+- `create_date` (String) The date and time when the pipeline was created.
+- `destinations` (Attributes List) A pipeline may have multiple destinations if it is in the process of being migrated from one to another. (see [below for nested schema](#nestedatt--destinations))
+- `id` (String) The unique identifier of the pipeline.
+- `last_refresh_finish_date` (String) The date and time when the last refresh finished. `null` if the pipeline was never refreshed.
+- `last_refresh_start_date` (String) The date and time when the last refresh was started. `null` if the pipeline was never refreshed.
+- `latency` (Number) The end-to-end latency in seconds for this pipeline. Not `null` if the pipeline is running (not paused or stopped) and if the initial backfill has finished. See the documentation for more details.
+- `latest_script_version` (Number) Valid script versions are whole numbers and range from 1 to this number.
+- `owner` (Attributes) (see [below for nested schema](#nestedatt--owner))
+- `pipeline_mode` (String) The pipeline mode refers to how the pipeline fetches data changes from the source and how those changes are applied to the destination table. See the documentation for more details. must be one of ["UPDATE", "APPEND", "REPLACE", "QUERY"]
+- `refresh_schedule` (Attributes) A pipeline refresh processes all data in your source from the beginning to re-establish consistency with your destination. The pipeline refresh schedule defines when Etleap should automatically refresh the pipeline. See Updates & Refreshes for more information.
+
+Setting this to `null` is equivalent to setting the Refresh Schedule to `NEVER`. (see [below for nested schema](#nestedatt--refresh_schedule))
+- `stop_reason` (String) Describes the reason a pipeline has stopped. `null` if the pipeline is currently running. must be one of ["PAUSED", "PARSING_ERRORS", "SCHEMA_CHANGES", "REDSHIFT_RESIZE", "REDSHIFT_MAINTENANCE", "SOURCE_CONNECTION_DOWN", "DESTINATION_CONNECTION_DOWN", "PERMANENTLY_STOPPED", "SOURCE_BROKEN", "QUOTA_REACHED", "SOURCE_INACTIVE", "DESTINATION_INACTIVE", "PIPELINE_MODE_CHANGE"]
+- `update_schedule` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. When undefined, the pipeline will default to the schedule set on the source connection. (see [below for nested schema](#nestedatt--update_schedule))
+
+
+### Nested Schema for `destination`
+
+Optional:
+
+- `delta_lake` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination--delta_lake))
+- `redshift` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination--redshift))
+- `s3_data_lake` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination--s3_data_lake))
+- `snowflake` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination--snowflake))
+
+
+### Nested Schema for `destination.delta_lake`
+
+Required:
+
+- `connection_id` (String) The universally unique identifier of the destination connection. Requires replacement if changed.
+- `schema` (String) The schema in the destination that the tables will be created in. Requires replacement if changed.
+- `table` (String) Requires replacement if changed.
+- `type` (String)
+> Delta Lake connections are currently in Beta, which means they are subject to non-backwards-compatible and breaking changes.
+Requires replacement if changed. ; must be one of ["DELTA_LAKE"]
+
+Optional:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`. Requires replacement if changed.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination. Requires replacement if changed.
+- `pre10_dot2_runtime_support` (Boolean) This setting disables column mapping on the tables created by this pipeline.
+
+When enabled, this pipeline will create Delta Lake tables that can be read by Databricks clusters with runtime versions before 10.2.
+
+However, without column mapping, native schema changes are not supported and will cause the table's underlying Parquet files to be rewritten, which can be slow. Schema changes will also not preserve column constraints such as `NOT NULL` on the destination tables.
+Requires replacement if changed. ; Default: false
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier. Requires replacement if changed.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`. Requires replacement if changed.
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`. Requires replacement if changed.
+
+
+
+### Nested Schema for `destination.redshift`
+
+Required:
+
+- `connection_id` (String) The universally unique identifier of the destination connection. Requires replacement if changed.
+- `table` (String) Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; must be one of ["REDSHIFT"]
+
+Optional:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`. Requires replacement if changed.
+- `compress_columns` (Boolean) Whether columns should be compressed. Defaults to `true`. Requires replacement if changed.
+- `distribution_style` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination--redshift--distribution_style))
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination. Requires replacement if changed.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier. Requires replacement if changed.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`. Requires replacement if changed.
+- `schema` (String) The schema in the destination that the tables will be created in. If this is not specified or set to `null` then the schema specified on the connection is used. Requires replacement if changed.
+- `sort_columns` (List of String) The sort columns to use. Requires replacement if changed.
+- `truncate_strings` (Boolean) Truncate strings to 64K characters, the max allowed by Redshift in a single column. Defaults to `false`. Requires replacement if changed.
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`. Requires replacement if changed.
+
+
+### Nested Schema for `destination.redshift.distribution_style`
+
+Optional:
+
+- `distribution_style_key` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--destination--redshift--distribution_style--distribution_style_key))
+- `one` (String) Requires replacement if changed. ; must be one of ["ALL", "AUTO", "EVEN"]
+
+
+### Nested Schema for `destination.redshift.distribution_style.distribution_style_key`
+
+Required:
+
+- `column` (String) Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; must be one of ["KEY"]
+
+
+
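+The `distribution_style` attribute is a union: set either `one` (a plain style) or `distribution_style_key` (a `KEY` style with its column), but not both. A minimal sketch (the column name is a placeholder):
+
+```terraform
+# Plain style:
+distribution_style = {
+  one = "AUTO"
+}
+
+# KEY style on a specific column:
+distribution_style = {
+  distribution_style_key = {
+    type   = "KEY"
+    column = "customer_id"
+  }
+}
+```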
+
+
+### Nested Schema for `destination.s3_data_lake`
+
+Required:
+
+- `connection_id` (String) The universally unique identifier of the destination connection. Requires replacement if changed.
+- `path_prefix` (String) The S3 path prefix to use for this pipeline. The data key in the destination bucket starts with `{connection.pathPrefix}/{pathPrefix}/v{version.pipeline}/`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; must be one of ["S3_DATA_LAKE"]
+
+Optional:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`. Requires replacement if changed.
+- `generate_snapshots` (Boolean) Defaults to `false`. Requires replacement if changed.
+- `output_format` (String) Format for output files. Defaults to `PARQUET`. For Glue-enabled destinations, only `PARQUET` is a valid format. Requires replacement if changed. ; must be one of ["PARQUET", "CSV"]; Default: "PARQUET"
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier. Requires replacement if changed.
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`. Requires replacement if changed.
+
+
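+A minimal sketch of an S3 data lake destination. With a connection path prefix of `etleap`, the `path_prefix` below would place data for pipeline version 1 under keys like `etleap/orders/v1/...` (all values are placeholders):
+
+```terraform
+destination = {
+  s3_data_lake = {
+    type          = "S3_DATA_LAKE"
+    connection_id = "...my_connection_id..."
+    path_prefix   = "orders"
+    output_format = "PARQUET"
+  }
+}
+```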
+
+### Nested Schema for `destination.snowflake`
+
+Required:
+
+- `connection_id` (String) The universally unique identifier of the destination connection. Requires replacement if changed.
+- `table` (String) Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; must be one of ["SNOWFLAKE"]
+
+Optional:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`. Requires replacement if changed.
+- `clustering_keys` (List of String) Keys to cluster the table on. If unspecified, the table will use "automatic clustering". Requires replacement if changed.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination. Requires replacement if changed.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier. Requires replacement if changed.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`. Requires replacement if changed.
+- `schema` (String) The schema in the destination that the tables will be created in. If this is not specified or set to `null` then the schema specified on the connection is used. Requires replacement if changed.
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`. Requires replacement if changed.
+
+
+
+
+### Nested Schema for `source`
+
+Optional:
+
+- `active_campaign` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--active_campaign))
+- `bigquery` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--bigquery))
+- `bing_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--bing_ads))
+- `blackline` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--blackline))
+- `criteo` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--criteo))
+- `db2` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--db2))
+- `db2_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--db2_sharded))
+- `delta_lake` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--delta_lake))
+- `elasticsearch` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--elasticsearch))
+- `elluminate` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--elluminate))
+- `eloqua` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--eloqua))
+- `facebook_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--facebook_ads))
+- `fifteen_five` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--fifteen_five))
+- `freshworks` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--freshworks))
+- `ftp` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--ftp))
+- `gong` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--gong))
+- `google_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--google_ads))
+- `google_analytics` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--google_analytics))
+- `google_analytics_ga4` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--google_analytics_ga4))
+- `google_cloud_storage` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--google_cloud_storage))
+- `google_sheets` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--google_sheets))
+- `hubspot` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--hubspot))
+- `impact_radius` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--impact_radius))
+- `intercom` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--intercom))
+- `jira` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--jira))
+- `jira_align` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--jira_align))
+- `kafka` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--kafka))
+- `kustomer` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--kustomer))
+- `ldap` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--ldap))
+- `ldap_virtual_list_view` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--ldap_virtual_list_view))
+- `linked_in_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--linked_in_ads))
+- `marketo` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--marketo))
+- `mixpanel` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--mixpanel))
+- `mongodb` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--mongodb))
+- `mysql` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--mysql))
+- `mysql_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--mysql_sharded))
+- `netsuite` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--netsuite))
+- `netsuite_v2` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--netsuite_v2))
+- `oracle` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--oracle))
+- `oracle_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--oracle_sharded))
+- `outlook` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--outlook))
+- `outreach` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--outreach))
+- `pinterest_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--pinterest_ads))
+- `postgres` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--postgres))
+- `postgres_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--postgres_sharded))
+- `quora_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--quora_ads))
+- `rave_medidata` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--rave_medidata))
+- `recurly` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--recurly))
+- `redshift` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--redshift))
+- `redshift_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--redshift_sharded))
+- `s3_input` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--s3_input))
+- `s3_legacy` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--s3_legacy))
+- `salesforce` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--salesforce))
+- `salesforce_marketing_cloud` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--salesforce_marketing_cloud))
+- `sap_hana` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--sap_hana))
+- `sap_hana_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--sap_hana_sharded))
+- `seismic` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--seismic))
+- `sftp` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--sftp))
+- `shopify` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--shopify))
+- `skyward` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--skyward))
+- `snapchat_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--snapchat_ads))
+- `snowflake` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--snowflake))
+- `snowflake_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--snowflake_sharded))
+- `sql_server` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--sql_server))
+- `sql_server_sharded` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--sql_server_sharded))
+- `square` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--square))
+- `streaming` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--streaming))
+- `stripe` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--stripe))
+- `sumtotal` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--sumtotal))
+- `the_trade_desk` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--the_trade_desk))
+- `tik_tok_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--tik_tok_ads))
+- `twilio` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--twilio))
+- `twitter_ads` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--twitter_ads))
+- `user_defined_api` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--user_defined_api))
+- `uservoice` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--uservoice))
+- `veeva` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--veeva))
+- `verizon_media_dsp` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--verizon_media_dsp))
+- `workday_report` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--workday_report))
+- `workfront` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--workfront))
+- `zendesk` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--zendesk))
+- `zoom_phone` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--zoom_phone))
+- `zuora` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--source--zuora))
+
+
+### Nested Schema for `source.active_campaign`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The ActiveCampaign resource. Example: Contacts, Custom Fields and Custom Values. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ACTIVE_CAMPAIGN"]
+
+
+
+### Nested Schema for `source.bigquery`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `dataset` (String) Name of the dataset in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["BIGQUERY"]
+
+
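+A minimal sketch of the `table`/`table_name_filter` exclusivity and primary-key ordering described above (all values are placeholders):
+
+```terraform
+source = {
+  bigquery = {
+    type          = "BIGQUERY"
+    connection_id = "...my_connection_id..."
+    dataset       = "sales"
+    # Either a single table...
+    table = "orders"
+    # ...or a partition filter, but never both:
+    # table_name_filter = "orders_\\d{8}"
+    # Order matters for composite keys; use [] when there is no primary key.
+    primary_key_columns = ["order_id", "line_item_id"]
+  }
+}
+```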
+
+### Nested Schema for `source.bing_ads`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The report type. Requires replacement if changed. ; Not Null
+- `fields` (List of String) The field names. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["BING_ADS"]
+
+
+
+### Nested Schema for `source.blackline`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Blackline report name. Example: Account Details Extract Template. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["BLACKLINE"]
+
+
+
+### Nested Schema for `source.criteo`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `currency` (List of String) Specify the report `currency` if and only if the entity is 'report_placement', 'report_statistics' or 'report_transactions'. Example values: [USD, EUR]. Requires replacement if changed.
+- `dimensions` (List of String) Specify the report `dimension` if and only if the entity is 'report_placement' or 'report_statistics'. Example values: [Day, advertiserId, adsetId]. Requires replacement if changed.
+- `entity` (String) The Criteo resource. Example: ad_set, advertiser, audience, campaign, report_placement, report_statistics, and report_transactions. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Specify the report `metric` if and only if the entity is 'report_placement' or 'report_statistics'. Example values: [clicks, displays]. Requires replacement if changed.
+- `timezone` (List of String) Specify the report `timezone` if and only if the entity is 'report_placement' or 'report_transactions'. Example values: [UTC, ETC/GMT-3]. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["CRITEO"]
+
+
+
+### Nested Schema for `source.db2`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["DB2"]
+
+
+
+### Nested Schema for `source.db2_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["DB2_SHARDED"]
+
+
+
+### Nested Schema for `source.delta_lake`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["DELTA_LAKE"]
+
+
+
+### Nested Schema for `source.elasticsearch`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The index name. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ELASTICSEARCH"]
+
+
+
+### Nested Schema for `source.elluminate`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `domain_name` (List of String) The domain that you want to extract from. If no domain is specified, Etleap will extract data from all of the schema's domains. Requires replacement if changed.
+- `entity` (String) The Elluminate study name. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `schema_name` (String) The Schema that you want to extract from. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ELLUMINATE"]
+
+
+
+### Nested Schema for `source.eloqua`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Eloqua resource, spelled as it is shown in the Eloqua UI. Each ActivityType is a different entity and is spelled without spaces like EmailClickthrough and EmailSend. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ELOQUA"]
+
+
+
+### Nested Schema for `source.facebook_ads`
+
+Optional:
+
+- `breakdowns` (List of String) The breakdown fields. The first one must be `date_start`. See the [Facebook Documentation on Breakdowns](https://developers.facebook.com/docs/marketing-api/insights/breakdowns/v16.0#insights-api-breakdowns). Requires replacement if changed. ; Not Null
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The aggregation level of the Facebook report. Example values: [Insights by Ad, Insights by Adset, Insights by Campaign, Insights by Account]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["FACEBOOK_ADS"]
+
+
+
+### Nested Schema for `source.fifteen_five`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The 15Five entity. Example values: [answer, report, department, feature-status, group-type, group, high-five, objective_objective_id_history, objective, attribute_value, attribute, priority, question, security-audit, vacation, user]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["FIFTEEN_FIVE"]
+
+
+
+### Nested Schema for `source.freshworks`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["FRESHWORKS"]
+
+
+
+### Nested Schema for `source.ftp`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `new_file_behavior` (String) Specifies whether new files update, add to, or replace existing files. See the documentation for more details. Requires replacement if changed. ; Not Null; must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. In the case when `fileNameFilter` is specified, exactly one folder path must be given here. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["FTP"]
+
+
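+A minimal sketch of the `file_name_filter`/`paths` relationship described above: when a filter is given, `paths` must contain exactly one folder path (all values are placeholders):
+
+```terraform
+source = {
+  ftp = {
+    type              = "FTP"
+    connection_id     = "...my_connection_id..."
+    paths             = ["/exports/daily"]
+    file_name_filter  = ".*\\.csv"
+    new_file_behavior = "APPEND"
+    low_watermark     = "2024-01-01" # format 'yyyy-MM-dd'
+  }
+}
+```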
+
+### Nested Schema for `source.gong`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Gong resource. Example values: [Answered Scorecards, Call Transcripts, Calls, Calls Extensive, Folders, Interactions, Scorecards, Users, Users Activity, Users Extensive, Workspaces]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["GONG"]
+
+
+
+### Nested Schema for `source.google_ads`
+
+Optional:
+
+- `attributed_resources` (List of String) Specify the report `attributed resources`. Example values: [campaign_budget.id, campaign_budget.name, bidding_strategy.type]. Requires replacement if changed.
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Google Ads report type, capitalized and spelled with spaces between words. Requires replacement if changed. ; Not Null
+- `fields` (List of String) Specify the report `fields`. Example values: [campaign.resource_name, campaign.campaign_budget, campaign.advertising_channel_type]. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Specify the report `metrics`. Example values: [metrics.clicks, metrics.all_conversions, metrics.average_cost]. Requires replacement if changed.
+- `segments` (List of String) Specify the report `segmentation` groups. Example values: [segments.date, segments.click_type, segments.geo_target_county]. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["GOOGLE_ADS"]
+
+
+
+### Nested Schema for `source.google_analytics`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `dimensions` (List of String) Requires replacement if changed. ; Not Null
+- `entity` (String) The full name of the site in Google Analytics. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `max_accuracy_start_date` (String) Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `metrics` (List of String) Requires replacement if changed. ; Not Null
+- `segment` (String) Requires replacement if changed.
+- `service` (String) | Value | Description |
+| - | - |
+| `REPORTING` | Gives you access to Google Analytics data, including segments. |
+| `MULTI_CHANNEL_FUNNELS` | Gets conversion path data, which shows user interactions with various traffic sources. |
+Requires replacement if changed. ; Not Null; must be one of ["REPORTING", "MULTI_CHANNEL_FUNNELS"]
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["GOOGLE_ANALYTICS"]
+
+
+
+### Nested Schema for `source.google_analytics_ga4`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `dimensions` (List of String) Dimensions are attributes for your data. Example values: [date, browser]. Requires replacement if changed. ; Not Null
+- `entity` (String) The Google Analytics GA4 resource. Provide the ID of the GA4 resource. You can find out how to retrieve the ID of your resource here. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Metrics represent quantitative measurements calculated by Google Analytics. Example values: [active1DayUsers, conversions]. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["GOOGLE_ANALYTICS_GA4"]
+
+
+
+### Nested Schema for `source.google_cloud_storage`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `new_file_behavior` (String) Specifies whether new files update, add to, or replace existing files. See the documentation for more details. Requires replacement if changed. ; Not Null; must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. In the case when `fileNameFilter` is specified, exactly one folder path must be given here. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["GOOGLE_CLOUD_STORAGE"]
+
+
+
+### Nested Schema for `source.google_sheets`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Google Sheets entities are in the form `SpreadsheetID/SheetID`. You can find both values by clicking on the sheet (tab) you want and looking at the URL: docs.google.com/spreadsheets/d/`1pRAGMSRpEEG31kbtG2qcpr-HDeDfvafp_v00`/edit#gid=`642381756`. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["GOOGLE_SHEETS"]
+
+
+
+### Nested Schema for `source.hubspot`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Hubspot entity. Example values: [Campaigns, Contacts, Email Events, Engagements, Deals, Owners, Deal Pipelines, Companies, Marketing Emails, Pages, Landing Pages Analytics]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["HUBSPOT"]
+
+
+
+### Nested Schema for `source.impact_radius`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Impact Radius entity, spelled the same way as in the UI. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["IMPACT_RADIUS"]
+
+
+
+### Nested Schema for `source.intercom`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Intercom entity. Example values: [User, Lead, Contact, Company, Admin, Tag, Segment, Note, Event, Counts, Conversation Counts, Admin Conversation Counts, User Tags Counts, User Segments Counts, Company Tags Counts, Company Segments Counts, Conversation, Conversation Parts, Conversation Tags, Subscription]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["INTERCOM"]
+
+
+
+### Nested Schema for `source.jira`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The JIRA entity. Example values: [Issues, Issue Links, Issue Types, Changelog, Comments, Worklogs, Fields, Groups, Group Members, Priorities, Projects, Resolutions, Statuses, Status Categories, Users, Multiple Choice Field]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["JIRA"]
+
+
+
+### Nested Schema for `source.jira_align`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The JIRA Align entity. Spelled without spaces except for the Audit Logs. Example values: [Capabilities, Cities, Customers, Defects, Epics, Epics Audit Logs, Features, Features Audit Logs, Goals, Ideas, Iterations, KeyResults, Milestones, Milestones Audit Logs, Objectives, Objectives Audit Logs, Portfolios, Products, Programs, Regions, ReleaseVehicles, Releases, Snapshots, Stories, Tasks, Teams, Themes, Users, ValueStreams]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["JIRA_ALIGN"]
+
+
+
+### Nested Schema for `source.kafka`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) You can ingest data from Kafka topics. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["KAFKA"]
+
+
+
+### Nested Schema for `source.kustomer`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Kustomer entity. Example values: [AUDIT_LOGS, BRANDS, BUSINESS_SCHEDULES, CARDS, COMPANIES, CONVERSATION_ATTACHMENTS, CONVERSATION_EVENTS, CONVERSATION_FORWARDS, CONVERSATION_TIMES, CONVERSATIONS, CUSTOM_ATTRIBUTE_METADATA, CUSTOMER_DRAFTS, CUSTOMER_MERGES, CUSTOMERS, KOBJECTS, KLASSES, MESSAGES, NOTES, NOTIFICATIONS, OUTBOUND_ACCOUNTS, QUEUES, SLAS, SATISFACTIONS, SHORTCUTS, SNOOZES, SPAM_SENDERS, TEAM_ROUTING_SETTINGS, TEAMS, USERS]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["KUSTOMER"]
+
+
+
+### Nested Schema for `source.ldap`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["LDAP"]
+
+
+
+### Nested Schema for `source.ldap_virtual_list_view`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["LDAP_VIRTUAL_LIST_VIEW"]
+
+
+
+### Nested Schema for `source.linked_in_ads`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The LinkedIn resource. Example values: [ACCOUNTS, ACCOUNT_USERS, AD_ANALYTICS, CAMPAIGNS, CAMPAIGN_GROUPS, CONVERSIONS, INSIGHT_TAG_DOMAINS]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Specify the report `metrics` if and only if the entity is 'AD_ANALYTICS'. Example values: [dateRange, pivotValues, clicks]. Requires replacement if changed.
+- `pivots` (List of String) Specify the report `pivots` groups if and only if the entity is 'AD_ANALYTICS'. Example values: [ACCOUNT, CAMPAIGN, COMPANY]. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["LINKED_IN_ADS"]
+
+
+
+### Nested Schema for `source.marketo`
+
+Optional:
+
+- `activity_types` (List of String) Specify `activityTypes` if and only if the entity is 'Activities'. Requires replacement if changed.
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Marketo entity type. Example values: [Leads, Activities, Campaigns, Programs, Tags]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["MARKETO"]
+
+
+
+### Nested Schema for `source.mixpanel`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) 'Raw Data' is the only entity available for Mixpanel. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["MIXPANEL"]
+
+
+
+### Nested Schema for `source.mongodb`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["MONGODB"]
+
+
+
+### Nested Schema for `source.mysql`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `database` (String) Name of the database in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["MYSQL"]
+
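+A hedged sketch of the relational-source pattern above: `table` and `table_name_filter` are mutually exclusive, and `primary_key_columns` plus `last_updated_column` drive incremental updates (the resource wrapper and ids are illustrative assumptions):
+
+```terraform
+resource "etleap_pipeline" "mysql_orders" {
+  name = "mysql-orders"
+  source = {
+    mysql = {
+      type          = "MYSQL"
+      connection_id = "11111111-2222-3333-4444-555555555555" # placeholder id
+      database      = "shop"
+      # Use `table_name_filter` instead of `table` to match partitioned tables.
+      table               = "orders"
+      primary_key_columns = ["id"]
+      last_updated_column = "updated_at"
+    }
+  }
+  # Destination and other required pipeline attributes omitted for brevity.
+}
+```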
+
+
+### Nested Schema for `source.mysql_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `database` (String) Name of the database in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["MYSQL_SHARDED"]
+
+
+
+### Nested Schema for `source.netsuite`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Netsuite entity. Spelled capitalized without spaces unless you have defined a custom entity in Netsuite with a different capitalization. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["NETSUITE"]
+
+
+
+### Nested Schema for `source.netsuite_v2`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Netsuite entity. Spelled capitalized with spaces. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["NETSUITE_V2"]
+
+
+
+### Nested Schema for `source.oracle`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ORACLE"]
+
+
+
+### Nested Schema for `source.oracle_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ORACLE_SHARDED"]
+
+
+
+### Nested Schema for `source.outlook`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Outlook entity. Example values: [Messages, Events]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["OUTLOOK"]
+
+
+
+### Nested Schema for `source.outreach`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["OUTREACH"]
+
+
+
+### Nested Schema for `source.pinterest_ads`
+
+Optional:
+
+- `columns` (List of String) Specify the report `columns` (the metrics to include) if and only if the entity is 'reports'. Example values: [SPEND_IN_MICRO_DOLLAR, PAID_IMPRESSION, CPC_IN_MICRO_DOLLAR]. Requires replacement if changed.
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Pinterest Ads resource. Example values: [ad_accounts, ad_groups, ads, campaigns, reports]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `level` (List of String) Specify the report `data level` if and only if the entity is 'reports'. Example values: [ADVERTISER, CAMPAIGN, AD_GROUP]. Requires replacement if changed.
+- `targeting_types` (List of String) Specify the report `targeting types` if and only if the entity is 'reports'. Example values: [KEYWORD, APPTYPE, LOCATION]. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["PINTEREST_ADS"]
+
+
+
+### Nested Schema for `source.postgres`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["POSTGRES"]
+
+
+
+### Nested Schema for `source.postgres_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["POSTGRES_SHARDED"]
+
+
+
+### Nested Schema for `source.quora_ads`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The level of aggregation for your Quora Ads data. Example values: [Account, Campaign, Ad Set, Ad]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["QUORA_ADS"]
+
+
+
+### Nested Schema for `source.rave_medidata`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Rave Medidata entity. Example values: [dataset, study, @-@]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["RAVE_MEDIDATA"]
+
+
+
+### Nested Schema for `source.recurly`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Recurly entity. Example values: [Account, Account Acquisition, Line Item, Coupon, Coupon Redemption, Credit Payment, Invoice, Measured Unit, Plan, Plan Add-On, Subscription, Transaction]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["RECURLY"]
+
+
+
+### Nested Schema for `source.redshift`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["REDSHIFT"]
+
+
+
+### Nested Schema for `source.redshift_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["REDSHIFT_SHARDED"]
+
+
+
+### Nested Schema for `source.s3_input`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified. Requires replacement if changed.
+- `files_can_change` (Boolean) Etleap can check whether files that were already processed have changed. If a file has changed, Etleap fetches the new file, removes the old file's data from the destination, and adds the changed data.
+This can only be enabled when `newFileBehavior` is set to `APPEND`. Defaults to `false`. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. Requires replacement if changed. ; Not Null; must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here. Requires replacement if changed. ; Not Null
+- `triggered_by_event` (Boolean) Whether this source should be triggered by a `Batch Added` event (`true`) or Etleap should inspect the source to find new files to process (`false`). Defaults to `false`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["S3_INPUT"]
+
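+A sketch of a file-based source using the attributes above; note that when `file_name_filter` is set, `paths` must contain exactly one folder path (the resource wrapper and ids are illustrative assumptions):
+
+```terraform
+resource "etleap_pipeline" "s3_events" {
+  name = "s3-events"
+  source = {
+    s3_input = {
+      type          = "S3_INPUT"
+      connection_id = "11111111-2222-3333-4444-555555555555" # placeholder id
+      # Exactly one folder path because `file_name_filter` is specified.
+      paths             = ["events/"]
+      file_name_filter  = ".*\\.json" # regex over file names
+      new_file_behavior = "APPEND"
+      low_watermark     = "2024-01-01" # skip files modified before this date
+    }
+  }
+  # Destination and other required pipeline attributes omitted for brevity.
+}
+```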
+
+
+### Nested Schema for `source.s3_legacy`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. Requires replacement if changed. ; Not Null; must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["S3_LEGACY"]
+
+
+
+### Nested Schema for `source.salesforce`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Salesforce table. Spelled capitalized without spaces, unless it is a custom table like `My_Table__c`. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SALESFORCE"]
+
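+For entity-based SaaS sources such as Salesforce, the block reduces to a connection, an entity name, and the discriminator `type` (the resource wrapper and ids are illustrative assumptions):
+
+```terraform
+resource "etleap_pipeline" "sf_accounts" {
+  name = "salesforce-accounts"
+  source = {
+    salesforce = {
+      type          = "SALESFORCE"
+      connection_id = "11111111-2222-3333-4444-555555555555" # placeholder id
+      entity        = "Account" # custom tables keep their API name, e.g. "My_Table__c"
+    }
+  }
+  # Destination and other required pipeline attributes omitted for brevity.
+}
+```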
+
+
+### Nested Schema for `source.salesforce_marketing_cloud`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Salesforce Marketing Cloud entity. Example Values: [Bounce Event, Campaign, Click Event, Content Area, Data Extension, Data Extension Object, Email, Folders, List Subscriber, Lists, Open Event, Send, Sent Event, Subscribers, Unsub Event]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SALESFORCE_MARKETING_CLOUD"]
+
+
+
+### Nested Schema for `source.sap_hana`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SAP_HANA"]
+
+
+
+### Nested Schema for `source.sap_hana_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SAP_HANA_SHARDED"]
+
+
+
+### Nested Schema for `source.seismic`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Seismic entity. Example values: [Livesend Links, Livesend Link Contents, Livesend Link Members, Livesend Page Views, Users, User Activity]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SEISMIC"]
+
+
+
+### Nested Schema for `source.sftp`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. Requires replacement if changed. ; Not Null; must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SFTP"]
+
+
+
+### Nested Schema for `source.shopify`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Shopify entity. Spelled with spaces and only first word capitalized. Nested JSON objects are selected by appending the field name. For example, `Orders fulfillments line items` has the lineItems field from the `Order fulfillments` entity. Start creating a pipeline in the Etleap UI for the full list of entities. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SHOPIFY"]
+
+
+
+### Nested Schema for `source.skyward`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Skyward entity. Spelled capitalized with spaces. Example Values: [Academic Sessions, Categories, Classes, Courses, Demographics, Enrollments, Grading Periods, Line Items, Orgs, Results, Schools, Students, Teachers, Terms, Users]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SKYWARD"]
+
+
+
+### Nested Schema for `source.snapchat_ads`
+
+Optional:
+
+- `additional_metrics` (List of String) Specify the report `additional metrics` if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [android_installs, attachment_avg_view_time_millis, attachment_frequency]. Requires replacement if changed.
+- `breakdown` (String) Specify the report `breakdown` if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [ad, adsquad, campaign]. Requires replacement if changed.
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Snapchat Ads entity. Example values: [ad, adaccount, ad_account_report_hourly, ad_account_report_daily]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Specify the report `metrics` if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [impressions, swipes, screen_time_millis]. Requires replacement if changed.
+- `report_dimension` (List of String) Specify the report `dimension` groups if and only if the entity is 'ad_account_report_hourly' or 'ad_account_report_daily'. Example values: [country, region, gender]. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SNAPCHAT_ADS"]
+
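+A sketch of the report pattern above: `breakdown`, `metrics`, and `report_dimension` are only valid for the report entities (the resource wrapper, ids, and specific metric names are illustrative assumptions):
+
+```terraform
+resource "etleap_pipeline" "snap_report" {
+  name = "snapchat-daily-report"
+  source = {
+    snapchat_ads = {
+      type          = "SNAPCHAT_ADS"
+      connection_id = "11111111-2222-3333-4444-555555555555" # placeholder id
+      entity        = "ad_account_report_daily"
+      # Only valid for 'ad_account_report_hourly' or 'ad_account_report_daily':
+      breakdown        = "campaign"
+      metrics          = ["impressions", "swipes"]
+      report_dimension = ["country"]
+    }
+  }
+  # Destination and other required pipeline attributes omitted for brevity.
+}
+```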
+
+
+### Nested Schema for `source.snowflake`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SNOWFLAKE"]
+
+
+
+### Nested Schema for `source.snowflake_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SNOWFLAKE_SHARDED"]
+
+
+
+### Nested Schema for `source.sql_server`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SQL_SERVER"]
+
+
+
+### Nested Schema for `source.sql_server_sharded`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the source. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `primary_key_columns` (List of String) Columns that make up the primary key of the source. The specified order of columns matters for composite primary keys.
+For source tables that do not have primary keys, please specify an empty array.
+For sharded sources, include `shard_id` as the first primary key column.
+The **default value** is an empty array. Requires replacement if changed. ; Not Null
+- `schema` (String) Name of the schema in the source from which the data is to be extracted. If not specified, the source connection schema or the default schema for connection type will be used. Requires replacement if changed.
+- `table` (String) Name of the table to be extracted from the source. Either `table` or `tableNameFilter` must be specified, but not both. Requires replacement if changed.
+- `table_name_filter` (String) Regular expression matching all partitions of a table. Partitions must have the same table schema. Either `tableNameFilter` or `table` must be specified, but not both. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SQL_SERVER_SHARDED"]
+
+
+
+### Nested Schema for `source.square`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Square entity. Example values: [Catalog, Customers, Loyalty Accounts, Loyalty Events, Loyalty Rewards, Orders, Refunds]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SQUARE"]
+
+
+
+### Nested Schema for `source.streaming`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `file_name_filter` (String) Regular expression matching the names of the files to be processed by this pipeline. `fileNameFilter` or `paths` must be specified. Requires replacement if changed.
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `low_watermark` (String) Timestamp of the earliest modified file that should be processed by the pipeline. Only the files modified after this timestamp will be processed. Format of the timestamp: 'yyyy-MM-dd'. Requires replacement if changed.
+- `new_file_behavior` (String) Specifies whether new files update, add to or replace existing files. See the documentation for more details. Requires replacement if changed. ; Not Null; must be one of ["UPDATE", "APPEND", "REPLACE"]
+- `paths` (List of String) File or folder paths for the files to be extracted from the source. When `fileNameFilter` is specified, exactly one folder path must be given here. Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["STREAMING"]
+
+
+
+### Nested Schema for `source.stripe`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Stripe entity. Example values: [Subscriptions, Invoice, InvoiceItems, Events]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["STRIPE"]
+
+
+
+### Nested Schema for `source.sumtotal`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The SumTotal entity. Example values: [Activities, Audiences, Competencies, Domains, Grades, Jobs, Organizations, Skills, Social, Topics, User Activities, User Activities Progress, User Courses, Users]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["SUMTOTAL"]
+
+
+
+### Nested Schema for `source.the_trade_desk`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["THE_TRADE_DESK"]
+
+
+
+### Nested Schema for `source.tik_tok_ads`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `data_level` (String) Specify the report `data level` if and only if the entity is 'REPORT'. Example values: [AUCTION_AD, AUCTION_CAMPAIGN, RESERVATION_AD]. Requires replacement if changed.
+- `dimensions` (List of String) Specify the report `dimension` groups if and only if the entity is 'REPORT'. Example values: [start_time_day, start_time_hour, campaign_id]. Requires replacement if changed.
+- `entity` (String) The TikTok Ads resource. Example values: [AD, ADGROUP, ADVERTISER, CAMPAIGN, REPORT]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Specify the report `metrics` if and only if the entity is 'REPORT'. Example values: [ad_name, clicks, conversion]. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["TIK_TOK_ADS"]
+
+
+
+### Nested Schema for `source.twilio`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Twilio entity. Example values: [Calls, Calls summary, Messages, Usage records]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["TWILIO"]
+
+
+
+### Nested Schema for `source.twitter_ads`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Twitter entity. Example values: [Account, Campaign, Funding Instrument, Line Item, Media Creative, Promoted Tweet, Followers, Tweets Likes, Tweets Quotes, Retweets, Recent Mentions, Tweets, Account Report, Campaign Report, Funding Instrument Report, Line Item Report, Media Creative Report, Promoted Tweet Report]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["TWITTER_ADS"]
+
+
+
+### Nested Schema for `source.user_defined_api`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The User-Defined API entity. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["USER_DEFINED_API"]
+
+
+
+### Nested Schema for `source.uservoice`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The UserVoice entity. Example values: [Category, Comment, Feature, Feature Status, Feedback Record, Forum, Forum Invitation, Internal Status, Label, NPS Rating, Note, Permission, Product Area, Score, Segment, Segmented Values, Status, Status Updates, Suggestion, Suggestion Activity Entry, Supporter, Supporter Message, Team, User, VSTS Work Item]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["USERVOICE"]
+
+
+
+### Nested Schema for `source.veeva`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Veeva Entity name. Example values: [APQR, APQR Item, Action, Activity, Admin Link, Admin Section, Admin Section Controller Code, Answer Library Design, Application Context Selector, Application License Model, Application License Model Field, Application Manifest, Application Provisioner, Application Role]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["VEEVA"]
+
+
+
+### Nested Schema for `source.verizon_media_dsp`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `dimensions` (List of String) Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `metrics` (List of String) Requires replacement if changed. ; Not Null
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["VERIZON_MEDIA_DSP"]
+
+
+
+### Nested Schema for `source.workday_report`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Workday Report name. Spelled the same as Workday UI but all spaces are replaced with underscores. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["WORKDAY_REPORT"]
+
+
+
+### Nested Schema for `source.workfront`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Workfront entity. Spelled capitalized without spaces. For the full list, start creating a pipeline in the Etleap UI. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["WORKFRONT"]
+
+
+
+### Nested Schema for `source.zendesk`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Zendesk entity. Example values: [Group Memberships, Groups, Macros, Organizations, Satisfaction Ratings, SLA Policies, Tags, Ticket Audits, Ticket Comments, Ticket Fields, Ticket Forms, Tickets, Ticket Metrics, Users]. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ZENDESK"]
+
+
+
+### Nested Schema for `source.zoom_phone`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ZOOM_PHONE"]
+
+
+
+### Nested Schema for `source.zuora`
+
+Optional:
+
+- `connection_id` (String) The universally unique identifier for the source. Requires replacement if changed. ; Not Null
+- `entity` (String) The Zuora entity. Spelled capitalized with spaces. For the full list, start creating a pipeline in the Etleap UI. Requires replacement if changed. ; Not Null
+- `latency_threshold` (Number) Notify if we can't extract for `x` hours. Setting it to `null` disables the notification. Defaults to `null`. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; Not Null; must be one of ["ZUORA"]
+
+
+
+
+### Nested Schema for `parsing_error_settings`
+
+Optional:
+
+- `action` (String) Whether Etleap should STOP the pipeline or NOTIFY once the `threshold` is reached. Not Null; must be one of ["STOP", "NOTIFY"]
+- `threshold` (Number) The parsing error threshold, in percentage points, for the `action` to be triggered. Not Null
+
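+A sketch of how these two settings might combine on a pipeline (the resource wrapper is an illustrative assumption): stop the pipeline once parsing errors exceed five percentage points.
+
+```terraform
+resource "etleap_pipeline" "with_error_policy" {
+  name = "pipeline-with-error-policy"
+  parsing_error_settings = {
+    action    = "STOP" # or "NOTIFY" to alert without stopping
+    threshold = 5      # percentage points of parsing errors
+  }
+  # Source, destination, and other required attributes omitted for brevity.
+}
+```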
+
+
+### Nested Schema for `script`
+
+Optional:
+
+- `legacy_script` (Attributes) To be used only for copying a script exactly. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--legacy_script))
+- `script` (Attributes) Specifies a script based on an array of transforms. Not all transforms available in the Wrangler are supported yet for this script format. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script))
+
+
+### Nested Schema for `script.legacy_script`
+
+Required:
+
+- `legacy_script` (String) The serialization of a script. Not meant to be human-readable. Requires replacement if changed.
+
+
+
+### Nested Schema for `script.script`
+
+Required:
+
+- `transforms` (Attributes List) An array consisting of a single "To Rows" transform followed by any number of other transforms. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms))
+
+Optional:
+
+- `charset` (String) Defaults to UTF-8. Requires replacement if changed.
+- `inferred_column_types` (Attributes Map) Maps output column names to types inferred from data. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--inferred_column_types))
+
+
+### Nested Schema for `script.script.transforms`
+
+Optional:
+
+- `transform_add_file_path` (Attributes) Add the file path. Creates a column containing the path where each row was extracted from.
+The new column is called `file_path`. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_add_file_path))
+- `transform_extract_json_fields` (Attributes) Flattens a JSON object into columns. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_extract_json_fields))
+- `transform_parquet_to_rows` (Attributes) Parses Parquet files to rows of JSON objects. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_parquet_to_rows))
+- `transform_parse_by_regex` (Attributes) Parse a column using a regular expression. Use regular expression capture groups to extract matched content into new columns.
+The new parsed columns are called `parse`, `parse1`, etc. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_parse_by_regex))
+- `transform_rename_columns` (Attributes) Rename existing columns. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_rename_columns))
+
+
+### Nested Schema for `script.script.transforms.transform_add_file_path`
+
+Required:
+
+- `type` (String) Requires replacement if changed. ; must be one of ["ADD_FILE_PATH"]
+
+
+
+### Nested Schema for `script.script.transforms.transform_extract_json_fields`
+
+Required:
+
+- `column` (String) The input column containing the JSON to flatten. Requires replacement if changed.
+- `discover_new_keys` (Boolean) If enabled, Etleap will discover new JSON keys at runtime and add these to the script. See the documentation for more details: https://support.etleap.com/hc/en-us/articles/360006296714-Auto-Discovery-of-JSON-Keys. Requires replacement if changed.
+- `keys` (Attributes List) Maps keys to extract from the JSON object to their types. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_extract_json_fields--keys))
+- `type` (String) Requires replacement if changed. ; must be one of ["FLATTEN_JSON_OBJECT"]
+
+Optional:
+
+- `prefix` (String) If specified, Etleap will prepend this value to column names produced by this transform. This can be useful to add context for column names and avoid collisions between column names of keys extracted from different objects. Requires replacement if changed.
+
+
+### Nested Schema for `script.script.transforms.transform_extract_json_fields.keys`
+
+Required:
+
+- `name` (String) Requires replacement if changed.
+- `type` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_extract_json_fields--keys--type))
+
+
+### Nested Schema for `script.script.transforms.transform_extract_json_fields.keys.type`
+
+Optional:
+
+- `one` (String) Requires replacement if changed. ; must be one of ["AUTO", "BIGINT", "BOOLEAN", "DATE", "DATETIME", "DOUBLE", "STRING", "JSON_OBJECT"]
+- `type_decimal` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_extract_json_fields--keys--type--type_decimal))
+- `type_string_with_max_length` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_extract_json_fields--keys--type--type_string_with_max_length))
+
+
+### Nested Schema for `script.script.transforms.transform_extract_json_fields.keys.type.type_decimal`
+
+Required:
+
+- `precision` (Number) Requires replacement if changed.
+- `scale` (Number) Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; must be one of ["DECIMAL"]
+
+
+
+### Nested Schema for `script.script.transforms.transform_extract_json_fields.keys.type.type_string_with_max_length`
+
+Required:
+
+- `length` (Number) Maximum string length in bytes. Requires replacement if changed.
+- `type` (String) Requires replacement if changed. ; must be one of ["STRING"]
+
+
+
+
+
+
+### Nested Schema for `script.script.transforms.transform_parquet_to_rows`
+
+Required:
+
+- `type` (String) Requires replacement if changed. ; must be one of ["PARQUET_TO_ROWS"]
+
+
+
+### Nested Schema for `script.script.transforms.transform_parse_by_regex`
+
+Required:
+
+- `column` (String) The input column that should be parsed by regex. Requires replacement if changed.
+- `regex` (String) The regular expression. Requires replacement if changed.
+- `type` (String) Requires replacement if changed; must be one of ["PARSE_BY_REGEX"]
+
+
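+As an illustration, a regex with two capture groups produces the new columns `parse` and `parse1`. A minimal sketch of such an entry, with illustrative values and the nesting under `script.transforms` assumed:
+
+```terraform
+# Hypothetical sketch; nesting under script.transforms is assumed.
+transform_parse_by_regex = {
+  type   = "PARSE_BY_REGEX"
+  column = "log_line"
+  # Two capture groups -> new columns `parse` and `parse1`.
+  regex  = "^(\\d+)\\s+(\\w+)$"
+}
+```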
+
+### Nested Schema for `script.script.transforms.transform_rename_columns`
+
+Required:
+
+- `columns` (Attributes List) Maps old column names to the new column names. Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--transforms--transform_rename_columns--columns))
+- `type` (String) Requires replacement if changed; must be one of ["RENAME_COLUMNS"]
+
+
+### Nested Schema for `script.script.transforms.transform_rename_columns.columns`
+
+Required:
+
+- `column` (String) Old column name. Requires replacement if changed.
+- `name` (String) New column name. Name must be unique in column list. Requires replacement if changed.
+
+
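+A minimal sketch of a rename entry, with illustrative values and the nesting under `script.transforms` assumed:
+
+```terraform
+# Hypothetical sketch; nesting under script.transforms is assumed.
+transform_rename_columns = {
+  type = "RENAME_COLUMNS"
+  columns = [{
+    column = "usr_nm"    # old column name
+    name   = "user_name" # new column name; must be unique in the column list
+  }]
+}
+```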
+
+
+
+### Nested Schema for `script.script.inferred_column_types`
+
+Optional:
+
+- `type_1` (String) Requires replacement if changed; must be one of ["AUTO", "BIGINT", "BOOLEAN", "DATE", "DATETIME", "DOUBLE", "STRING", "JSON_OBJECT"]
+- `type_decimal` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--inferred_column_types--type_decimal))
+- `type_string_with_max_length` (Attributes) Requires replacement if changed. (see [below for nested schema](#nestedatt--script--script--inferred_column_types--type_string_with_max_length))
+
+
+### Nested Schema for `script.script.inferred_column_types.type_decimal`
+
+Required:
+
+- `precision` (Number) Requires replacement if changed.
+- `scale` (Number) Requires replacement if changed.
+- `type` (String) Requires replacement if changed; must be one of ["DECIMAL"]
+
+
+
+### Nested Schema for `script.script.inferred_column_types.type_string_with_max_length`
+
+Required:
+
+- `length` (Number) Maximum string length in bytes. Requires replacement if changed.
+- `type` (String) Requires replacement if changed; must be one of ["STRING"]
+
+
+
+
+
+
+### Nested Schema for `destinations`
+
+Read-Only:
+
+- `current_version` (Number) The version of the pipeline that is currently writing to the output table.
+- `destination` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination))
+- `parsing_errors` (Attributes) Parsing errors that occur during the transformation of the pipeline. (see [below for nested schema](#nestedatt--destinations--parsing_errors))
+- `refresh_version` (Number) The version of the pipeline that is currently writing to the temporary refresh table. Only specified if there's currently a refresh in progress.
+- `retention_data` (Attributes) Etleap can remove old rows from your destination. This is a summary of the data retention. (see [below for nested schema](#nestedatt--destinations--retention_data))
+- `schema_change_activity` (Attributes List) Array of schema change objects. (see [below for nested schema](#nestedatt--destinations--schema_change_activity))
+
+
+### Nested Schema for `destinations.destination`
+
+Read-Only:
+
+- `delta_lake` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--delta_lake))
+- `redshift` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--redshift))
+- `s3_data_lake` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--s3_data_lake))
+- `snowflake` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--snowflake))
+
+
+### Nested Schema for `destinations.destination.delta_lake`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination.
+- `pre10_dot2_runtime_support` (Boolean) This setting disables column mapping on the tables created by this pipeline.
+
+When enabled, this pipeline will create Delta Lake tables that can be read by Databricks clusters with runtime versions before 10.2.
+
+However, without column mapping, native schema changes are not supported and will cause the table's underlying Parquet files to be rewritten, which can be slow. Schema changes will also not preserve column constraints such as `NOT NULL` on the destination tables.
+Default: false
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`.
+- `schema` (String) The schema in the destination that the tables will be created in.
+- `table` (String)
+- `type` (String) must be one of ["DELTA_LAKE"]
+> Delta Lake connections are currently in Beta, which means that they are subject to non-backwards-compatible and breaking changes.
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
+
+
+### Nested Schema for `destinations.destination.redshift`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `compress_columns` (Boolean) Whether columns should be compressed. Defaults to `true`.
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `distribution_style` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--redshift--distribution_style))
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`.
+- `schema` (String) The schema in the destination that the tables will be created in. If this is not specified or set to `null` then the schema specified on the connection is used.
+- `sort_columns` (List of String) The sort columns to use.
+- `table` (String)
+- `truncate_strings` (Boolean) Truncate strings to 64K characters, the max allowed by Redshift in a single column. Defaults to `false`.
+- `type` (String) must be one of ["REDSHIFT"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
+
+### Nested Schema for `destinations.destination.redshift.distribution_style`
+
+Read-Only:
+
+- `distribution_style_key` (Attributes) (see [below for nested schema](#nestedatt--destinations--destination--redshift--distribution_style--distribution_style_key))
+- `one` (String) must be one of ["ALL", "AUTO", "EVEN"]
+
+
+### Nested Schema for `destinations.destination.redshift.distribution_style.distribution_style_key`
+
+Read-Only:
+
+- `column` (String)
+- `type` (String) must be one of ["KEY"]
+
+
+
+
+
+### Nested Schema for `destinations.destination.s3_data_lake`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `generate_snapshots` (Boolean) Defaults to `false`.
+- `output_format` (String) Format for output files. Defaults to `PARQUET`. For Glue-enabled destinations, only `PARQUET` is a valid format. must be one of ["PARQUET", "CSV"]; Default: "PARQUET"
+- `path_prefix` (String) The S3 path prefix to use for this pipeline. The data key in the destination bucket starts with `{connection.pathPrefix}/{pathPrefix}/v{version.pipeline}/`.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `type` (String) must be one of ["S3_DATA_LAKE"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
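+For instance, with a connection path prefix of `etleap`, a pipeline `path_prefix` of `orders`, and pipeline version 3 (all illustrative values), the data keys would start with `etleap/orders/v3/`.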
+
+
+### Nested Schema for `destinations.destination.snowflake`
+
+Read-Only:
+
+- `automatic_schema_changes` (Boolean) Whether schema changes detected during transformation should be handled automatically or not. Defaults to `true`.
+- `clustering_keys` (List of String) Keys to cluster the table on. If unspecified, the table will use "automatic clustering".
+- `connection_id` (String) The universally unique identifier of the destination connection.
+- `last_updated_column` (String) Name of a column that indicates the time the record was updated at the destination.
+- `primary_key` (List of String) The destination column names that constitute the primary key.
+If the pipeline has a sharded source, include a column that specifies the shard identifier.
+- `retain_history` (Boolean) If the destination table should retain the history of the source. More information here: https://support.etleap.com/hc/en-us/articles/360008168574. Defaults to `false`.
+- `schema` (String) The schema in the destination that the tables will be created in. If this is not specified or set to `null` then the schema specified on the connection is used.
+- `table` (String)
+- `type` (String) must be one of ["SNOWFLAKE"]
+- `wait_for_quality_check` (Boolean) If set to `true`, a `Transformation Complete` event is published once a transformation completes, and the pipeline waits for a `Quality Check Complete` event before loading to the destination. Defaults to `false`.
+
+
+
+
+### Nested Schema for `destinations.parsing_errors`
+
+Read-Only:
+
+- `operation_errors_by_operation` (Attributes List) (see [below for nested schema](#nestedatt--destinations--parsing_errors--operation_errors_by_operation))
+- `parsing_errors_per_day` (Attributes List) (see [below for nested schema](#nestedatt--destinations--parsing_errors--parsing_errors_per_day))
+- `type_errors_by_column` (Attributes List) (see [below for nested schema](#nestedatt--destinations--parsing_errors--type_errors_by_column))
+
+
+### Nested Schema for `destinations.parsing_errors.operation_errors_by_operation`
+
+Read-Only:
+
+- `operation_description` (String)
+- `operation_index` (Number) Index of step in the script of this pipeline that caused this error.
+- `row_count` (Number)
+
+
+
+### Nested Schema for `destinations.parsing_errors.parsing_errors_per_day`
+
+Read-Only:
+
+- `day` (String) Format of the timestamp: 'yyyy-MM-dd'
+- `error_type` (String) must be one of ["TYPE", "OPERATION"]
+- `row_count` (Number)
+
+
+
+### Nested Schema for `destinations.parsing_errors.type_errors_by_column`
+
+Read-Only:
+
+- `column_name` (String)
+- `row_count` (Number)
+- `type` (String)
+
+
+
+
+### Nested Schema for `destinations.retention_data`
+
+Read-Only:
+
+- `retention_by_day` (Attributes) (see [below for nested schema](#nestedatt--destinations--retention_data--retention_by_day))
+- `retention_policy` (Attributes) Policy for the automatic deletion of rows in the destination. (see [below for nested schema](#nestedatt--destinations--retention_data--retention_policy))
+
+
+### Nested Schema for `destinations.retention_data.retention_by_day`
+
+Read-Only:
+
+- `rows_currently_in_warehouse` (Attributes List) (see [below for nested schema](#nestedatt--destinations--retention_data--retention_by_day--rows_currently_in_warehouse))
+- `rows_removed_from_warehouse` (Attributes List) (see [below for nested schema](#nestedatt--destinations--retention_data--retention_by_day--rows_removed_from_warehouse))
+
+
+### Nested Schema for `destinations.retention_data.retention_by_day.rows_currently_in_warehouse`
+
+Read-Only:
+
+- `date` (String) Format of the timestamp: 'yyyy-MM-dd'
+- `row_count` (Number)
+
+
+
+### Nested Schema for `destinations.retention_data.retention_by_day.rows_removed_from_warehouse`
+
+Read-Only:
+
+- `date` (String) Format of the timestamp: 'yyyy-MM-dd'
+- `row_count` (Number)
+
+
+
+
+### Nested Schema for `destinations.retention_data.retention_policy`
+
+Read-Only:
+
+- `column` (String) Name of the column that is used to calculate the interval. Must be a `date` or a `datetime` column.
+- `period` (Number) Number of days before a row gets removed.
+
+
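+For example, a retention policy with column `created_at` and period `30` (illustrative values) removes rows whose `created_at` value is more than 30 days in the past.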
+
+
+### Nested Schema for `destinations.schema_change_activity`
+
+Read-Only:
+
+- `date_time` (String) The date and time of the schema change. `null` if the schema change has not yet been applied.
+- `schema_change_description` (String)
+
+
+
+
+### Nested Schema for `owner`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
+
+### Nested Schema for `refresh_schedule`
+
+Read-Only:
+
+- `daily` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--daily))
+- `hourly` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--hourly))
+- `monthly` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--monthly))
+- `never` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--never))
+- `weekly` (Attributes) (see [below for nested schema](#nestedatt--refresh_schedule--weekly))
+
+
+### Nested Schema for `refresh_schedule.daily`
+
+Read-Only:
+
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["DAILY"]
+
+
+
+### Nested Schema for `refresh_schedule.hourly`
+
+Read-Only:
+
+- `mode` (String) must be one of ["HOURLY"]
+
+
+
+### Nested Schema for `refresh_schedule.monthly`
+
+Read-Only:
+
+- `day_of_month` (Number) Day of the month this schedule should trigger at (in UTC).
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["MONTHLY"]
+
+
+
+### Nested Schema for `refresh_schedule.never`
+
+Read-Only:
+
+- `mode` (String) must be one of ["NEVER"]
+
+
+
+### Nested Schema for `refresh_schedule.weekly`
+
+Read-Only:
+
+- `day_of_week` (Number) The day of the week this schedule should trigger at (in UTC).
+- `hour_of_day` (Number) Hour of day this schedule should trigger at (in UTC).
+- `mode` (String) must be one of ["WEEKLY"]
+
+
+
+
+### Nested Schema for `update_schedule`
+
+Read-Only:
+
+- `daily` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--daily))
+- `hourly` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--hourly))
+- `interval` (Attributes) Specify how long to wait after each extraction before polling for new data. When undefined, the pipeline will default to the schedule set on the source connection. (see [below for nested schema](#nestedatt--update_schedule--interval))
+- `monthly` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--monthly))
+- `weekly` (Attributes) The update schedule defines when Etleap should automatically check the source for new data. See Updates & Refreshes for more information. (see [below for nested schema](#nestedatt--update_schedule--weekly))
+
+
+### Nested Schema for `update_schedule.daily`
+
+Read-Only:
+
+- `hour_of_day` (Number) Hour of day the pipeline update should be started at (in UTC).
+- `mode` (String) must be one of ["DAILY"]
+
+
+
+### Nested Schema for `update_schedule.hourly`
+
+Read-Only:
+
+- `mode` (String) must be one of ["HOURLY"]
+
+
+
+### Nested Schema for `update_schedule.interval`
+
+Read-Only:
+
+- `interval_minutes` (Number) Time to wait before new data is pulled (in minutes).
+- `mode` (String) must be one of ["INTERVAL"]
+
+
+
+### Nested Schema for `update_schedule.monthly`
+
+Read-Only:
+
+- `day_of_month` (Number)
+- `hour_of_day` (Number) Hour of day the pipeline update should be started at (in UTC).
+- `mode` (String) must be one of ["MONTHLY"]
+
+
+
+### Nested Schema for `update_schedule.weekly`
+
+Read-Only:
+
+- `day_of_week` (Number)
+- `hour_of_day` (Number) Hour of day the pipeline update should be started at (in UTC).
+- `mode` (String) must be one of ["WEEKLY"]
+
+
diff --git a/docs/resources/team.md b/docs/resources/team.md
new file mode 100644
index 0000000..f801b13
--- /dev/null
+++ b/docs/resources/team.md
@@ -0,0 +1,46 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "etleap_team Resource - terraform-provider-etleap"
+subcategory: ""
+description: |-
+ Team Resource
+---
+
+# etleap_team (Resource)
+
+Team Resource
+
+## Example Usage
+
+```terraform
+resource "etleap_team" "my_team" {
+ description = "...my_description..."
+ name = "Arthur Hegmann"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `description` (String) Requires replacement if changed.
+- `name` (String) Requires replacement if changed.
+
+### Read-Only
+
+- `create_date` (String) The date and time when the team was created.
+- `id` (String) The unique identifier of the team.
+- `members` (Attributes List) (see [below for nested schema](#nestedatt--members))
+
+
+### Nested Schema for `members`
+
+Read-Only:
+
+- `email_address` (String)
+- `first_name` (String)
+- `id` (String)
+- `last_name` (String)
+
+
diff --git a/examples/provider/provider.tf b/examples/provider/provider.tf
index 2a36dc0..3abe7b2 100644
--- a/examples/provider/provider.tf
+++ b/examples/provider/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
etleap = {
source = "etleap/etleap"
- version = "0.2.0"
+ version = "0.0.2"
}
}
}
diff --git a/go.mod b/go.mod
index efeb464..c9020c5 100644
--- a/go.mod
+++ b/go.mod
@@ -1,13 +1,12 @@
module github.com/etleap/terraform-provider-etleap
-go 1.18
+go 1.20
require (
github.com/cenkalti/backoff/v4 v4.2.0
github.com/ericlagergren/decimal v0.0.0-20221120152707-495c53812d05
github.com/hashicorp/terraform-plugin-framework v1.3.5
github.com/hashicorp/terraform-plugin-go v0.18.0
- github.com/spyzhov/ajson v0.9.0
)
require (
diff --git a/go.sum b/go.sum
index 853448b..3638012 100644
--- a/go.sum
+++ b/go.sum
@@ -171,8 +171,6 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spyzhov/ajson v0.9.0 h1:tF46gJGOenYVj+k9K1U1XpCxVWhmiyY5PsVCAs1+OJ0=
-github.com/spyzhov/ajson v0.9.0/go.mod h1:a6oSw0MMb7Z5aD2tPoPO+jq11ETKgXUr2XktHdT8Wt8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
diff --git a/internal/sdk/pkg/models/shared/authentication.go b/internal/sdk/pkg/models/shared/authentication.go
index 2f2f27b..06e1ff5 100644
--- a/internal/sdk/pkg/models/shared/authentication.go
+++ b/internal/sdk/pkg/models/shared/authentication.go
@@ -27,6 +27,7 @@ type Authentication struct {
func CreateAuthenticationBasic(basic BasicAuthentication) Authentication {
typ := AuthenticationTypeBasic
+
typStr := BasicAuthenticationType(typ)
basic.Type = &typStr
@@ -38,6 +39,7 @@ func CreateAuthenticationBasic(basic BasicAuthentication) Authentication {
func CreateAuthenticationBearer(bearer BearerAuthentication) Authentication {
typ := AuthenticationTypeBearer
+
typStr := BearerAuthenticationType(typ)
bearer.Type = &typStr
@@ -49,6 +51,7 @@ func CreateAuthenticationBearer(bearer BearerAuthentication) Authentication {
func CreateAuthenticationHeader(header HeaderAuthentication) Authentication {
typ := AuthenticationTypeHeader
+
typStr := HeaderAuthenticationType(typ)
header.Type = &typStr
diff --git a/internal/sdk/pkg/models/shared/connectiontypes.go b/internal/sdk/pkg/models/shared/connectiontypes.go
index 12908e8..bed7d0e 100644
--- a/internal/sdk/pkg/models/shared/connectiontypes.go
+++ b/internal/sdk/pkg/models/shared/connectiontypes.go
@@ -181,6 +181,7 @@ type ConnectionTypes struct {
func CreateConnectionTypesActiveCampaign(activeCampaign ConnectionActiveCampaign) ConnectionTypes {
typ := ConnectionTypesTypeActiveCampaign
+
typStr := ConnectionActiveCampaignType(typ)
activeCampaign.Type = typStr
@@ -192,6 +193,7 @@ func CreateConnectionTypesActiveCampaign(activeCampaign ConnectionActiveCampaign
func CreateConnectionTypesBigquery(bigquery ConnectionBigQuery) ConnectionTypes {
typ := ConnectionTypesTypeBigquery
+
typStr := ConnectionBigQueryType(typ)
bigquery.Type = typStr
@@ -203,6 +205,7 @@ func CreateConnectionTypesBigquery(bigquery ConnectionBigQuery) ConnectionTypes
func CreateConnectionTypesBingAds(bingAds ConnectionBing) ConnectionTypes {
typ := ConnectionTypesTypeBingAds
+
typStr := ConnectionBingType(typ)
bingAds.Type = typStr
@@ -214,6 +217,7 @@ func CreateConnectionTypesBingAds(bingAds ConnectionBing) ConnectionTypes {
func CreateConnectionTypesBlackline(blackline ConnectionBlackline) ConnectionTypes {
typ := ConnectionTypesTypeBlackline
+
typStr := ConnectionBlacklineType(typ)
blackline.Type = typStr
@@ -225,6 +229,7 @@ func CreateConnectionTypesBlackline(blackline ConnectionBlackline) ConnectionTyp
func CreateConnectionTypesCriteo(criteo ConnectionCriteo) ConnectionTypes {
typ := ConnectionTypesTypeCriteo
+
typStr := ConnectionCriteoType(typ)
criteo.Type = typStr
@@ -236,6 +241,7 @@ func CreateConnectionTypesCriteo(criteo ConnectionCriteo) ConnectionTypes {
func CreateConnectionTypesDb2(db2 ConnectionDb2) ConnectionTypes {
typ := ConnectionTypesTypeDb2
+
typStr := ConnectionDb2Type(typ)
db2.Type = typStr
@@ -247,6 +253,7 @@ func CreateConnectionTypesDb2(db2 ConnectionDb2) ConnectionTypes {
func CreateConnectionTypesDb2Sharded(db2Sharded ConnectionDb2Sharded) ConnectionTypes {
typ := ConnectionTypesTypeDb2Sharded
+
typStr := ConnectionDb2ShardedType(typ)
db2Sharded.Type = typStr
@@ -258,6 +265,7 @@ func CreateConnectionTypesDb2Sharded(db2Sharded ConnectionDb2Sharded) Connection
func CreateConnectionTypesDeltaLake(deltaLake ConnectionDeltaLake) ConnectionTypes {
typ := ConnectionTypesTypeDeltaLake
+
typStr := ConnectionDeltaLakeType(typ)
deltaLake.Type = typStr
@@ -269,6 +277,7 @@ func CreateConnectionTypesDeltaLake(deltaLake ConnectionDeltaLake) ConnectionTyp
func CreateConnectionTypesElasticsearch(elasticsearch ConnectionElasticSearch) ConnectionTypes {
typ := ConnectionTypesTypeElasticsearch
+
typStr := ConnectionElasticSearchType(typ)
elasticsearch.Type = typStr
@@ -280,6 +289,7 @@ func CreateConnectionTypesElasticsearch(elasticsearch ConnectionElasticSearch) C
func CreateConnectionTypesElluminate(elluminate ConnectionElluminate) ConnectionTypes {
typ := ConnectionTypesTypeElluminate
+
typStr := ConnectionElluminateType(typ)
elluminate.Type = typStr
@@ -291,6 +301,7 @@ func CreateConnectionTypesElluminate(elluminate ConnectionElluminate) Connection
func CreateConnectionTypesEloqua(eloqua ConnectionEloqua) ConnectionTypes {
typ := ConnectionTypesTypeEloqua
+
typStr := ConnectionEloquaType(typ)
eloqua.Type = typStr
@@ -302,6 +313,7 @@ func CreateConnectionTypesEloqua(eloqua ConnectionEloqua) ConnectionTypes {
func CreateConnectionTypesFacebookAds(facebookAds ConnectionFb) ConnectionTypes {
typ := ConnectionTypesTypeFacebookAds
+
typStr := ConnectionFbType(typ)
facebookAds.Type = typStr
@@ -313,6 +325,7 @@ func CreateConnectionTypesFacebookAds(facebookAds ConnectionFb) ConnectionTypes
func CreateConnectionTypesFifteenFive(fifteenFive ConnectionFifteenFive) ConnectionTypes {
typ := ConnectionTypesTypeFifteenFive
+
typStr := ConnectionFifteenFiveType(typ)
fifteenFive.Type = typStr
@@ -324,6 +337,7 @@ func CreateConnectionTypesFifteenFive(fifteenFive ConnectionFifteenFive) Connect
func CreateConnectionTypesFreshworks(freshworks ConnectionFreshworks) ConnectionTypes {
typ := ConnectionTypesTypeFreshworks
+
typStr := ConnectionFreshworksType(typ)
freshworks.Type = typStr
@@ -335,6 +349,7 @@ func CreateConnectionTypesFreshworks(freshworks ConnectionFreshworks) Connection
func CreateConnectionTypesFtp(ftp ConnectionFtp) ConnectionTypes {
typ := ConnectionTypesTypeFtp
+
typStr := ConnectionFtpType(typ)
ftp.Type = typStr
@@ -346,6 +361,7 @@ func CreateConnectionTypesFtp(ftp ConnectionFtp) ConnectionTypes {
func CreateConnectionTypesGong(gong ConnectionGong) ConnectionTypes {
typ := ConnectionTypesTypeGong
+
typStr := ConnectionGongType(typ)
gong.Type = typStr
@@ -357,6 +373,7 @@ func CreateConnectionTypesGong(gong ConnectionGong) ConnectionTypes {
func CreateConnectionTypesGoogleAds(googleAds ConnectionGoogleAds) ConnectionTypes {
typ := ConnectionTypesTypeGoogleAds
+
typStr := ConnectionGoogleAdsType(typ)
googleAds.Type = typStr
@@ -368,6 +385,7 @@ func CreateConnectionTypesGoogleAds(googleAds ConnectionGoogleAds) ConnectionTyp
func CreateConnectionTypesGoogleAnalytics(googleAnalytics ConnectionGa) ConnectionTypes {
typ := ConnectionTypesTypeGoogleAnalytics
+
typStr := ConnectionGaType(typ)
googleAnalytics.Type = typStr
@@ -379,6 +397,7 @@ func CreateConnectionTypesGoogleAnalytics(googleAnalytics ConnectionGa) Connecti
func CreateConnectionTypesGoogleAnalyticsGa4(googleAnalyticsGa4 ConnectionGoogleAnalyticsGa4) ConnectionTypes {
typ := ConnectionTypesTypeGoogleAnalyticsGa4
+
typStr := ConnectionGoogleAnalyticsGa4Type(typ)
googleAnalyticsGa4.Type = typStr
@@ -390,6 +409,7 @@ func CreateConnectionTypesGoogleAnalyticsGa4(googleAnalyticsGa4 ConnectionGoogle
func CreateConnectionTypesGoogleCloudStorage(googleCloudStorage ConnectionGoogleCloudStorage) ConnectionTypes {
typ := ConnectionTypesTypeGoogleCloudStorage
+
typStr := ConnectionGoogleCloudStorageType(typ)
googleCloudStorage.Type = typStr
@@ -401,6 +421,7 @@ func CreateConnectionTypesGoogleCloudStorage(googleCloudStorage ConnectionGoogle
func CreateConnectionTypesGoogleSheets(googleSheets ConnectionGoogleSheets) ConnectionTypes {
typ := ConnectionTypesTypeGoogleSheets
+
typStr := ConnectionGoogleSheetsType(typ)
googleSheets.Type = typStr
@@ -412,6 +433,7 @@ func CreateConnectionTypesGoogleSheets(googleSheets ConnectionGoogleSheets) Conn
func CreateConnectionTypesHubspot(hubspot ConnectionHubspot) ConnectionTypes {
typ := ConnectionTypesTypeHubspot
+
typStr := ConnectionHubspotType(typ)
hubspot.Type = typStr
@@ -423,6 +445,7 @@ func CreateConnectionTypesHubspot(hubspot ConnectionHubspot) ConnectionTypes {
func CreateConnectionTypesImpactRadius(impactRadius ConnectionImpactRadius) ConnectionTypes {
typ := ConnectionTypesTypeImpactRadius
+
typStr := ConnectionImpactRadiusType(typ)
impactRadius.Type = typStr
@@ -434,6 +457,7 @@ func CreateConnectionTypesImpactRadius(impactRadius ConnectionImpactRadius) Conn
func CreateConnectionTypesIntercom(intercom ConnectionIntercom) ConnectionTypes {
typ := ConnectionTypesTypeIntercom
+
typStr := ConnectionIntercomType(typ)
intercom.Type = typStr
@@ -445,6 +469,7 @@ func CreateConnectionTypesIntercom(intercom ConnectionIntercom) ConnectionTypes
func CreateConnectionTypesJiraAlign(jiraAlign ConnectionJiraAlign) ConnectionTypes {
typ := ConnectionTypesTypeJiraAlign
+
typStr := ConnectionJiraAlignType(typ)
jiraAlign.Type = typStr
@@ -456,6 +481,7 @@ func CreateConnectionTypesJiraAlign(jiraAlign ConnectionJiraAlign) ConnectionTyp
func CreateConnectionTypesKafka(kafka ConnectionKafka) ConnectionTypes {
typ := ConnectionTypesTypeKafka
+
typStr := ConnectionKafkaType(typ)
kafka.Type = typStr
@@ -467,6 +493,7 @@ func CreateConnectionTypesKafka(kafka ConnectionKafka) ConnectionTypes {
func CreateConnectionTypesKustomer(kustomer ConnectionKustomer) ConnectionTypes {
typ := ConnectionTypesTypeKustomer
+
typStr := ConnectionKustomerType(typ)
kustomer.Type = typStr
@@ -478,6 +505,7 @@ func CreateConnectionTypesKustomer(kustomer ConnectionKustomer) ConnectionTypes
func CreateConnectionTypesLdap(ldap ConnectionLdap) ConnectionTypes {
typ := ConnectionTypesTypeLdap
+
typStr := ConnectionLdapType(typ)
ldap.Type = typStr
@@ -489,6 +517,7 @@ func CreateConnectionTypesLdap(ldap ConnectionLdap) ConnectionTypes {
func CreateConnectionTypesLdapVirtualListView(ldapVirtualListView ConnectionLdapVirtualListView) ConnectionTypes {
typ := ConnectionTypesTypeLdapVirtualListView
+
typStr := ConnectionLdapVirtualListViewType(typ)
ldapVirtualListView.Type = typStr
@@ -500,6 +529,7 @@ func CreateConnectionTypesLdapVirtualListView(ldapVirtualListView ConnectionLdap
func CreateConnectionTypesLinkedInAds(linkedInAds ConnectionLinkedInAds) ConnectionTypes {
typ := ConnectionTypesTypeLinkedInAds
+
typStr := ConnectionLinkedInAdsType(typ)
linkedInAds.Type = typStr
@@ -511,6 +541,7 @@ func CreateConnectionTypesLinkedInAds(linkedInAds ConnectionLinkedInAds) Connect
func CreateConnectionTypesMarketo(marketo ConnectionMarketo) ConnectionTypes {
typ := ConnectionTypesTypeMarketo
+
typStr := ConnectionMarketoType(typ)
marketo.Type = typStr
@@ -522,6 +553,7 @@ func CreateConnectionTypesMarketo(marketo ConnectionMarketo) ConnectionTypes {
func CreateConnectionTypesMixpanel(mixpanel ConnectionMixpanel) ConnectionTypes {
typ := ConnectionTypesTypeMixpanel
+
typStr := ConnectionMixpanelType(typ)
mixpanel.Type = typStr
@@ -533,6 +565,7 @@ func CreateConnectionTypesMixpanel(mixpanel ConnectionMixpanel) ConnectionTypes
func CreateConnectionTypesMongodb(mongodb ConnectionMongo) ConnectionTypes {
typ := ConnectionTypesTypeMongodb
+
typStr := ConnectionMongoType(typ)
mongodb.Type = typStr
@@ -544,6 +577,7 @@ func CreateConnectionTypesMongodb(mongodb ConnectionMongo) ConnectionTypes {
func CreateConnectionTypesMysql(mysql ConnectionMysql) ConnectionTypes {
typ := ConnectionTypesTypeMysql
+
typStr := ConnectionMysqlType(typ)
mysql.Type = typStr
@@ -555,6 +589,7 @@ func CreateConnectionTypesMysql(mysql ConnectionMysql) ConnectionTypes {
func CreateConnectionTypesMysqlSharded(mysqlSharded ConnectionMysqlSharded) ConnectionTypes {
typ := ConnectionTypesTypeMysqlSharded
+
typStr := ConnectionMysqlShardedType(typ)
mysqlSharded.Type = typStr
@@ -566,6 +601,7 @@ func CreateConnectionTypesMysqlSharded(mysqlSharded ConnectionMysqlSharded) Conn
func CreateConnectionTypesNetsuite(netsuite ConnectionNetsuite) ConnectionTypes {
typ := ConnectionTypesTypeNetsuite
+
typStr := ConnectionNetsuiteType(typ)
netsuite.Type = typStr
@@ -577,6 +613,7 @@ func CreateConnectionTypesNetsuite(netsuite ConnectionNetsuite) ConnectionTypes
func CreateConnectionTypesNetsuiteV2(netsuiteV2 ConnectionNetsuiteV2) ConnectionTypes {
typ := ConnectionTypesTypeNetsuiteV2
+
typStr := ConnectionNetsuiteV2Type(typ)
netsuiteV2.Type = typStr
@@ -588,6 +625,7 @@ func CreateConnectionTypesNetsuiteV2(netsuiteV2 ConnectionNetsuiteV2) Connection
func CreateConnectionTypesOracle(oracle ConnectionOracle) ConnectionTypes {
typ := ConnectionTypesTypeOracle
+
typStr := ConnectionOracleType(typ)
oracle.Type = typStr
@@ -599,6 +637,7 @@ func CreateConnectionTypesOracle(oracle ConnectionOracle) ConnectionTypes {
func CreateConnectionTypesOracleSharded(oracleSharded ConnectionOracleSharded) ConnectionTypes {
typ := ConnectionTypesTypeOracleSharded
+
typStr := ConnectionOracleShardedType(typ)
oracleSharded.Type = typStr
@@ -610,6 +649,7 @@ func CreateConnectionTypesOracleSharded(oracleSharded ConnectionOracleSharded) C
func CreateConnectionTypesOutlook(outlook ConnectionOutlook) ConnectionTypes {
typ := ConnectionTypesTypeOutlook
+
typStr := ConnectionOutlookType(typ)
outlook.Type = typStr
@@ -621,6 +661,7 @@ func CreateConnectionTypesOutlook(outlook ConnectionOutlook) ConnectionTypes {
func CreateConnectionTypesOutreach(outreach ConnectionOutreach) ConnectionTypes {
typ := ConnectionTypesTypeOutreach
+
typStr := ConnectionOutreachType(typ)
outreach.Type = typStr
@@ -632,6 +673,7 @@ func CreateConnectionTypesOutreach(outreach ConnectionOutreach) ConnectionTypes
func CreateConnectionTypesPinterestAds(pinterestAds ConnectionPinterestAds) ConnectionTypes {
typ := ConnectionTypesTypePinterestAds
+
typStr := ConnectionPinterestAdsType(typ)
pinterestAds.Type = typStr
@@ -643,6 +685,7 @@ func CreateConnectionTypesPinterestAds(pinterestAds ConnectionPinterestAds) Conn
func CreateConnectionTypesPostgres(postgres ConnectionPostgres) ConnectionTypes {
typ := ConnectionTypesTypePostgres
+
typStr := ConnectionPostgresType(typ)
postgres.Type = typStr
@@ -654,6 +697,7 @@ func CreateConnectionTypesPostgres(postgres ConnectionPostgres) ConnectionTypes
func CreateConnectionTypesPostgresSharded(postgresSharded ConnectionPostgresSharded) ConnectionTypes {
typ := ConnectionTypesTypePostgresSharded
+
typStr := ConnectionPostgresShardedType(typ)
postgresSharded.Type = typStr
@@ -665,6 +709,7 @@ func CreateConnectionTypesPostgresSharded(postgresSharded ConnectionPostgresShar
func CreateConnectionTypesQuoraAds(quoraAds ConnectionQuora) ConnectionTypes {
typ := ConnectionTypesTypeQuoraAds
+
typStr := ConnectionQuoraType(typ)
quoraAds.Type = typStr
@@ -676,6 +721,7 @@ func CreateConnectionTypesQuoraAds(quoraAds ConnectionQuora) ConnectionTypes {
func CreateConnectionTypesRaveMedidata(raveMedidata ConnectionRaveMedidata) ConnectionTypes {
typ := ConnectionTypesTypeRaveMedidata
+
typStr := ConnectionRaveMedidataType(typ)
raveMedidata.Type = typStr
@@ -687,6 +733,7 @@ func CreateConnectionTypesRaveMedidata(raveMedidata ConnectionRaveMedidata) Conn
func CreateConnectionTypesRecurly(recurly ConnectionRecurly) ConnectionTypes {
typ := ConnectionTypesTypeRecurly
+
typStr := ConnectionRecurlyType(typ)
recurly.Type = typStr
@@ -698,6 +745,7 @@ func CreateConnectionTypesRecurly(recurly ConnectionRecurly) ConnectionTypes {
func CreateConnectionTypesRedshift(redshift ConnectionRedshift) ConnectionTypes {
typ := ConnectionTypesTypeRedshift
+
typStr := ConnectionRedshiftType(typ)
redshift.Type = typStr
@@ -709,6 +757,7 @@ func CreateConnectionTypesRedshift(redshift ConnectionRedshift) ConnectionTypes
func CreateConnectionTypesRedshiftSharded(redshiftSharded ConnectionRedshiftSharded) ConnectionTypes {
typ := ConnectionTypesTypeRedshiftSharded
+
typStr := ConnectionRedshiftShardedType(typ)
redshiftSharded.Type = typStr
@@ -720,6 +769,7 @@ func CreateConnectionTypesRedshiftSharded(redshiftSharded ConnectionRedshiftShar
func CreateConnectionTypesS3DataLake(s3DataLake ConnectionS3DataLake) ConnectionTypes {
typ := ConnectionTypesTypeS3DataLake
+
typStr := ConnectionS3DataLakeType(typ)
s3DataLake.Type = typStr
@@ -731,6 +781,7 @@ func CreateConnectionTypesS3DataLake(s3DataLake ConnectionS3DataLake) Connection
func CreateConnectionTypesS3Input(s3Input ConnectionS3Input) ConnectionTypes {
typ := ConnectionTypesTypeS3Input
+
typStr := ConnectionS3InputType(typ)
s3Input.Type = typStr
@@ -742,6 +793,7 @@ func CreateConnectionTypesS3Input(s3Input ConnectionS3Input) ConnectionTypes {
func CreateConnectionTypesSalesforce(salesforce ConnectionSalesforce) ConnectionTypes {
typ := ConnectionTypesTypeSalesforce
+
typStr := ConnectionSalesforceType(typ)
salesforce.Type = typStr
@@ -753,6 +805,7 @@ func CreateConnectionTypesSalesforce(salesforce ConnectionSalesforce) Connection
func CreateConnectionTypesSalesforceMarketingCloud(salesforceMarketingCloud ConnectionSalesforceMarketingCloud) ConnectionTypes {
typ := ConnectionTypesTypeSalesforceMarketingCloud
+
typStr := ConnectionSalesforceMarketingCloudType(typ)
salesforceMarketingCloud.Type = typStr
@@ -764,6 +817,7 @@ func CreateConnectionTypesSalesforceMarketingCloud(salesforceMarketingCloud Conn
func CreateConnectionTypesSapHana(sapHana ConnectionSapHana) ConnectionTypes {
typ := ConnectionTypesTypeSapHana
+
typStr := ConnectionSapHanaType(typ)
sapHana.Type = typStr
@@ -775,6 +829,7 @@ func CreateConnectionTypesSapHana(sapHana ConnectionSapHana) ConnectionTypes {
func CreateConnectionTypesSapHanaSharded(sapHanaSharded ConnectionSapHanaSharded) ConnectionTypes {
typ := ConnectionTypesTypeSapHanaSharded
+
typStr := ConnectionSapHanaShardedType(typ)
sapHanaSharded.Type = typStr
@@ -786,6 +841,7 @@ func CreateConnectionTypesSapHanaSharded(sapHanaSharded ConnectionSapHanaSharded
func CreateConnectionTypesSeismic(seismic ConnectionSeismic) ConnectionTypes {
typ := ConnectionTypesTypeSeismic
+
typStr := ConnectionSeismicType(typ)
seismic.Type = typStr
@@ -797,6 +853,7 @@ func CreateConnectionTypesSeismic(seismic ConnectionSeismic) ConnectionTypes {
func CreateConnectionTypesSftp(sftp ConnectionSftp) ConnectionTypes {
typ := ConnectionTypesTypeSftp
+
typStr := ConnectionSftpType(typ)
sftp.Type = typStr
@@ -808,6 +865,7 @@ func CreateConnectionTypesSftp(sftp ConnectionSftp) ConnectionTypes {
func CreateConnectionTypesShopify(shopify ConnectionShopify) ConnectionTypes {
typ := ConnectionTypesTypeShopify
+
typStr := ConnectionShopifyType(typ)
shopify.Type = typStr
@@ -819,6 +877,7 @@ func CreateConnectionTypesShopify(shopify ConnectionShopify) ConnectionTypes {
func CreateConnectionTypesSkyward(skyward ConnectionSkyward) ConnectionTypes {
typ := ConnectionTypesTypeSkyward
+
typStr := ConnectionSkywardType(typ)
skyward.Type = typStr
@@ -830,6 +889,7 @@ func CreateConnectionTypesSkyward(skyward ConnectionSkyward) ConnectionTypes {
func CreateConnectionTypesSnapchatAds(snapchatAds ConnectionSnapchatAds) ConnectionTypes {
typ := ConnectionTypesTypeSnapchatAds
+
typStr := ConnectionSnapchatAdsType(typ)
snapchatAds.Type = typStr
@@ -841,6 +901,7 @@ func CreateConnectionTypesSnapchatAds(snapchatAds ConnectionSnapchatAds) Connect
func CreateConnectionTypesSnowflake(snowflake ConnectionSnowflake) ConnectionTypes {
typ := ConnectionTypesTypeSnowflake
+
typStr := ConnectionSnowflakeType(typ)
snowflake.Type = typStr
@@ -852,6 +913,7 @@ func CreateConnectionTypesSnowflake(snowflake ConnectionSnowflake) ConnectionTyp
func CreateConnectionTypesSnowflakeSharded(snowflakeSharded ConnectionSnowflakeSharded) ConnectionTypes {
typ := ConnectionTypesTypeSnowflakeSharded
+
typStr := ConnectionSnowflakeShardedType(typ)
snowflakeSharded.Type = typStr
@@ -863,6 +925,7 @@ func CreateConnectionTypesSnowflakeSharded(snowflakeSharded ConnectionSnowflakeS
func CreateConnectionTypesSQLServer(sqlServer ConnectionSQLServer) ConnectionTypes {
typ := ConnectionTypesTypeSQLServer
+
typStr := ConnectionSQLServerType(typ)
sqlServer.Type = typStr
@@ -874,6 +937,7 @@ func CreateConnectionTypesSQLServer(sqlServer ConnectionSQLServer) ConnectionTyp
func CreateConnectionTypesSQLServerSharded(sqlServerSharded ConnectionSQLServerSharded) ConnectionTypes {
typ := ConnectionTypesTypeSQLServerSharded
+
typStr := ConnectionSQLServerShardedType(typ)
sqlServerSharded.Type = typStr
@@ -885,6 +949,7 @@ func CreateConnectionTypesSQLServerSharded(sqlServerSharded ConnectionSQLServerS
func CreateConnectionTypesSquare(square ConnectionSquare) ConnectionTypes {
typ := ConnectionTypesTypeSquare
+
typStr := ConnectionSquareType(typ)
square.Type = typStr
@@ -896,6 +961,7 @@ func CreateConnectionTypesSquare(square ConnectionSquare) ConnectionTypes {
func CreateConnectionTypesStripe(stripe ConnectionStripe) ConnectionTypes {
typ := ConnectionTypesTypeStripe
+
typStr := ConnectionStripeType(typ)
stripe.Type = typStr
@@ -907,6 +973,7 @@ func CreateConnectionTypesStripe(stripe ConnectionStripe) ConnectionTypes {
func CreateConnectionTypesSumtotal(sumtotal ConnectionSumTotal) ConnectionTypes {
typ := ConnectionTypesTypeSumtotal
+
typStr := ConnectionSumTotalType(typ)
sumtotal.Type = typStr
@@ -918,6 +985,7 @@ func CreateConnectionTypesSumtotal(sumtotal ConnectionSumTotal) ConnectionTypes
func CreateConnectionTypesTheTradeDesk(theTradeDesk ConnectionTheTradeDesk) ConnectionTypes {
typ := ConnectionTypesTypeTheTradeDesk
+
typStr := ConnectionTheTradeDeskType(typ)
theTradeDesk.Type = typStr
@@ -929,6 +997,7 @@ func CreateConnectionTypesTheTradeDesk(theTradeDesk ConnectionTheTradeDesk) Conn
func CreateConnectionTypesTikTokAds(tikTokAds ConnectionTikTokAds) ConnectionTypes {
typ := ConnectionTypesTypeTikTokAds
+
typStr := ConnectionTikTokAdsType(typ)
tikTokAds.Type = typStr
@@ -940,6 +1009,7 @@ func CreateConnectionTypesTikTokAds(tikTokAds ConnectionTikTokAds) ConnectionTyp
func CreateConnectionTypesTwilio(twilio ConnectionTwilio) ConnectionTypes {
typ := ConnectionTypesTypeTwilio
+
typStr := ConnectionTwilioType(typ)
twilio.Type = typStr
@@ -951,6 +1021,7 @@ func CreateConnectionTypesTwilio(twilio ConnectionTwilio) ConnectionTypes {
func CreateConnectionTypesTwitterAds(twitterAds ConnectionTwitter) ConnectionTypes {
typ := ConnectionTypesTypeTwitterAds
+
typStr := ConnectionTwitterType(typ)
twitterAds.Type = typStr
@@ -962,6 +1033,7 @@ func CreateConnectionTypesTwitterAds(twitterAds ConnectionTwitter) ConnectionTyp
func CreateConnectionTypesUservoice(uservoice ConnectionUserVoice) ConnectionTypes {
typ := ConnectionTypesTypeUservoice
+
typStr := ConnectionUserVoiceType(typ)
uservoice.Type = typStr
@@ -973,6 +1045,7 @@ func CreateConnectionTypesUservoice(uservoice ConnectionUserVoice) ConnectionTyp
func CreateConnectionTypesUserDefinedAPI(userDefinedAPI ConnectionUserDefinedAPI) ConnectionTypes {
typ := ConnectionTypesTypeUserDefinedAPI
+
typStr := ConnectionUserDefinedAPIType(typ)
userDefinedAPI.Type = typStr
@@ -984,6 +1057,7 @@ func CreateConnectionTypesUserDefinedAPI(userDefinedAPI ConnectionUserDefinedAPI
func CreateConnectionTypesVeeva(veeva ConnectionVeeva) ConnectionTypes {
typ := ConnectionTypesTypeVeeva
+
typStr := ConnectionVeevaType(typ)
veeva.Type = typStr
@@ -995,6 +1069,7 @@ func CreateConnectionTypesVeeva(veeva ConnectionVeeva) ConnectionTypes {
func CreateConnectionTypesVerizonMediaDsp(verizonMediaDsp ConnectionVerizonMedia) ConnectionTypes {
typ := ConnectionTypesTypeVerizonMediaDsp
+
typStr := ConnectionVerizonMediaType(typ)
verizonMediaDsp.Type = typStr
@@ -1006,6 +1081,7 @@ func CreateConnectionTypesVerizonMediaDsp(verizonMediaDsp ConnectionVerizonMedia
func CreateConnectionTypesWorkdayReport(workdayReport ConnectionWorkdayReport) ConnectionTypes {
typ := ConnectionTypesTypeWorkdayReport
+
typStr := ConnectionWorkdayReportType(typ)
workdayReport.Type = typStr
@@ -1017,6 +1093,7 @@ func CreateConnectionTypesWorkdayReport(workdayReport ConnectionWorkdayReport) C
func CreateConnectionTypesWorkfront(workfront ConnectionWorkfront) ConnectionTypes {
typ := ConnectionTypesTypeWorkfront
+
typStr := ConnectionWorkfrontType(typ)
workfront.Type = typStr
@@ -1028,6 +1105,7 @@ func CreateConnectionTypesWorkfront(workfront ConnectionWorkfront) ConnectionTyp
func CreateConnectionTypesZendesk(zendesk ConnectionZendesk) ConnectionTypes {
typ := ConnectionTypesTypeZendesk
+
typStr := ConnectionZendeskType(typ)
zendesk.Type = typStr
@@ -1039,6 +1117,7 @@ func CreateConnectionTypesZendesk(zendesk ConnectionZendesk) ConnectionTypes {
func CreateConnectionTypesZoomPhone(zoomPhone ConnectionZoomPhone) ConnectionTypes {
typ := ConnectionTypesTypeZoomPhone
+
typStr := ConnectionZoomPhoneType(typ)
zoomPhone.Type = typStr
@@ -1050,6 +1129,7 @@ func CreateConnectionTypesZoomPhone(zoomPhone ConnectionZoomPhone) ConnectionTyp
func CreateConnectionTypesZuora(zuora ConnectionZuora) ConnectionTypes {
typ := ConnectionTypesTypeZuora
+
typStr := ConnectionZuoraType(typ)
zuora.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/connectiontypesinput.go b/internal/sdk/pkg/models/shared/connectiontypesinput.go
index cc18565..10b0a43 100644
--- a/internal/sdk/pkg/models/shared/connectiontypesinput.go
+++ b/internal/sdk/pkg/models/shared/connectiontypesinput.go
@@ -181,6 +181,7 @@ type ConnectionTypesInput struct {
func CreateConnectionTypesInputActiveCampaign(activeCampaign ConnectionActiveCampaignInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeActiveCampaign
+
typStr := ConnectionActiveCampaignType(typ)
activeCampaign.Type = typStr
@@ -192,6 +193,7 @@ func CreateConnectionTypesInputActiveCampaign(activeCampaign ConnectionActiveCam
func CreateConnectionTypesInputBigquery(bigquery ConnectionBigQueryInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeBigquery
+
typStr := ConnectionBigQueryType(typ)
bigquery.Type = typStr
@@ -203,6 +205,7 @@ func CreateConnectionTypesInputBigquery(bigquery ConnectionBigQueryInput) Connec
func CreateConnectionTypesInputBingAds(bingAds ConnectionBingInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeBingAds
+
typStr := ConnectionBingType(typ)
bingAds.Type = typStr
@@ -214,6 +217,7 @@ func CreateConnectionTypesInputBingAds(bingAds ConnectionBingInput) ConnectionTy
func CreateConnectionTypesInputBlackline(blackline ConnectionBlacklineInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeBlackline
+
typStr := ConnectionBlacklineType(typ)
blackline.Type = typStr
@@ -225,6 +229,7 @@ func CreateConnectionTypesInputBlackline(blackline ConnectionBlacklineInput) Con
func CreateConnectionTypesInputCriteo(criteo ConnectionCriteoInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeCriteo
+
typStr := ConnectionCriteoType(typ)
criteo.Type = typStr
@@ -236,6 +241,7 @@ func CreateConnectionTypesInputCriteo(criteo ConnectionCriteoInput) ConnectionTy
func CreateConnectionTypesInputDb2(db2 ConnectionDb2Input) ConnectionTypesInput {
typ := ConnectionTypesInputTypeDb2
+
typStr := ConnectionDb2Type(typ)
db2.Type = typStr
@@ -247,6 +253,7 @@ func CreateConnectionTypesInputDb2(db2 ConnectionDb2Input) ConnectionTypesInput
func CreateConnectionTypesInputDb2Sharded(db2Sharded ConnectionDb2ShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeDb2Sharded
+
typStr := ConnectionDb2ShardedType(typ)
db2Sharded.Type = typStr
@@ -258,6 +265,7 @@ func CreateConnectionTypesInputDb2Sharded(db2Sharded ConnectionDb2ShardedInput)
func CreateConnectionTypesInputDeltaLake(deltaLake ConnectionDeltaLakeInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeDeltaLake
+
typStr := ConnectionDeltaLakeType(typ)
deltaLake.Type = typStr
@@ -269,6 +277,7 @@ func CreateConnectionTypesInputDeltaLake(deltaLake ConnectionDeltaLakeInput) Con
func CreateConnectionTypesInputElasticsearch(elasticsearch ConnectionElasticSearchInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeElasticsearch
+
typStr := ConnectionElasticSearchType(typ)
elasticsearch.Type = typStr
@@ -280,6 +289,7 @@ func CreateConnectionTypesInputElasticsearch(elasticsearch ConnectionElasticSear
func CreateConnectionTypesInputElluminate(elluminate ConnectionElluminateInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeElluminate
+
typStr := ConnectionElluminateType(typ)
elluminate.Type = typStr
@@ -291,6 +301,7 @@ func CreateConnectionTypesInputElluminate(elluminate ConnectionElluminateInput)
func CreateConnectionTypesInputEloqua(eloqua ConnectionEloquaInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeEloqua
+
typStr := ConnectionEloquaType(typ)
eloqua.Type = typStr
@@ -302,6 +313,7 @@ func CreateConnectionTypesInputEloqua(eloqua ConnectionEloquaInput) ConnectionTy
func CreateConnectionTypesInputFacebookAds(facebookAds ConnectionFbInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeFacebookAds
+
typStr := ConnectionFbType(typ)
facebookAds.Type = typStr
@@ -313,6 +325,7 @@ func CreateConnectionTypesInputFacebookAds(facebookAds ConnectionFbInput) Connec
func CreateConnectionTypesInputFifteenFive(fifteenFive ConnectionFifteenFiveInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeFifteenFive
+
typStr := ConnectionFifteenFiveType(typ)
fifteenFive.Type = typStr
@@ -324,6 +337,7 @@ func CreateConnectionTypesInputFifteenFive(fifteenFive ConnectionFifteenFiveInpu
func CreateConnectionTypesInputFreshworks(freshworks ConnectionFreshworksInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeFreshworks
+
typStr := ConnectionFreshworksType(typ)
freshworks.Type = typStr
@@ -335,6 +349,7 @@ func CreateConnectionTypesInputFreshworks(freshworks ConnectionFreshworksInput)
func CreateConnectionTypesInputFtp(ftp ConnectionFtpInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeFtp
+
typStr := ConnectionFtpType(typ)
ftp.Type = typStr
@@ -346,6 +361,7 @@ func CreateConnectionTypesInputFtp(ftp ConnectionFtpInput) ConnectionTypesInput
func CreateConnectionTypesInputGong(gong ConnectionGongInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeGong
+
typStr := ConnectionGongType(typ)
gong.Type = typStr
@@ -357,6 +373,7 @@ func CreateConnectionTypesInputGong(gong ConnectionGongInput) ConnectionTypesInp
func CreateConnectionTypesInputGoogleAds(googleAds ConnectionGoogleAdsInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeGoogleAds
+
typStr := ConnectionGoogleAdsType(typ)
googleAds.Type = typStr
@@ -368,6 +385,7 @@ func CreateConnectionTypesInputGoogleAds(googleAds ConnectionGoogleAdsInput) Con
func CreateConnectionTypesInputGoogleAnalytics(googleAnalytics ConnectionGaInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeGoogleAnalytics
+
typStr := ConnectionGaType(typ)
googleAnalytics.Type = typStr
@@ -379,6 +397,7 @@ func CreateConnectionTypesInputGoogleAnalytics(googleAnalytics ConnectionGaInput
func CreateConnectionTypesInputGoogleAnalyticsGa4(googleAnalyticsGa4 ConnectionGoogleAnalyticsGa4Input) ConnectionTypesInput {
typ := ConnectionTypesInputTypeGoogleAnalyticsGa4
+
typStr := ConnectionGoogleAnalyticsGa4Type(typ)
googleAnalyticsGa4.Type = typStr
@@ -390,6 +409,7 @@ func CreateConnectionTypesInputGoogleAnalyticsGa4(googleAnalyticsGa4 ConnectionG
func CreateConnectionTypesInputGoogleCloudStorage(googleCloudStorage ConnectionGoogleCloudStorageInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeGoogleCloudStorage
+
typStr := ConnectionGoogleCloudStorageType(typ)
googleCloudStorage.Type = typStr
@@ -401,6 +421,7 @@ func CreateConnectionTypesInputGoogleCloudStorage(googleCloudStorage ConnectionG
func CreateConnectionTypesInputGoogleSheets(googleSheets ConnectionGoogleSheetsInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeGoogleSheets
+
typStr := ConnectionGoogleSheetsType(typ)
googleSheets.Type = typStr
@@ -412,6 +433,7 @@ func CreateConnectionTypesInputGoogleSheets(googleSheets ConnectionGoogleSheetsI
func CreateConnectionTypesInputHubspot(hubspot ConnectionHubspotInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeHubspot
+
typStr := ConnectionHubspotType(typ)
hubspot.Type = typStr
@@ -423,6 +445,7 @@ func CreateConnectionTypesInputHubspot(hubspot ConnectionHubspotInput) Connectio
func CreateConnectionTypesInputImpactRadius(impactRadius ConnectionImpactRadiusInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeImpactRadius
+
typStr := ConnectionImpactRadiusType(typ)
impactRadius.Type = typStr
@@ -434,6 +457,7 @@ func CreateConnectionTypesInputImpactRadius(impactRadius ConnectionImpactRadiusI
func CreateConnectionTypesInputIntercom(intercom ConnectionIntercomInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeIntercom
+
typStr := ConnectionIntercomType(typ)
intercom.Type = typStr
@@ -445,6 +469,7 @@ func CreateConnectionTypesInputIntercom(intercom ConnectionIntercomInput) Connec
func CreateConnectionTypesInputJiraAlign(jiraAlign ConnectionJiraAlignInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeJiraAlign
+
typStr := ConnectionJiraAlignType(typ)
jiraAlign.Type = typStr
@@ -456,6 +481,7 @@ func CreateConnectionTypesInputJiraAlign(jiraAlign ConnectionJiraAlignInput) Con
func CreateConnectionTypesInputKafka(kafka ConnectionKafkaInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeKafka
+
typStr := ConnectionKafkaType(typ)
kafka.Type = typStr
@@ -467,6 +493,7 @@ func CreateConnectionTypesInputKafka(kafka ConnectionKafkaInput) ConnectionTypes
func CreateConnectionTypesInputKustomer(kustomer ConnectionKustomerInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeKustomer
+
typStr := ConnectionKustomerType(typ)
kustomer.Type = typStr
@@ -478,6 +505,7 @@ func CreateConnectionTypesInputKustomer(kustomer ConnectionKustomerInput) Connec
func CreateConnectionTypesInputLdap(ldap ConnectionLdapInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeLdap
+
typStr := ConnectionLdapType(typ)
ldap.Type = typStr
@@ -489,6 +517,7 @@ func CreateConnectionTypesInputLdap(ldap ConnectionLdapInput) ConnectionTypesInp
func CreateConnectionTypesInputLdapVirtualListView(ldapVirtualListView ConnectionLdapVirtualListViewInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeLdapVirtualListView
+
typStr := ConnectionLdapVirtualListViewType(typ)
ldapVirtualListView.Type = typStr
@@ -500,6 +529,7 @@ func CreateConnectionTypesInputLdapVirtualListView(ldapVirtualListView Connectio
func CreateConnectionTypesInputLinkedInAds(linkedInAds ConnectionLinkedInAdsInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeLinkedInAds
+
typStr := ConnectionLinkedInAdsType(typ)
linkedInAds.Type = typStr
@@ -511,6 +541,7 @@ func CreateConnectionTypesInputLinkedInAds(linkedInAds ConnectionLinkedInAdsInpu
func CreateConnectionTypesInputMarketo(marketo ConnectionMarketoInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeMarketo
+
typStr := ConnectionMarketoType(typ)
marketo.Type = typStr
@@ -522,6 +553,7 @@ func CreateConnectionTypesInputMarketo(marketo ConnectionMarketoInput) Connectio
func CreateConnectionTypesInputMixpanel(mixpanel ConnectionMixpanelInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeMixpanel
+
typStr := ConnectionMixpanelType(typ)
mixpanel.Type = typStr
@@ -533,6 +565,7 @@ func CreateConnectionTypesInputMixpanel(mixpanel ConnectionMixpanelInput) Connec
func CreateConnectionTypesInputMongodb(mongodb ConnectionMongoInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeMongodb
+
typStr := ConnectionMongoType(typ)
mongodb.Type = typStr
@@ -544,6 +577,7 @@ func CreateConnectionTypesInputMongodb(mongodb ConnectionMongoInput) ConnectionT
func CreateConnectionTypesInputMysql(mysql ConnectionMysqlInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeMysql
+
typStr := ConnectionMysqlType(typ)
mysql.Type = typStr
@@ -555,6 +589,7 @@ func CreateConnectionTypesInputMysql(mysql ConnectionMysqlInput) ConnectionTypes
func CreateConnectionTypesInputMysqlSharded(mysqlSharded ConnectionMysqlShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeMysqlSharded
+
typStr := ConnectionMysqlShardedType(typ)
mysqlSharded.Type = typStr
@@ -566,6 +601,7 @@ func CreateConnectionTypesInputMysqlSharded(mysqlSharded ConnectionMysqlShardedI
func CreateConnectionTypesInputNetsuite(netsuite ConnectionNetsuiteInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeNetsuite
+
typStr := ConnectionNetsuiteType(typ)
netsuite.Type = typStr
@@ -577,6 +613,7 @@ func CreateConnectionTypesInputNetsuite(netsuite ConnectionNetsuiteInput) Connec
func CreateConnectionTypesInputNetsuiteV2(netsuiteV2 ConnectionNetsuiteV2Input) ConnectionTypesInput {
typ := ConnectionTypesInputTypeNetsuiteV2
+
typStr := ConnectionNetsuiteV2Type(typ)
netsuiteV2.Type = typStr
@@ -588,6 +625,7 @@ func CreateConnectionTypesInputNetsuiteV2(netsuiteV2 ConnectionNetsuiteV2Input)
func CreateConnectionTypesInputOracle(oracle ConnectionOracleInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeOracle
+
typStr := ConnectionOracleType(typ)
oracle.Type = typStr
@@ -599,6 +637,7 @@ func CreateConnectionTypesInputOracle(oracle ConnectionOracleInput) ConnectionTy
func CreateConnectionTypesInputOracleSharded(oracleSharded ConnectionOracleShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeOracleSharded
+
typStr := ConnectionOracleShardedType(typ)
oracleSharded.Type = typStr
@@ -610,6 +649,7 @@ func CreateConnectionTypesInputOracleSharded(oracleSharded ConnectionOracleShard
func CreateConnectionTypesInputOutlook(outlook ConnectionOutlookInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeOutlook
+
typStr := ConnectionOutlookType(typ)
outlook.Type = typStr
@@ -621,6 +661,7 @@ func CreateConnectionTypesInputOutlook(outlook ConnectionOutlookInput) Connectio
func CreateConnectionTypesInputOutreach(outreach ConnectionOutreachInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeOutreach
+
typStr := ConnectionOutreachType(typ)
outreach.Type = typStr
@@ -632,6 +673,7 @@ func CreateConnectionTypesInputOutreach(outreach ConnectionOutreachInput) Connec
func CreateConnectionTypesInputPinterestAds(pinterestAds ConnectionPinterestAdsInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypePinterestAds
+
typStr := ConnectionPinterestAdsType(typ)
pinterestAds.Type = typStr
@@ -643,6 +685,7 @@ func CreateConnectionTypesInputPinterestAds(pinterestAds ConnectionPinterestAdsI
func CreateConnectionTypesInputPostgres(postgres ConnectionPostgresInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypePostgres
+
typStr := ConnectionPostgresType(typ)
postgres.Type = typStr
@@ -654,6 +697,7 @@ func CreateConnectionTypesInputPostgres(postgres ConnectionPostgresInput) Connec
func CreateConnectionTypesInputPostgresSharded(postgresSharded ConnectionPostgresShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypePostgresSharded
+
typStr := ConnectionPostgresShardedType(typ)
postgresSharded.Type = typStr
@@ -665,6 +709,7 @@ func CreateConnectionTypesInputPostgresSharded(postgresSharded ConnectionPostgre
func CreateConnectionTypesInputQuoraAds(quoraAds ConnectionQuoraInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeQuoraAds
+
typStr := ConnectionQuoraType(typ)
quoraAds.Type = typStr
@@ -676,6 +721,7 @@ func CreateConnectionTypesInputQuoraAds(quoraAds ConnectionQuoraInput) Connectio
func CreateConnectionTypesInputRaveMedidata(raveMedidata ConnectionRaveMedidataInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeRaveMedidata
+
typStr := ConnectionRaveMedidataType(typ)
raveMedidata.Type = typStr
@@ -687,6 +733,7 @@ func CreateConnectionTypesInputRaveMedidata(raveMedidata ConnectionRaveMedidataI
func CreateConnectionTypesInputRecurly(recurly ConnectionRecurlyInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeRecurly
+
typStr := ConnectionRecurlyType(typ)
recurly.Type = typStr
@@ -698,6 +745,7 @@ func CreateConnectionTypesInputRecurly(recurly ConnectionRecurlyInput) Connectio
func CreateConnectionTypesInputRedshift(redshift ConnectionRedshiftInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeRedshift
+
typStr := ConnectionRedshiftType(typ)
redshift.Type = typStr
@@ -709,6 +757,7 @@ func CreateConnectionTypesInputRedshift(redshift ConnectionRedshiftInput) Connec
func CreateConnectionTypesInputRedshiftSharded(redshiftSharded ConnectionRedshiftShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeRedshiftSharded
+
typStr := ConnectionRedshiftShardedType(typ)
redshiftSharded.Type = typStr
@@ -720,6 +769,7 @@ func CreateConnectionTypesInputRedshiftSharded(redshiftSharded ConnectionRedshif
func CreateConnectionTypesInputS3DataLake(s3DataLake ConnectionS3DataLakeInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeS3DataLake
+
typStr := ConnectionS3DataLakeType(typ)
s3DataLake.Type = typStr
@@ -731,6 +781,7 @@ func CreateConnectionTypesInputS3DataLake(s3DataLake ConnectionS3DataLakeInput)
func CreateConnectionTypesInputS3Input(s3Input ConnectionS3InputInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeS3Input
+
typStr := ConnectionS3InputType(typ)
s3Input.Type = typStr
@@ -742,6 +793,7 @@ func CreateConnectionTypesInputS3Input(s3Input ConnectionS3InputInput) Connectio
func CreateConnectionTypesInputSalesforce(salesforce ConnectionSalesforceInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSalesforce
+
typStr := ConnectionSalesforceType(typ)
salesforce.Type = typStr
@@ -753,6 +805,7 @@ func CreateConnectionTypesInputSalesforce(salesforce ConnectionSalesforceInput)
func CreateConnectionTypesInputSalesforceMarketingCloud(salesforceMarketingCloud ConnectionSalesforceMarketingCloudInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSalesforceMarketingCloud
+
typStr := ConnectionSalesforceMarketingCloudType(typ)
salesforceMarketingCloud.Type = typStr
@@ -764,6 +817,7 @@ func CreateConnectionTypesInputSalesforceMarketingCloud(salesforceMarketingCloud
func CreateConnectionTypesInputSapHana(sapHana ConnectionSapHanaInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSapHana
+
typStr := ConnectionSapHanaType(typ)
sapHana.Type = typStr
@@ -775,6 +829,7 @@ func CreateConnectionTypesInputSapHana(sapHana ConnectionSapHanaInput) Connectio
func CreateConnectionTypesInputSapHanaSharded(sapHanaSharded ConnectionSapHanaShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSapHanaSharded
+
typStr := ConnectionSapHanaShardedType(typ)
sapHanaSharded.Type = typStr
@@ -786,6 +841,7 @@ func CreateConnectionTypesInputSapHanaSharded(sapHanaSharded ConnectionSapHanaSh
func CreateConnectionTypesInputSeismic(seismic ConnectionSeismicInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSeismic
+
typStr := ConnectionSeismicType(typ)
seismic.Type = typStr
@@ -797,6 +853,7 @@ func CreateConnectionTypesInputSeismic(seismic ConnectionSeismicInput) Connectio
func CreateConnectionTypesInputSftp(sftp ConnectionSftpInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSftp
+
typStr := ConnectionSftpType(typ)
sftp.Type = typStr
@@ -808,6 +865,7 @@ func CreateConnectionTypesInputSftp(sftp ConnectionSftpInput) ConnectionTypesInp
func CreateConnectionTypesInputShopify(shopify ConnectionShopifyInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeShopify
+
typStr := ConnectionShopifyType(typ)
shopify.Type = typStr
@@ -819,6 +877,7 @@ func CreateConnectionTypesInputShopify(shopify ConnectionShopifyInput) Connectio
func CreateConnectionTypesInputSkyward(skyward ConnectionSkywardInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSkyward
+
typStr := ConnectionSkywardType(typ)
skyward.Type = typStr
@@ -830,6 +889,7 @@ func CreateConnectionTypesInputSkyward(skyward ConnectionSkywardInput) Connectio
func CreateConnectionTypesInputSnapchatAds(snapchatAds ConnectionSnapchatAdsInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSnapchatAds
+
typStr := ConnectionSnapchatAdsType(typ)
snapchatAds.Type = typStr
@@ -841,6 +901,7 @@ func CreateConnectionTypesInputSnapchatAds(snapchatAds ConnectionSnapchatAdsInpu
func CreateConnectionTypesInputSnowflake(snowflake ConnectionSnowflakeInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSnowflake
+
typStr := ConnectionSnowflakeType(typ)
snowflake.Type = typStr
@@ -852,6 +913,7 @@ func CreateConnectionTypesInputSnowflake(snowflake ConnectionSnowflakeInput) Con
func CreateConnectionTypesInputSnowflakeSharded(snowflakeSharded ConnectionSnowflakeShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSnowflakeSharded
+
typStr := ConnectionSnowflakeShardedType(typ)
snowflakeSharded.Type = typStr
@@ -863,6 +925,7 @@ func CreateConnectionTypesInputSnowflakeSharded(snowflakeSharded ConnectionSnowf
func CreateConnectionTypesInputSQLServer(sqlServer ConnectionSQLServerInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSQLServer
+
typStr := ConnectionSQLServerType(typ)
sqlServer.Type = typStr
@@ -874,6 +937,7 @@ func CreateConnectionTypesInputSQLServer(sqlServer ConnectionSQLServerInput) Con
func CreateConnectionTypesInputSQLServerSharded(sqlServerSharded ConnectionSQLServerShardedInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSQLServerSharded
+
typStr := ConnectionSQLServerShardedType(typ)
sqlServerSharded.Type = typStr
@@ -885,6 +949,7 @@ func CreateConnectionTypesInputSQLServerSharded(sqlServerSharded ConnectionSQLSe
func CreateConnectionTypesInputSquare(square ConnectionSquareInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSquare
+
typStr := ConnectionSquareType(typ)
square.Type = typStr
@@ -896,6 +961,7 @@ func CreateConnectionTypesInputSquare(square ConnectionSquareInput) ConnectionTy
func CreateConnectionTypesInputStripe(stripe ConnectionStripeInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeStripe
+
typStr := ConnectionStripeType(typ)
stripe.Type = typStr
@@ -907,6 +973,7 @@ func CreateConnectionTypesInputStripe(stripe ConnectionStripeInput) ConnectionTy
func CreateConnectionTypesInputSumtotal(sumtotal ConnectionSumTotalInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeSumtotal
+
typStr := ConnectionSumTotalType(typ)
sumtotal.Type = typStr
@@ -918,6 +985,7 @@ func CreateConnectionTypesInputSumtotal(sumtotal ConnectionSumTotalInput) Connec
func CreateConnectionTypesInputTheTradeDesk(theTradeDesk ConnectionTheTradeDeskInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeTheTradeDesk
+
typStr := ConnectionTheTradeDeskType(typ)
theTradeDesk.Type = typStr
@@ -929,6 +997,7 @@ func CreateConnectionTypesInputTheTradeDesk(theTradeDesk ConnectionTheTradeDeskI
func CreateConnectionTypesInputTikTokAds(tikTokAds ConnectionTikTokAdsInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeTikTokAds
+
typStr := ConnectionTikTokAdsType(typ)
tikTokAds.Type = typStr
@@ -940,6 +1009,7 @@ func CreateConnectionTypesInputTikTokAds(tikTokAds ConnectionTikTokAdsInput) Con
func CreateConnectionTypesInputTwilio(twilio ConnectionTwilioInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeTwilio
+
typStr := ConnectionTwilioType(typ)
twilio.Type = typStr
@@ -951,6 +1021,7 @@ func CreateConnectionTypesInputTwilio(twilio ConnectionTwilioInput) ConnectionTy
func CreateConnectionTypesInputTwitterAds(twitterAds ConnectionTwitterInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeTwitterAds
+
typStr := ConnectionTwitterType(typ)
twitterAds.Type = typStr
@@ -962,6 +1033,7 @@ func CreateConnectionTypesInputTwitterAds(twitterAds ConnectionTwitterInput) Con
func CreateConnectionTypesInputUservoice(uservoice ConnectionUserVoiceInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeUservoice
+
typStr := ConnectionUserVoiceType(typ)
uservoice.Type = typStr
@@ -973,6 +1045,7 @@ func CreateConnectionTypesInputUservoice(uservoice ConnectionUserVoiceInput) Con
func CreateConnectionTypesInputUserDefinedAPI(userDefinedAPI ConnectionUserDefinedAPIInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeUserDefinedAPI
+
typStr := ConnectionUserDefinedAPIType(typ)
userDefinedAPI.Type = typStr
@@ -984,6 +1057,7 @@ func CreateConnectionTypesInputUserDefinedAPI(userDefinedAPI ConnectionUserDefin
func CreateConnectionTypesInputVeeva(veeva ConnectionVeevaInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeVeeva
+
typStr := ConnectionVeevaType(typ)
veeva.Type = typStr
@@ -995,6 +1069,7 @@ func CreateConnectionTypesInputVeeva(veeva ConnectionVeevaInput) ConnectionTypes
func CreateConnectionTypesInputVerizonMediaDsp(verizonMediaDsp ConnectionVerizonMediaInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeVerizonMediaDsp
+
typStr := ConnectionVerizonMediaType(typ)
verizonMediaDsp.Type = typStr
@@ -1006,6 +1081,7 @@ func CreateConnectionTypesInputVerizonMediaDsp(verizonMediaDsp ConnectionVerizon
func CreateConnectionTypesInputWorkdayReport(workdayReport ConnectionWorkdayReportInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeWorkdayReport
+
typStr := ConnectionWorkdayReportType(typ)
workdayReport.Type = typStr
@@ -1017,6 +1093,7 @@ func CreateConnectionTypesInputWorkdayReport(workdayReport ConnectionWorkdayRepo
func CreateConnectionTypesInputWorkfront(workfront ConnectionWorkfrontInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeWorkfront
+
typStr := ConnectionWorkfrontType(typ)
workfront.Type = typStr
@@ -1028,6 +1105,7 @@ func CreateConnectionTypesInputWorkfront(workfront ConnectionWorkfrontInput) Con
func CreateConnectionTypesInputZendesk(zendesk ConnectionZendeskInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeZendesk
+
typStr := ConnectionZendeskType(typ)
zendesk.Type = typStr
@@ -1039,6 +1117,7 @@ func CreateConnectionTypesInputZendesk(zendesk ConnectionZendeskInput) Connectio
func CreateConnectionTypesInputZoomPhone(zoomPhone ConnectionZoomPhoneInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeZoomPhone
+
typStr := ConnectionZoomPhoneType(typ)
zoomPhone.Type = typStr
@@ -1050,6 +1129,7 @@ func CreateConnectionTypesInputZoomPhone(zoomPhone ConnectionZoomPhoneInput) Con
func CreateConnectionTypesInputZuora(zuora ConnectionZuoraInput) ConnectionTypesInput {
typ := ConnectionTypesInputTypeZuora
+
typStr := ConnectionZuoraType(typ)
zuora.Type = typStr
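
Every hunk above applies the same generator tweak: a blank line is now emitted between the discriminator assignment and the typed-cast line in each generated union constructor. Since the hunks only show the two or three context lines around the inserted blank line, here is a hedged reconstruction of one complete constructor; the trailing struct literal and its variant field name (`Zuora`) are inferred from the visible pattern, not copied from the generated file:

```go
// Hedged reconstruction of one generated union constructor. Only the
// lines around the new blank line appear in the hunks above; the return
// statement and the Zuora field name are assumptions about the
// generated ConnectionTypesInput type.
func CreateConnectionTypesInputZuora(zuora ConnectionZuoraInput) ConnectionTypesInput {
	typ := ConnectionTypesInputTypeZuora

	typStr := ConnectionZuoraType(typ)
	zuora.Type = typStr

	// Wrap the variant and record which member of the union is set.
	return ConnectionTypesInput{
		Zuora: &zuora,
		Type:  typ,
	}
}
```

The change is pure whitespace, so behavior is unchanged. The same tweak lands below in the destination, grant, warehouse, transform, source, and schedule unions; note the schedule unions discriminate on a `Mode` field rather than `Type`, which is why those hunks assign `never.Mode`, `hourly.Mode`, and so on.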
diff --git a/internal/sdk/pkg/models/shared/destinationtypes.go b/internal/sdk/pkg/models/shared/destinationtypes.go
index eee7ddc..9712114 100644
--- a/internal/sdk/pkg/models/shared/destinationtypes.go
+++ b/internal/sdk/pkg/models/shared/destinationtypes.go
@@ -29,6 +29,7 @@ type DestinationTypes struct {
func CreateDestinationTypesRedshift(redshift DestinationRedshift) DestinationTypes {
typ := DestinationTypesTypeRedshift
+
typStr := DestinationRedshiftType(typ)
redshift.Type = typStr
@@ -40,6 +41,7 @@ func CreateDestinationTypesRedshift(redshift DestinationRedshift) DestinationTyp
func CreateDestinationTypesSnowflake(snowflake DestinationSnowflake) DestinationTypes {
typ := DestinationTypesTypeSnowflake
+
typStr := DestinationSnowflakeType(typ)
snowflake.Type = typStr
@@ -51,6 +53,7 @@ func CreateDestinationTypesSnowflake(snowflake DestinationSnowflake) Destination
func CreateDestinationTypesDeltaLake(deltaLake DestinationDeltaLake) DestinationTypes {
typ := DestinationTypesTypeDeltaLake
+
typStr := DestinationDeltaLakeType(typ)
deltaLake.Type = typStr
@@ -62,6 +65,7 @@ func CreateDestinationTypesDeltaLake(deltaLake DestinationDeltaLake) Destination
func CreateDestinationTypesS3DataLake(s3DataLake DestinationS3DataLake) DestinationTypes {
typ := DestinationTypesTypeS3DataLake
+
typStr := DestinationS3DataLakeType(typ)
s3DataLake.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/grantwithoutprivilege.go b/internal/sdk/pkg/models/shared/grantwithoutprivilege.go
index 56563ac..02eccb6 100644
--- a/internal/sdk/pkg/models/shared/grantwithoutprivilege.go
+++ b/internal/sdk/pkg/models/shared/grantwithoutprivilege.go
@@ -26,6 +26,7 @@ type GrantWithoutPrivilege struct {
func CreateGrantWithoutPrivilegeUser(user GrantUser) GrantWithoutPrivilege {
typ := GrantWithoutPrivilegeTypeUser
+
typStr := GrantUserType(typ)
user.Type = typStr
@@ -37,6 +38,7 @@ func CreateGrantWithoutPrivilegeUser(user GrantUser) GrantWithoutPrivilege {
func CreateGrantWithoutPrivilegeTeam(team GrantTeam) GrantWithoutPrivilege {
typ := GrantWithoutPrivilegeTypeTeam
+
typStr := GrantTeamType(typ)
team.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/grantwithoutprivilegeinput.go b/internal/sdk/pkg/models/shared/grantwithoutprivilegeinput.go
index d293f23..9eeb574 100644
--- a/internal/sdk/pkg/models/shared/grantwithoutprivilegeinput.go
+++ b/internal/sdk/pkg/models/shared/grantwithoutprivilegeinput.go
@@ -26,6 +26,7 @@ type GrantWithoutPrivilegeInput struct {
func CreateGrantWithoutPrivilegeInputUser(user GrantUserInput) GrantWithoutPrivilegeInput {
typ := GrantWithoutPrivilegeInputTypeUser
+
typStr := GrantUserType(typ)
user.Type = typStr
@@ -37,6 +38,7 @@ func CreateGrantWithoutPrivilegeInputUser(user GrantUserInput) GrantWithoutPrivi
func CreateGrantWithoutPrivilegeInputTeam(team GrantTeamInput) GrantWithoutPrivilegeInput {
typ := GrantWithoutPrivilegeInputTypeTeam
+
typStr := GrantTeamType(typ)
team.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/grantwithprivilege.go b/internal/sdk/pkg/models/shared/grantwithprivilege.go
index d532a98..55f7191 100644
--- a/internal/sdk/pkg/models/shared/grantwithprivilege.go
+++ b/internal/sdk/pkg/models/shared/grantwithprivilege.go
@@ -26,6 +26,7 @@ type GrantWithPrivilege struct {
func CreateGrantWithPrivilegeUser(user GrantWithPrivilegeUser) GrantWithPrivilege {
typ := GrantWithPrivilegeTypeUser
+
typStr := GrantWithPrivilegeUserType(typ)
user.Type = typStr
@@ -37,6 +38,7 @@ func CreateGrantWithPrivilegeUser(user GrantWithPrivilegeUser) GrantWithPrivileg
func CreateGrantWithPrivilegeTeam(team GrantWithPrivilegeTeam) GrantWithPrivilege {
typ := GrantWithPrivilegeTypeTeam
+
typStr := GrantWithPrivilegeTeamType(typ)
team.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/grantwithprivilegeinput.go b/internal/sdk/pkg/models/shared/grantwithprivilegeinput.go
index 50f7a58..f850211 100644
--- a/internal/sdk/pkg/models/shared/grantwithprivilegeinput.go
+++ b/internal/sdk/pkg/models/shared/grantwithprivilegeinput.go
@@ -26,6 +26,7 @@ type GrantWithPrivilegeInput struct {
func CreateGrantWithPrivilegeInputUser(user GrantWithPrivilegeUserInput) GrantWithPrivilegeInput {
typ := GrantWithPrivilegeInputTypeUser
+
typStr := GrantWithPrivilegeUserType(typ)
user.Type = typStr
@@ -37,6 +38,7 @@ func CreateGrantWithPrivilegeInputUser(user GrantWithPrivilegeUserInput) GrantWi
func CreateGrantWithPrivilegeInputTeam(team GrantWithPrivilegeTeamInput) GrantWithPrivilegeInput {
typ := GrantWithPrivilegeInputTypeTeam
+
typStr := GrantWithPrivilegeTeamType(typ)
team.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/modelupdate.go b/internal/sdk/pkg/models/shared/modelupdate.go
index bd06d7d..2470a82 100644
--- a/internal/sdk/pkg/models/shared/modelupdate.go
+++ b/internal/sdk/pkg/models/shared/modelupdate.go
@@ -287,6 +287,7 @@ type ModelUpdateScheduleTypes struct {
func CreateModelUpdateScheduleTypesNever(never ScheduleTypesNeverScheduleMode) ModelUpdateScheduleTypes {
typ := ModelUpdateScheduleTypesTypeNever
+
typStr := RefreshScheduleModeNeverScheduleTypesMode(typ)
never.Mode = typStr
@@ -298,6 +299,7 @@ func CreateModelUpdateScheduleTypesNever(never ScheduleTypesNeverScheduleMode) M
func CreateModelUpdateScheduleTypesHourly(hourly ScheduleTypesHourlyScheduleMode) ModelUpdateScheduleTypes {
typ := ModelUpdateScheduleTypesTypeHourly
+
typStr := RefreshScheduleModeHourlyScheduleTypesModelUpdateMode(typ)
hourly.Mode = typStr
@@ -309,6 +311,7 @@ func CreateModelUpdateScheduleTypesHourly(hourly ScheduleTypesHourlyScheduleMode
func CreateModelUpdateScheduleTypesDaily(daily ScheduleTypesDailyScheduleMode) ModelUpdateScheduleTypes {
typ := ModelUpdateScheduleTypesTypeDaily
+
typStr := RefreshScheduleModeDailyScheduleTypesModelUpdateMode(typ)
daily.Mode = typStr
@@ -320,6 +323,7 @@ func CreateModelUpdateScheduleTypesDaily(daily ScheduleTypesDailyScheduleMode) M
func CreateModelUpdateScheduleTypesWeekly(weekly ScheduleTypesWeeklyScheduleMode) ModelUpdateScheduleTypes {
typ := ModelUpdateScheduleTypesTypeWeekly
+
typStr := RefreshScheduleModeWeeklyScheduleTypesModelUpdateMode(typ)
weekly.Mode = typStr
@@ -331,6 +335,7 @@ func CreateModelUpdateScheduleTypesWeekly(weekly ScheduleTypesWeeklyScheduleMode
func CreateModelUpdateScheduleTypesMonthly(monthly ScheduleTypesMonthlyScheduleMode) ModelUpdateScheduleTypes {
typ := ModelUpdateScheduleTypesTypeMonthly
+
typStr := RefreshScheduleModeMonthlyScheduleTypesModelUpdateMode(typ)
monthly.Mode = typStr
diff --git a/internal/sdk/pkg/models/shared/pagingstrategy.go b/internal/sdk/pkg/models/shared/pagingstrategy.go
index 37b19e9..6764d17 100644
--- a/internal/sdk/pkg/models/shared/pagingstrategy.go
+++ b/internal/sdk/pkg/models/shared/pagingstrategy.go
@@ -26,6 +26,7 @@ type PagingStrategy struct {
func CreatePagingStrategyCursorURI(cursorURI CursorURIPagingStrategy) PagingStrategy {
typ := PagingStrategyTypeCursorURI
+
typStr := CursorURIPagingStrategyType(typ)
cursorURI.Type = &typStr
@@ -37,6 +38,7 @@ func CreatePagingStrategyCursorURI(cursorURI CursorURIPagingStrategy) PagingStra
func CreatePagingStrategyOffset(offset OffsetPagingStrategy) PagingStrategy {
typ := PagingStrategyTypeOffset
+
typStr := OffsetPagingStrategyType(typ)
offset.Type = &typStr
diff --git a/internal/sdk/pkg/models/shared/pipelineupdate.go b/internal/sdk/pkg/models/shared/pipelineupdate.go
index cbe31e9..9c5bdcf 100644
--- a/internal/sdk/pkg/models/shared/pipelineupdate.go
+++ b/internal/sdk/pkg/models/shared/pipelineupdate.go
@@ -325,6 +325,7 @@ type PipelineUpdateUpdateScheduleTypes struct {
func CreatePipelineUpdateUpdateScheduleTypesInterval(interval IntervalUpdateScheduleMode) PipelineUpdateUpdateScheduleTypes {
typ := PipelineUpdateUpdateScheduleTypesTypeInterval
+
typStr := UpdateScheduleTypesMode(typ)
interval.Mode = typStr
@@ -336,6 +337,7 @@ func CreatePipelineUpdateUpdateScheduleTypesInterval(interval IntervalUpdateSche
func CreatePipelineUpdateUpdateScheduleTypesHourly(hourly HourlyUpdateScheduleMode) PipelineUpdateUpdateScheduleTypes {
typ := PipelineUpdateUpdateScheduleTypesTypeHourly
+
typStr := UpdateScheduleModeHourlyUpdateScheduleTypesMode(typ)
hourly.Mode = typStr
@@ -347,6 +349,7 @@ func CreatePipelineUpdateUpdateScheduleTypesHourly(hourly HourlyUpdateScheduleMo
func CreatePipelineUpdateUpdateScheduleTypesDaily(daily DailyUpdateScheduleMode) PipelineUpdateUpdateScheduleTypes {
typ := PipelineUpdateUpdateScheduleTypesTypeDaily
+
typStr := UpdateScheduleModeDailyUpdateScheduleTypesMode(typ)
daily.Mode = typStr
@@ -358,6 +361,7 @@ func CreatePipelineUpdateUpdateScheduleTypesDaily(daily DailyUpdateScheduleMode)
func CreatePipelineUpdateUpdateScheduleTypesWeekly(weekly WeeklyUpdateScheduleMode) PipelineUpdateUpdateScheduleTypes {
typ := PipelineUpdateUpdateScheduleTypesTypeWeekly
+
typStr := UpdateScheduleModeWeeklyUpdateScheduleTypesMode(typ)
weekly.Mode = typStr
@@ -369,6 +373,7 @@ func CreatePipelineUpdateUpdateScheduleTypesWeekly(weekly WeeklyUpdateScheduleMo
func CreatePipelineUpdateUpdateScheduleTypesMonthly(monthly MonthlyUpdateScheduleMode) PipelineUpdateUpdateScheduleTypes {
typ := PipelineUpdateUpdateScheduleTypesTypeMonthly
+
typStr := UpdateScheduleModeMonthlyUpdateScheduleTypesMode(typ)
monthly.Mode = typStr
@@ -709,6 +714,7 @@ type ScheduleTypes struct {
func CreateScheduleTypesNever(never NeverScheduleMode) ScheduleTypes {
typ := ScheduleTypesTypeNever
+
typStr := ScheduleTypesMode(typ)
never.Mode = typStr
@@ -720,6 +726,7 @@ func CreateScheduleTypesNever(never NeverScheduleMode) ScheduleTypes {
func CreateScheduleTypesHourly(hourly HourlyScheduleMode) ScheduleTypes {
typ := ScheduleTypesTypeHourly
+
typStr := RefreshScheduleModeHourlyScheduleTypesMode(typ)
hourly.Mode = typStr
@@ -731,6 +738,7 @@ func CreateScheduleTypesHourly(hourly HourlyScheduleMode) ScheduleTypes {
func CreateScheduleTypesDaily(daily DailyScheduleMode) ScheduleTypes {
typ := ScheduleTypesTypeDaily
+
typStr := RefreshScheduleModeDailyScheduleTypesMode(typ)
daily.Mode = typStr
@@ -742,6 +750,7 @@ func CreateScheduleTypesDaily(daily DailyScheduleMode) ScheduleTypes {
func CreateScheduleTypesWeekly(weekly WeeklyScheduleMode) ScheduleTypes {
typ := ScheduleTypesTypeWeekly
+
typStr := RefreshScheduleModeWeeklyScheduleTypesMode(typ)
weekly.Mode = typStr
@@ -753,6 +762,7 @@ func CreateScheduleTypesWeekly(weekly WeeklyScheduleMode) ScheduleTypes {
func CreateScheduleTypesMonthly(monthly MonthlyScheduleMode) ScheduleTypes {
typ := ScheduleTypesTypeMonthly
+
typStr := RefreshScheduleModeMonthlyScheduleTypesMode(typ)
monthly.Mode = typStr
diff --git a/internal/sdk/pkg/models/shared/refreshscheduletypes.go b/internal/sdk/pkg/models/shared/refreshscheduletypes.go
index a4db03e..e252e55 100644
--- a/internal/sdk/pkg/models/shared/refreshscheduletypes.go
+++ b/internal/sdk/pkg/models/shared/refreshscheduletypes.go
@@ -31,6 +31,7 @@ type RefreshScheduleTypes struct {
func CreateRefreshScheduleTypesNever(never RefreshScheduleModeNever) RefreshScheduleTypes {
typ := RefreshScheduleTypesTypeNever
+
typStr := RefreshScheduleModeNeverMode(typ)
never.Mode = typStr
@@ -42,6 +43,7 @@ func CreateRefreshScheduleTypesNever(never RefreshScheduleModeNever) RefreshSche
func CreateRefreshScheduleTypesHourly(hourly RefreshScheduleModeHourly) RefreshScheduleTypes {
typ := RefreshScheduleTypesTypeHourly
+
typStr := RefreshScheduleModeHourlyMode(typ)
hourly.Mode = typStr
@@ -53,6 +55,7 @@ func CreateRefreshScheduleTypesHourly(hourly RefreshScheduleModeHourly) RefreshS
func CreateRefreshScheduleTypesDaily(daily RefreshScheduleModeDaily) RefreshScheduleTypes {
typ := RefreshScheduleTypesTypeDaily
+
typStr := RefreshScheduleModeDailyMode(typ)
daily.Mode = typStr
@@ -64,6 +67,7 @@ func CreateRefreshScheduleTypesDaily(daily RefreshScheduleModeDaily) RefreshSche
func CreateRefreshScheduleTypesWeekly(weekly RefreshScheduleModeWeekly) RefreshScheduleTypes {
typ := RefreshScheduleTypesTypeWeekly
+
typStr := RefreshScheduleModeWeeklyMode(typ)
weekly.Mode = typStr
@@ -75,6 +79,7 @@ func CreateRefreshScheduleTypesWeekly(weekly RefreshScheduleModeWeekly) RefreshS
func CreateRefreshScheduleTypesMonthly(monthly RefreshScheduleModeMonthly) RefreshScheduleTypes {
typ := RefreshScheduleTypesTypeMonthly
+
typStr := RefreshScheduleModeMonthlyMode(typ)
monthly.Mode = typStr
diff --git a/internal/sdk/pkg/models/shared/sourcetypes.go b/internal/sdk/pkg/models/shared/sourcetypes.go
index 7f4e609..fce58bc 100644
--- a/internal/sdk/pkg/models/shared/sourcetypes.go
+++ b/internal/sdk/pkg/models/shared/sourcetypes.go
@@ -185,6 +185,7 @@ type SourceTypes struct {
func CreateSourceTypesActiveCampaign(activeCampaign SourceActiveCampaign) SourceTypes {
typ := SourceTypesTypeActiveCampaign
+
typStr := SourceActiveCampaignType(typ)
activeCampaign.Type = typStr
@@ -196,6 +197,7 @@ func CreateSourceTypesActiveCampaign(activeCampaign SourceActiveCampaign) Source
func CreateSourceTypesBigquery(bigquery SourceBigQuery) SourceTypes {
typ := SourceTypesTypeBigquery
+
typStr := SourceBigQueryType(typ)
bigquery.Type = typStr
@@ -207,6 +209,7 @@ func CreateSourceTypesBigquery(bigquery SourceBigQuery) SourceTypes {
func CreateSourceTypesBingAds(bingAds SourceBingAds) SourceTypes {
typ := SourceTypesTypeBingAds
+
typStr := SourceBingAdsType(typ)
bingAds.Type = typStr
@@ -218,6 +221,7 @@ func CreateSourceTypesBingAds(bingAds SourceBingAds) SourceTypes {
func CreateSourceTypesBlackline(blackline SourceBlackline) SourceTypes {
typ := SourceTypesTypeBlackline
+
typStr := SourceBlacklineType(typ)
blackline.Type = typStr
@@ -229,6 +233,7 @@ func CreateSourceTypesBlackline(blackline SourceBlackline) SourceTypes {
func CreateSourceTypesCriteo(criteo SourceCriteo) SourceTypes {
typ := SourceTypesTypeCriteo
+
typStr := SourceCriteoType(typ)
criteo.Type = typStr
@@ -240,6 +245,7 @@ func CreateSourceTypesCriteo(criteo SourceCriteo) SourceTypes {
func CreateSourceTypesDb2(db2 SourceDb2) SourceTypes {
typ := SourceTypesTypeDb2
+
typStr := SourceDb2Type(typ)
db2.Type = typStr
@@ -251,6 +257,7 @@ func CreateSourceTypesDb2(db2 SourceDb2) SourceTypes {
func CreateSourceTypesDb2Sharded(db2Sharded SourceDb2Sharded) SourceTypes {
typ := SourceTypesTypeDb2Sharded
+
typStr := SourceDb2ShardedType(typ)
db2Sharded.Type = typStr
@@ -262,6 +269,7 @@ func CreateSourceTypesDb2Sharded(db2Sharded SourceDb2Sharded) SourceTypes {
func CreateSourceTypesDeltaLake(deltaLake SourceDeltaLake) SourceTypes {
typ := SourceTypesTypeDeltaLake
+
typStr := SourceDeltaLakeType(typ)
deltaLake.Type = typStr
@@ -273,6 +281,7 @@ func CreateSourceTypesDeltaLake(deltaLake SourceDeltaLake) SourceTypes {
func CreateSourceTypesElasticsearch(elasticsearch SourceElasticSearch) SourceTypes {
typ := SourceTypesTypeElasticsearch
+
typStr := SourceElasticSearchType(typ)
elasticsearch.Type = typStr
@@ -284,6 +293,7 @@ func CreateSourceTypesElasticsearch(elasticsearch SourceElasticSearch) SourceTyp
func CreateSourceTypesElluminate(elluminate SourceElluminate) SourceTypes {
typ := SourceTypesTypeElluminate
+
typStr := SourceElluminateType(typ)
elluminate.Type = typStr
@@ -295,6 +305,7 @@ func CreateSourceTypesElluminate(elluminate SourceElluminate) SourceTypes {
func CreateSourceTypesEloqua(eloqua SourceEloqua) SourceTypes {
typ := SourceTypesTypeEloqua
+
typStr := SourceEloquaType(typ)
eloqua.Type = typStr
@@ -306,6 +317,7 @@ func CreateSourceTypesEloqua(eloqua SourceEloqua) SourceTypes {
func CreateSourceTypesFacebookAds(facebookAds SourceFacebookAds) SourceTypes {
typ := SourceTypesTypeFacebookAds
+
typStr := SourceFacebookAdsType(typ)
facebookAds.Type = typStr
@@ -317,6 +329,7 @@ func CreateSourceTypesFacebookAds(facebookAds SourceFacebookAds) SourceTypes {
func CreateSourceTypesFifteenFive(fifteenFive SourceFifteenFive) SourceTypes {
typ := SourceTypesTypeFifteenFive
+
typStr := SourceFifteenFiveType(typ)
fifteenFive.Type = typStr
@@ -328,6 +341,7 @@ func CreateSourceTypesFifteenFive(fifteenFive SourceFifteenFive) SourceTypes {
func CreateSourceTypesFreshworks(freshworks SourceFreshworks) SourceTypes {
typ := SourceTypesTypeFreshworks
+
typStr := SourceFreshworksType(typ)
freshworks.Type = typStr
@@ -339,6 +353,7 @@ func CreateSourceTypesFreshworks(freshworks SourceFreshworks) SourceTypes {
func CreateSourceTypesFtp(ftp SourceFtp) SourceTypes {
typ := SourceTypesTypeFtp
+
typStr := SourceFtpType(typ)
ftp.Type = typStr
@@ -350,6 +365,7 @@ func CreateSourceTypesFtp(ftp SourceFtp) SourceTypes {
func CreateSourceTypesGong(gong SourceGong) SourceTypes {
typ := SourceTypesTypeGong
+
typStr := SourceGongType(typ)
gong.Type = typStr
@@ -361,6 +377,7 @@ func CreateSourceTypesGong(gong SourceGong) SourceTypes {
func CreateSourceTypesGoogleAnalytics(googleAnalytics SourceGoogleAnalytics) SourceTypes {
typ := SourceTypesTypeGoogleAnalytics
+
typStr := SourceGoogleAnalyticsType(typ)
googleAnalytics.Type = typStr
@@ -372,6 +389,7 @@ func CreateSourceTypesGoogleAnalytics(googleAnalytics SourceGoogleAnalytics) Sou
func CreateSourceTypesGoogleAnalyticsGa4(googleAnalyticsGa4 SourceGoogleAnalyticsGa4) SourceTypes {
typ := SourceTypesTypeGoogleAnalyticsGa4
+
typStr := SourceGoogleAnalyticsGa4Type(typ)
googleAnalyticsGa4.Type = typStr
@@ -383,6 +401,7 @@ func CreateSourceTypesGoogleAnalyticsGa4(googleAnalyticsGa4 SourceGoogleAnalytic
func CreateSourceTypesGoogleCloudStorage(googleCloudStorage SourceGoogleCloudStorage) SourceTypes {
typ := SourceTypesTypeGoogleCloudStorage
+
typStr := SourceGoogleCloudStorageType(typ)
googleCloudStorage.Type = typStr
@@ -394,6 +413,7 @@ func CreateSourceTypesGoogleCloudStorage(googleCloudStorage SourceGoogleCloudSto
func CreateSourceTypesGoogleAds(googleAds SourceGoogleAds) SourceTypes {
typ := SourceTypesTypeGoogleAds
+
typStr := SourceGoogleAdsType(typ)
googleAds.Type = typStr
@@ -405,6 +425,7 @@ func CreateSourceTypesGoogleAds(googleAds SourceGoogleAds) SourceTypes {
func CreateSourceTypesGoogleSheets(googleSheets SourceGoogleSheets) SourceTypes {
typ := SourceTypesTypeGoogleSheets
+
typStr := SourceGoogleSheetsType(typ)
googleSheets.Type = typStr
@@ -416,6 +437,7 @@ func CreateSourceTypesGoogleSheets(googleSheets SourceGoogleSheets) SourceTypes
func CreateSourceTypesHubspot(hubspot SourceHubspot) SourceTypes {
typ := SourceTypesTypeHubspot
+
typStr := SourceHubspotType(typ)
hubspot.Type = typStr
@@ -427,6 +449,7 @@ func CreateSourceTypesHubspot(hubspot SourceHubspot) SourceTypes {
func CreateSourceTypesIntercom(intercom SourceIntercom) SourceTypes {
typ := SourceTypesTypeIntercom
+
typStr := SourceIntercomType(typ)
intercom.Type = typStr
@@ -438,6 +461,7 @@ func CreateSourceTypesIntercom(intercom SourceIntercom) SourceTypes {
func CreateSourceTypesImpactRadius(impactRadius SourceImpactRadius) SourceTypes {
typ := SourceTypesTypeImpactRadius
+
typStr := SourceImpactRadiusType(typ)
impactRadius.Type = typStr
@@ -449,6 +473,7 @@ func CreateSourceTypesImpactRadius(impactRadius SourceImpactRadius) SourceTypes
func CreateSourceTypesJira(jira SourceJira) SourceTypes {
typ := SourceTypesTypeJira
+
typStr := SourceJiraType(typ)
jira.Type = typStr
@@ -460,6 +485,7 @@ func CreateSourceTypesJira(jira SourceJira) SourceTypes {
func CreateSourceTypesJiraAlign(jiraAlign SourceJiraAlign) SourceTypes {
typ := SourceTypesTypeJiraAlign
+
typStr := SourceJiraAlignType(typ)
jiraAlign.Type = typStr
@@ -471,6 +497,7 @@ func CreateSourceTypesJiraAlign(jiraAlign SourceJiraAlign) SourceTypes {
func CreateSourceTypesKafka(kafka SourceKafka) SourceTypes {
typ := SourceTypesTypeKafka
+
typStr := SourceKafkaType(typ)
kafka.Type = typStr
@@ -482,6 +509,7 @@ func CreateSourceTypesKafka(kafka SourceKafka) SourceTypes {
func CreateSourceTypesKustomer(kustomer SourceKustomer) SourceTypes {
typ := SourceTypesTypeKustomer
+
typStr := SourceKustomerType(typ)
kustomer.Type = typStr
@@ -493,6 +521,7 @@ func CreateSourceTypesKustomer(kustomer SourceKustomer) SourceTypes {
func CreateSourceTypesLdap(ldap SourceLdap) SourceTypes {
typ := SourceTypesTypeLdap
+
typStr := SourceLdapType(typ)
ldap.Type = typStr
@@ -504,6 +533,7 @@ func CreateSourceTypesLdap(ldap SourceLdap) SourceTypes {
func CreateSourceTypesLdapVirtualListView(ldapVirtualListView SourceLdapVirtualListView) SourceTypes {
typ := SourceTypesTypeLdapVirtualListView
+
typStr := SourceLdapVirtualListViewType(typ)
ldapVirtualListView.Type = typStr
@@ -515,6 +545,7 @@ func CreateSourceTypesLdapVirtualListView(ldapVirtualListView SourceLdapVirtualL
func CreateSourceTypesLinkedInAds(linkedInAds SourceLinkedInAds) SourceTypes {
typ := SourceTypesTypeLinkedInAds
+
typStr := SourceLinkedInAdsType(typ)
linkedInAds.Type = typStr
@@ -526,6 +557,7 @@ func CreateSourceTypesLinkedInAds(linkedInAds SourceLinkedInAds) SourceTypes {
func CreateSourceTypesMarketo(marketo SourceMarketo) SourceTypes {
typ := SourceTypesTypeMarketo
+
typStr := SourceMarketoType(typ)
marketo.Type = typStr
@@ -537,6 +569,7 @@ func CreateSourceTypesMarketo(marketo SourceMarketo) SourceTypes {
func CreateSourceTypesMixpanel(mixpanel SourceMixpanel) SourceTypes {
typ := SourceTypesTypeMixpanel
+
typStr := SourceMixpanelType(typ)
mixpanel.Type = typStr
@@ -548,6 +581,7 @@ func CreateSourceTypesMixpanel(mixpanel SourceMixpanel) SourceTypes {
func CreateSourceTypesMongodb(mongodb SourceMongodb) SourceTypes {
typ := SourceTypesTypeMongodb
+
typStr := SourceMongodbType(typ)
mongodb.Type = typStr
@@ -559,6 +593,7 @@ func CreateSourceTypesMongodb(mongodb SourceMongodb) SourceTypes {
func CreateSourceTypesMysqlSharded(mysqlSharded SourceMysqlSharded) SourceTypes {
typ := SourceTypesTypeMysqlSharded
+
typStr := SourceMysqlShardedType(typ)
mysqlSharded.Type = typStr
@@ -570,6 +605,7 @@ func CreateSourceTypesMysqlSharded(mysqlSharded SourceMysqlSharded) SourceTypes
func CreateSourceTypesMysql(mysql SourceMysql) SourceTypes {
typ := SourceTypesTypeMysql
+
typStr := SourceMysqlType(typ)
mysql.Type = typStr
@@ -581,6 +617,7 @@ func CreateSourceTypesMysql(mysql SourceMysql) SourceTypes {
func CreateSourceTypesNetsuite(netsuite SourceNetsuite) SourceTypes {
typ := SourceTypesTypeNetsuite
+
typStr := SourceNetsuiteType(typ)
netsuite.Type = typStr
@@ -592,6 +629,7 @@ func CreateSourceTypesNetsuite(netsuite SourceNetsuite) SourceTypes {
func CreateSourceTypesNetsuiteV2(netsuiteV2 SourceNetsuiteV2) SourceTypes {
typ := SourceTypesTypeNetsuiteV2
+
typStr := SourceNetsuiteV2Type(typ)
netsuiteV2.Type = typStr
@@ -603,6 +641,7 @@ func CreateSourceTypesNetsuiteV2(netsuiteV2 SourceNetsuiteV2) SourceTypes {
func CreateSourceTypesOracle(oracle SourceOracle) SourceTypes {
typ := SourceTypesTypeOracle
+
typStr := SourceOracleType(typ)
oracle.Type = typStr
@@ -614,6 +653,7 @@ func CreateSourceTypesOracle(oracle SourceOracle) SourceTypes {
func CreateSourceTypesOracleSharded(oracleSharded SourceOracleSharded) SourceTypes {
typ := SourceTypesTypeOracleSharded
+
typStr := SourceOracleShardedType(typ)
oracleSharded.Type = typStr
@@ -625,6 +665,7 @@ func CreateSourceTypesOracleSharded(oracleSharded SourceOracleSharded) SourceTyp
func CreateSourceTypesOutreach(outreach SourceOutreach) SourceTypes {
typ := SourceTypesTypeOutreach
+
typStr := SourceOutreachType(typ)
outreach.Type = typStr
@@ -636,6 +677,7 @@ func CreateSourceTypesOutreach(outreach SourceOutreach) SourceTypes {
func CreateSourceTypesOutlook(outlook SourceOutlook) SourceTypes {
typ := SourceTypesTypeOutlook
+
typStr := SourceOutlookType(typ)
outlook.Type = typStr
@@ -647,6 +689,7 @@ func CreateSourceTypesOutlook(outlook SourceOutlook) SourceTypes {
func CreateSourceTypesPinterestAds(pinterestAds SourcePinterestAds) SourceTypes {
typ := SourceTypesTypePinterestAds
+
typStr := SourcePinterestAdsType(typ)
pinterestAds.Type = typStr
@@ -658,6 +701,7 @@ func CreateSourceTypesPinterestAds(pinterestAds SourcePinterestAds) SourceTypes
func CreateSourceTypesPostgres(postgres SourcePostgres) SourceTypes {
typ := SourceTypesTypePostgres
+
typStr := SourcePostgresType(typ)
postgres.Type = typStr
@@ -669,6 +713,7 @@ func CreateSourceTypesPostgres(postgres SourcePostgres) SourceTypes {
func CreateSourceTypesPostgresSharded(postgresSharded SourcePostgresSharded) SourceTypes {
typ := SourceTypesTypePostgresSharded
+
typStr := SourcePostgresShardedType(typ)
postgresSharded.Type = typStr
@@ -680,6 +725,7 @@ func CreateSourceTypesPostgresSharded(postgresSharded SourcePostgresSharded) Sou
func CreateSourceTypesQuoraAds(quoraAds SourceQuoraAds) SourceTypes {
typ := SourceTypesTypeQuoraAds
+
typStr := SourceQuoraAdsType(typ)
quoraAds.Type = typStr
@@ -691,6 +737,7 @@ func CreateSourceTypesQuoraAds(quoraAds SourceQuoraAds) SourceTypes {
func CreateSourceTypesRaveMedidata(raveMedidata SourceRaveMedidata) SourceTypes {
typ := SourceTypesTypeRaveMedidata
+
typStr := SourceRaveMedidataType(typ)
raveMedidata.Type = typStr
@@ -702,6 +749,7 @@ func CreateSourceTypesRaveMedidata(raveMedidata SourceRaveMedidata) SourceTypes
func CreateSourceTypesRecurly(recurly SourceRecurly) SourceTypes {
typ := SourceTypesTypeRecurly
+
typStr := SourceRecurlyType(typ)
recurly.Type = typStr
@@ -713,6 +761,7 @@ func CreateSourceTypesRecurly(recurly SourceRecurly) SourceTypes {
func CreateSourceTypesRedshift(redshift SourceRedshift) SourceTypes {
typ := SourceTypesTypeRedshift
+
typStr := SourceRedshiftType(typ)
redshift.Type = typStr
@@ -724,6 +773,7 @@ func CreateSourceTypesRedshift(redshift SourceRedshift) SourceTypes {
func CreateSourceTypesRedshiftSharded(redshiftSharded SourceRedshiftSharded) SourceTypes {
typ := SourceTypesTypeRedshiftSharded
+
typStr := SourceRedshiftShardedType(typ)
redshiftSharded.Type = typStr
@@ -735,6 +785,7 @@ func CreateSourceTypesRedshiftSharded(redshiftSharded SourceRedshiftSharded) Sou
func CreateSourceTypesS3Legacy(s3Legacy SourceS3Legacy) SourceTypes {
typ := SourceTypesTypeS3Legacy
+
typStr := SourceS3LegacyType(typ)
s3Legacy.Type = typStr
@@ -746,6 +797,7 @@ func CreateSourceTypesS3Legacy(s3Legacy SourceS3Legacy) SourceTypes {
func CreateSourceTypesS3Input(s3Input SourceS3Input) SourceTypes {
typ := SourceTypesTypeS3Input
+
typStr := SourceS3InputType(typ)
s3Input.Type = typStr
@@ -757,6 +809,7 @@ func CreateSourceTypesS3Input(s3Input SourceS3Input) SourceTypes {
func CreateSourceTypesSalesforceMarketingCloud(salesforceMarketingCloud SourceSalesforceMarketingCloud) SourceTypes {
typ := SourceTypesTypeSalesforceMarketingCloud
+
typStr := SourceSalesforceMarketingCloudType(typ)
salesforceMarketingCloud.Type = typStr
@@ -768,6 +821,7 @@ func CreateSourceTypesSalesforceMarketingCloud(salesforceMarketingCloud SourceSa
func CreateSourceTypesSapHana(sapHana SourceSapHana) SourceTypes {
typ := SourceTypesTypeSapHana
+
typStr := SourceSapHanaType(typ)
sapHana.Type = typStr
@@ -779,6 +833,7 @@ func CreateSourceTypesSapHana(sapHana SourceSapHana) SourceTypes {
func CreateSourceTypesSapHanaSharded(sapHanaSharded SourceSapHanaSharded) SourceTypes {
typ := SourceTypesTypeSapHanaSharded
+
typStr := SourceSapHanaShardedType(typ)
sapHanaSharded.Type = typStr
@@ -790,6 +845,7 @@ func CreateSourceTypesSapHanaSharded(sapHanaSharded SourceSapHanaSharded) Source
func CreateSourceTypesSeismic(seismic SourceSeismic) SourceTypes {
typ := SourceTypesTypeSeismic
+
typStr := SourceSeismicType(typ)
seismic.Type = typStr
@@ -801,6 +857,7 @@ func CreateSourceTypesSeismic(seismic SourceSeismic) SourceTypes {
func CreateSourceTypesShopify(shopify SourceShopify) SourceTypes {
typ := SourceTypesTypeShopify
+
typStr := SourceShopifyType(typ)
shopify.Type = typStr
@@ -812,6 +869,7 @@ func CreateSourceTypesShopify(shopify SourceShopify) SourceTypes {
func CreateSourceTypesSkyward(skyward SourceSkyward) SourceTypes {
typ := SourceTypesTypeSkyward
+
typStr := SourceSkywardType(typ)
skyward.Type = typStr
@@ -823,6 +881,7 @@ func CreateSourceTypesSkyward(skyward SourceSkyward) SourceTypes {
func CreateSourceTypesSalesforce(salesforce SourceSalesforce) SourceTypes {
typ := SourceTypesTypeSalesforce
+
typStr := SourceSalesforceType(typ)
salesforce.Type = typStr
@@ -834,6 +893,7 @@ func CreateSourceTypesSalesforce(salesforce SourceSalesforce) SourceTypes {
func CreateSourceTypesSftp(sftp SourceSftp) SourceTypes {
typ := SourceTypesTypeSftp
+
typStr := SourceSftpType(typ)
sftp.Type = typStr
@@ -845,6 +905,7 @@ func CreateSourceTypesSftp(sftp SourceSftp) SourceTypes {
func CreateSourceTypesSQLServer(sqlServer SourceSQLServer) SourceTypes {
typ := SourceTypesTypeSQLServer
+
typStr := SourceSQLServerType(typ)
sqlServer.Type = typStr
@@ -856,6 +917,7 @@ func CreateSourceTypesSQLServer(sqlServer SourceSQLServer) SourceTypes {
func CreateSourceTypesSQLServerSharded(sqlServerSharded SourceSQLServerSharded) SourceTypes {
typ := SourceTypesTypeSQLServerSharded
+
typStr := SourceSQLServerShardedType(typ)
sqlServerSharded.Type = typStr
@@ -867,6 +929,7 @@ func CreateSourceTypesSQLServerSharded(sqlServerSharded SourceSQLServerSharded)
func CreateSourceTypesStreaming(streaming SourceStreaming) SourceTypes {
typ := SourceTypesTypeStreaming
+
typStr := SourceStreamingType(typ)
streaming.Type = typStr
@@ -878,6 +941,7 @@ func CreateSourceTypesStreaming(streaming SourceStreaming) SourceTypes {
func CreateSourceTypesSnowflake(snowflake SourceSnowflake) SourceTypes {
typ := SourceTypesTypeSnowflake
+
typStr := SourceSnowflakeType(typ)
snowflake.Type = typStr
@@ -889,6 +953,7 @@ func CreateSourceTypesSnowflake(snowflake SourceSnowflake) SourceTypes {
func CreateSourceTypesSnowflakeSharded(snowflakeSharded SourceSnowflakeSharded) SourceTypes {
typ := SourceTypesTypeSnowflakeSharded
+
typStr := SourceSnowflakeShardedType(typ)
snowflakeSharded.Type = typStr
@@ -900,6 +965,7 @@ func CreateSourceTypesSnowflakeSharded(snowflakeSharded SourceSnowflakeSharded)
func CreateSourceTypesSquare(square SourceSquare) SourceTypes {
typ := SourceTypesTypeSquare
+
typStr := SourceSquareType(typ)
square.Type = typStr
@@ -911,6 +977,7 @@ func CreateSourceTypesSquare(square SourceSquare) SourceTypes {
func CreateSourceTypesSnapchatAds(snapchatAds SourceSnapchatAds) SourceTypes {
typ := SourceTypesTypeSnapchatAds
+
typStr := SourceSnapchatAdsType(typ)
snapchatAds.Type = typStr
@@ -922,6 +989,7 @@ func CreateSourceTypesSnapchatAds(snapchatAds SourceSnapchatAds) SourceTypes {
func CreateSourceTypesStripe(stripe SourceStripe) SourceTypes {
typ := SourceTypesTypeStripe
+
typStr := SourceStripeType(typ)
stripe.Type = typStr
@@ -933,6 +1001,7 @@ func CreateSourceTypesStripe(stripe SourceStripe) SourceTypes {
func CreateSourceTypesSumtotal(sumtotal SourceSumTotal) SourceTypes {
typ := SourceTypesTypeSumtotal
+
typStr := SourceSumTotalType(typ)
sumtotal.Type = typStr
@@ -944,6 +1013,7 @@ func CreateSourceTypesSumtotal(sumtotal SourceSumTotal) SourceTypes {
func CreateSourceTypesTheTradeDesk(theTradeDesk SourceTheTradeDesk) SourceTypes {
typ := SourceTypesTypeTheTradeDesk
+
typStr := SourceTheTradeDeskType(typ)
theTradeDesk.Type = typStr
@@ -955,6 +1025,7 @@ func CreateSourceTypesTheTradeDesk(theTradeDesk SourceTheTradeDesk) SourceTypes
func CreateSourceTypesTikTokAds(tikTokAds SourceTikTokAds) SourceTypes {
typ := SourceTypesTypeTikTokAds
+
typStr := SourceTikTokAdsType(typ)
tikTokAds.Type = typStr
@@ -966,6 +1037,7 @@ func CreateSourceTypesTikTokAds(tikTokAds SourceTikTokAds) SourceTypes {
func CreateSourceTypesTwilio(twilio SourceTwilio) SourceTypes {
typ := SourceTypesTypeTwilio
+
typStr := SourceTwilioType(typ)
twilio.Type = typStr
@@ -977,6 +1049,7 @@ func CreateSourceTypesTwilio(twilio SourceTwilio) SourceTypes {
func CreateSourceTypesTwitterAds(twitterAds SourceTwitter) SourceTypes {
typ := SourceTypesTypeTwitterAds
+
typStr := SourceTwitterType(typ)
twitterAds.Type = typStr
@@ -988,6 +1061,7 @@ func CreateSourceTypesTwitterAds(twitterAds SourceTwitter) SourceTypes {
func CreateSourceTypesUserDefinedAPI(userDefinedAPI SourceUserDefinedAPI) SourceTypes {
typ := SourceTypesTypeUserDefinedAPI
+
typStr := SourceUserDefinedAPIType(typ)
userDefinedAPI.Type = typStr
@@ -999,6 +1073,7 @@ func CreateSourceTypesUserDefinedAPI(userDefinedAPI SourceUserDefinedAPI) Source
func CreateSourceTypesUservoice(uservoice SourceUserVoice) SourceTypes {
typ := SourceTypesTypeUservoice
+
typStr := SourceUserVoiceType(typ)
uservoice.Type = typStr
@@ -1010,6 +1085,7 @@ func CreateSourceTypesUservoice(uservoice SourceUserVoice) SourceTypes {
func CreateSourceTypesVeeva(veeva SourceVeeva) SourceTypes {
typ := SourceTypesTypeVeeva
+
typStr := SourceVeevaType(typ)
veeva.Type = typStr
@@ -1021,6 +1097,7 @@ func CreateSourceTypesVeeva(veeva SourceVeeva) SourceTypes {
func CreateSourceTypesVerizonMediaDsp(verizonMediaDsp SourceVerizonMediaDsp) SourceTypes {
typ := SourceTypesTypeVerizonMediaDsp
+
typStr := SourceVerizonMediaDspType(typ)
verizonMediaDsp.Type = typStr
@@ -1032,6 +1109,7 @@ func CreateSourceTypesVerizonMediaDsp(verizonMediaDsp SourceVerizonMediaDsp) Sou
func CreateSourceTypesWorkdayReport(workdayReport SourceWorkdayReport) SourceTypes {
typ := SourceTypesTypeWorkdayReport
+
typStr := SourceWorkdayReportType(typ)
workdayReport.Type = typStr
@@ -1043,6 +1121,7 @@ func CreateSourceTypesWorkdayReport(workdayReport SourceWorkdayReport) SourceTyp
func CreateSourceTypesWorkfront(workfront SourceWorkfront) SourceTypes {
typ := SourceTypesTypeWorkfront
+
typStr := SourceWorkfrontType(typ)
workfront.Type = typStr
@@ -1054,6 +1133,7 @@ func CreateSourceTypesWorkfront(workfront SourceWorkfront) SourceTypes {
func CreateSourceTypesZendesk(zendesk SourceZendesk) SourceTypes {
typ := SourceTypesTypeZendesk
+
typStr := SourceZendeskType(typ)
zendesk.Type = typStr
@@ -1065,6 +1145,7 @@ func CreateSourceTypesZendesk(zendesk SourceZendesk) SourceTypes {
func CreateSourceTypesZoomPhone(zoomPhone SourceZoomPhone) SourceTypes {
typ := SourceTypesTypeZoomPhone
+
typStr := SourceZoomPhoneType(typ)
zoomPhone.Type = typStr
@@ -1076,6 +1157,7 @@ func CreateSourceTypesZoomPhone(zoomPhone SourceZoomPhone) SourceTypes {
func CreateSourceTypesZuora(zuora SourceZuora) SourceTypes {
typ := SourceTypesTypeZuora
+
typStr := SourceZuoraType(typ)
zuora.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/transformtypes.go b/internal/sdk/pkg/models/shared/transformtypes.go
index 5074dc2..df215ec 100644
--- a/internal/sdk/pkg/models/shared/transformtypes.go
+++ b/internal/sdk/pkg/models/shared/transformtypes.go
@@ -31,6 +31,7 @@ type TransformTypes struct {
func CreateTransformTypesAddFilePath(addFilePath TransformAddFilePath) TransformTypes {
typ := TransformTypesTypeAddFilePath
+
typStr := TransformAddFilePathType(typ)
addFilePath.Type = typStr
@@ -42,6 +43,7 @@ func CreateTransformTypesAddFilePath(addFilePath TransformAddFilePath) Transform
func CreateTransformTypesFlattenJSONObject(flattenJSONObject TransformExtractJSONFields) TransformTypes {
typ := TransformTypesTypeFlattenJSONObject
+
typStr := TransformExtractJSONFieldsType(typ)
flattenJSONObject.Type = typStr
@@ -53,6 +55,7 @@ func CreateTransformTypesFlattenJSONObject(flattenJSONObject TransformExtractJSO
func CreateTransformTypesParquetToRows(parquetToRows TransformParquetToRows) TransformTypes {
typ := TransformTypesTypeParquetToRows
+
typStr := TransformParquetToRowsType(typ)
parquetToRows.Type = typStr
@@ -64,6 +67,7 @@ func CreateTransformTypesParquetToRows(parquetToRows TransformParquetToRows) Tra
func CreateTransformTypesParseByRegex(parseByRegex TransformParseByRegex) TransformTypes {
typ := TransformTypesTypeParseByRegex
+
typStr := TransformParseByRegexType(typ)
parseByRegex.Type = typStr
@@ -75,6 +79,7 @@ func CreateTransformTypesParseByRegex(parseByRegex TransformParseByRegex) Transf
func CreateTransformTypesRenameColumns(renameColumns TransformRenameColumns) TransformTypes {
typ := TransformTypesTypeRenameColumns
+
typStr := TransformRenameColumnsType(typ)
renameColumns.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/updatescheduletypes.go b/internal/sdk/pkg/models/shared/updatescheduletypes.go
index 860be07..a382cb5 100644
--- a/internal/sdk/pkg/models/shared/updatescheduletypes.go
+++ b/internal/sdk/pkg/models/shared/updatescheduletypes.go
@@ -32,6 +32,7 @@ type UpdateScheduleTypes struct {
func CreateUpdateScheduleTypesInterval(interval UpdateScheduleModeInterval) UpdateScheduleTypes {
typ := UpdateScheduleTypesTypeInterval
+
typStr := Mode(typ)
interval.Mode = typStr
@@ -43,6 +44,7 @@ func CreateUpdateScheduleTypesInterval(interval UpdateScheduleModeInterval) Upda
func CreateUpdateScheduleTypesHourly(hourly UpdateScheduleModeHourly) UpdateScheduleTypes {
typ := UpdateScheduleTypesTypeHourly
+
typStr := UpdateScheduleModeHourlyMode(typ)
hourly.Mode = typStr
@@ -54,6 +56,7 @@ func CreateUpdateScheduleTypesHourly(hourly UpdateScheduleModeHourly) UpdateSche
func CreateUpdateScheduleTypesDaily(daily UpdateScheduleModeDaily) UpdateScheduleTypes {
typ := UpdateScheduleTypesTypeDaily
+
typStr := UpdateScheduleModeDailyMode(typ)
daily.Mode = typStr
@@ -65,6 +68,7 @@ func CreateUpdateScheduleTypesDaily(daily UpdateScheduleModeDaily) UpdateSchedul
func CreateUpdateScheduleTypesWeekly(weekly UpdateScheduleModeWeekly) UpdateScheduleTypes {
typ := UpdateScheduleTypesTypeWeekly
+
typStr := UpdateScheduleModeWeeklyMode(typ)
weekly.Mode = typStr
@@ -76,6 +80,7 @@ func CreateUpdateScheduleTypesWeekly(weekly UpdateScheduleModeWeekly) UpdateSche
func CreateUpdateScheduleTypesMonthly(monthly UpdateScheduleModeMonthly) UpdateScheduleTypes {
typ := UpdateScheduleTypesTypeMonthly
+
typStr := UpdateScheduleModeMonthlyMode(typ)
monthly.Mode = typStr
diff --git a/internal/sdk/pkg/models/shared/warehousetypes.go b/internal/sdk/pkg/models/shared/warehousetypes.go
index 1452b13..286232e 100644
--- a/internal/sdk/pkg/models/shared/warehousetypes.go
+++ b/internal/sdk/pkg/models/shared/warehousetypes.go
@@ -25,6 +25,7 @@ type WarehouseTypes struct {
func CreateWarehouseTypesRedshift(redshift WarehouseRedshift) WarehouseTypes {
typ := WarehouseTypesTypeRedshift
+
typStr := WarehouseRedshiftType(typ)
redshift.Type = typStr
@@ -36,6 +37,7 @@ func CreateWarehouseTypesRedshift(redshift WarehouseRedshift) WarehouseTypes {
func CreateWarehouseTypesSnowflake(snowflake WarehouseSnowflake) WarehouseTypes {
typ := WarehouseTypesTypeSnowflake
+
typStr := WarehouseSnowflakeType(typ)
snowflake.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/warehousetypesinput.go b/internal/sdk/pkg/models/shared/warehousetypesinput.go
index 5d3d5e3..c5f1c90 100644
--- a/internal/sdk/pkg/models/shared/warehousetypesinput.go
+++ b/internal/sdk/pkg/models/shared/warehousetypesinput.go
@@ -25,6 +25,7 @@ type WarehouseTypesInput struct {
func CreateWarehouseTypesInputRedshift(redshift WarehouseRedshiftInput) WarehouseTypesInput {
typ := WarehouseTypesInputTypeRedshift
+
typStr := WarehouseRedshiftType(typ)
redshift.Type = typStr
@@ -36,6 +37,7 @@ func CreateWarehouseTypesInputRedshift(redshift WarehouseRedshiftInput) Warehous
func CreateWarehouseTypesInputSnowflake(snowflake WarehouseSnowflakeInput) WarehouseTypesInput {
typ := WarehouseTypesInputTypeSnowflake
+
typStr := WarehouseSnowflakeType(typ)
snowflake.Type = typStr
diff --git a/internal/sdk/pkg/models/shared/warehouseupdatetypes.go b/internal/sdk/pkg/models/shared/warehouseupdatetypes.go
index f0c5461..bc41e3b 100644
--- a/internal/sdk/pkg/models/shared/warehouseupdatetypes.go
+++ b/internal/sdk/pkg/models/shared/warehouseupdatetypes.go
@@ -25,6 +25,7 @@ type WarehouseUpdateTypes struct {
func CreateWarehouseUpdateTypesRedshift(redshift WarehouseRedshiftUpdate) WarehouseUpdateTypes {
typ := WarehouseUpdateTypesTypeRedshift
+
typStr := WarehouseRedshiftUpdateType(typ)
redshift.Type = typStr
@@ -36,6 +37,7 @@ func CreateWarehouseUpdateTypesRedshift(redshift WarehouseRedshiftUpdate) Wareho
func CreateWarehouseUpdateTypesSnowflake(snowflake WarehouseSnowflakeUpdate) WarehouseUpdateTypes {
typ := WarehouseUpdateTypesTypeSnowflake
+
typStr := WarehouseSnowflakeUpdateType(typ)
snowflake.Type = typStr
diff --git a/internal/sdk/pkg/utils/security.go b/internal/sdk/pkg/utils/security.go
index fa5eff7..ea1d4b2 100644
--- a/internal/sdk/pkg/utils/security.go
+++ b/internal/sdk/pkg/utils/security.go
@@ -204,9 +204,9 @@ func parseSecuritySchemeValue(client *securityConfig, schemeTag *securityTag, se
panic("not supported")
}
case "openIdConnect":
- client.headers[secTag.Name] = valToString(val)
+ client.headers[secTag.Name] = prefixBearer(valToString(val))
case "oauth2":
- client.headers[secTag.Name] = valToString(val)
+ client.headers[secTag.Name] = prefixBearer(valToString(val))
case "http":
switch schemeTag.SubType {
case "bearer":
diff --git a/internal/sdk/sdk.go b/internal/sdk/sdk.go
index 8eac5c1..e57066d 100644
--- a/internal/sdk/sdk.go
+++ b/internal/sdk/sdk.go
@@ -145,9 +145,9 @@ func New(opts ...SDKOption) *SDK {
sdkConfiguration: sdkConfiguration{
Language: "go",
OpenAPIDocVersion: "2.6.13",
- SDKVersion: "0.2.0",
- GenVersion: "2.230.3",
- UserAgent: "speakeasy-sdk/go 0.2.0 2.230.3 2.6.13 etleap",
+ SDKVersion: "0.0.2",
+ GenVersion: "2.237.3",
+ UserAgent: "speakeasy-sdk/go 0.0.2 2.237.3 2.6.13 etleap",
},
}
for _, opt := range opts {
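
The sdk.go hunk keeps three values in lockstep: `SDKVersion`, `GenVersion`, and the `UserAgent` string that embeds both alongside the OpenAPI doc version. A hedged illustration of that layout (the generator bakes the string in as a literal; the `fmt`-based assembly here is only illustrative):

```go
// Hedged illustration of the User-Agent layout in the hunk above;
// the generated code uses a pre-baked literal rather than fmt.
package main

import "fmt"

func main() {
	sdkVersion, genVersion, docVersion := "0.0.2", "2.237.3", "2.6.13"
	ua := fmt.Sprintf("speakeasy-sdk/go %s %s %s etleap",
		sdkVersion, genVersion, docVersion)
	fmt.Println(ua) // speakeasy-sdk/go 0.0.2 2.237.3 2.6.13 etleap
}
```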
diff --git a/main.go b/main.go
index fff103d..7c2fdbc 100644
--- a/main.go
+++ b/main.go
@@ -15,7 +15,7 @@ import (
// Run the docs generation tool, check its repository for more information on how it works and how docs
// can be customized.
-//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs generate --rendered-provider-name terraform-provider-etleap
+//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs generate --provider-name terraform-provider-etleap --rendered-provider-name terraform-provider-etleap
var (
// these will be set by the goreleaser configuration
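
The main.go hunk adds an explicit `--provider-name` to the tfplugindocs invocation; presumably this pins the name the tool uses for generated doc paths and templates instead of letting it be inferred from the module. A hedged note on regenerating, in the same `go:generate` comment style:

```go
// To pick up the new flag, regenerate the docs from the repo root
// (assumes tfplugindocs is tracked as a tool dependency in go.mod):
//
//	go generate ./...
//
// tfplugindocs then renders pages under docs/ using
// "terraform-provider-etleap" for both the provider name and the
// rendered provider title.
```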