diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ce08f4..3a906a3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,7 +2,16 @@
 
 All notable changes to this project will be documented in this file.
 
-## [Unreleased](https://github.com/dbt-labs/terraform-provider-dbtcloud/compare/v1.3.14...HEAD)
+## [Unreleased](https://github.com/dbt-labs/terraform-provider-dbtcloud/compare/v0.3.15...HEAD)
+
+# [0.3.15](https://github.com/dbt-labs/terraform-provider-dbtcloud/compare/v0.3.14...v0.3.15)
+
+### Changes
+
+- Add a `dbtcloud_projects` (with an "s") datasource that returns all the projects, along with some information about the warehouse connections and repositories linked to those projects. It loops through the API in case there are more than 100 projects.
+  - Combined with the `check` block, it can be used to verify, for example, that there are no duplicate project names.
+- Add a datasource for `dbtcloud_global_connection` with the same information as the corresponding resource
+- Add a datasource for `dbtcloud_global_connections` (with an "s"), returning all the connections of an account along with details like the number of environments using them. It can be used to check that no connections share the same name, or that all connections are used by projects.
 
 # [0.3.14](https://github.com/dbt-labs/terraform-provider-dbtcloud/compare/v0.3.13...v0.3.14)
 
diff --git a/docs/data-sources/global_connection.md b/docs/data-sources/global_connection.md
new file mode 100644
index 0000000..c2c1bf9
--- /dev/null
+++ b/docs/data-sources/global_connection.md
@@ -0,0 +1,224 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "dbtcloud_global_connection Data Source - dbtcloud"
+subcategory: ""
+description: |-
+  
+---
+
+# dbtcloud_global_connection (Data Source)
+
+
+
+## Example Usage
+
+```terraform
+data dbtcloud_global_connection my_connection {
+  id = 1234
+}
+```
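+
+A minimal usage sketch (the output name is illustrative) that references one of the read-only attributes elsewhere in the configuration:
+
+```terraform
+output "my_connection_adapter" {
+  value = data.dbtcloud_global_connection.my_connection.adapter_version
+}
+```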
+
+<!-- schema generated by tfplugindocs -->
+## Schema
+
+### Required
+
+- `id` (Number) Connection Identifier
+
+### Read-Only
+
+- `adapter_version` (String) Version of the adapter
+- `apache_spark` (Attributes) Apache Spark connection configuration. (see [below for nested schema](#nestedatt--apache_spark))
+- `athena` (Attributes) Athena connection configuration. (see [below for nested schema](#nestedatt--athena))
+- `bigquery` (Attributes) BigQuery connection configuration. (see [below for nested schema](#nestedatt--bigquery))
+- `databricks` (Attributes) Databricks connection configuration (see [below for nested schema](#nestedatt--databricks))
+- `fabric` (Attributes) Microsoft Fabric connection configuration. (see [below for nested schema](#nestedatt--fabric))
+- `is_ssh_tunnel_enabled` (Boolean) Whether the connection can use an SSH tunnel
+- `name` (String) Connection name
+- `oauth_configuration_id` (Number)
+- `postgres` (Attributes) PostgreSQL connection configuration. (see [below for nested schema](#nestedatt--postgres))
+- `private_link_endpoint_id` (String) Private Link Endpoint ID. This ID can be found using the `privatelink_endpoint` data source
+- `redshift` (Attributes) Redshift connection configuration (see [below for nested schema](#nestedatt--redshift))
+- `snowflake` (Attributes) Snowflake connection configuration (see [below for nested schema](#nestedatt--snowflake))
+- `starburst` (Attributes) Starburst/Trino connection configuration. (see [below for nested schema](#nestedatt--starburst))
+- `synapse` (Attributes) Azure Synapse Analytics connection configuration. (see [below for nested schema](#nestedatt--synapse))
+
+<a id="nestedatt--apache_spark"></a>
+### Nested Schema for `apache_spark`
+
+Read-Only:
+
+- `auth` (String) Auth
+- `cluster` (String) Spark cluster for the connection
+- `connect_retries` (Number) Connection retries. Default=0
+- `connect_timeout` (Number) Connection time out in seconds. Default=10
+- `host` (String) Hostname of the connection
+- `method` (String) Authentication method for the connection (http or thrift).
+- `organization` (String) Organization ID
+- `port` (Number) Port for the connection. Default=443
+- `user` (String) User
+
+
+<a id="nestedatt--athena"></a>
+### Nested Schema for `athena`
+
+Read-Only:
+
+- `database` (String) Specify the database (data catalog) to build models into (lowercase only).
+- `num_boto3_retries` (Number) Number of times to retry boto3 requests (e.g. deleting S3 files for materialized tables).
+- `num_iceberg_retries` (Number) Number of times to retry iceberg commit queries to fix ICEBERG_COMMIT_ERROR.
+- `num_retries` (Number) Number of times to retry a failing query.
+- `poll_interval` (Number) Interval in seconds to use for polling the status of query results in Athena.
+- `region_name` (String) AWS region of your Athena instance.
+- `s3_data_dir` (String) Prefix for storing tables, if different from the connection's S3 staging directory.
+- `s3_data_naming` (String) How to generate table paths in the S3 data directory.
+- `s3_staging_dir` (String) S3 location to store Athena query results and metadata.
+- `s3_tmp_table_dir` (String) Prefix for storing temporary tables, if different from the connection's S3 data directory.
+- `spark_work_group` (String) Identifier of Athena Spark workgroup for running Python models.
+- `work_group` (String) Identifier of Athena workgroup.
+
+
+<a id="nestedatt--bigquery"></a>
+### Nested Schema for `bigquery`
+
+Required:
+
+- `gcp_project_id` (String) The GCP project ID to use for the connection
+
+Read-Only:
+
+- `application_id` (String, Sensitive) OAuth Client ID
+- `application_secret` (String, Sensitive) OAuth Client Secret
+- `auth_provider_x509_cert_url` (String) Auth Provider X509 Cert URL for the Service Account
+- `auth_uri` (String) Auth URI for the Service Account
+- `client_email` (String) Service Account email
+- `client_id` (String) Client ID of the Service Account
+- `client_x509_cert_url` (String) Client X509 Cert URL for the Service Account
+- `dataproc_cluster_name` (String) Dataproc cluster name for PySpark workloads
+- `dataproc_region` (String) Google Cloud region for PySpark workloads on Dataproc
+- `execution_project` (String) Project to bill for query execution
+- `gcs_bucket` (String) URI for a Google Cloud Storage bucket to host Python code executed via Dataproc
+- `impersonate_service_account` (String) Service Account to impersonate when running queries
+- `job_creation_timeout_seconds` (Number) Maximum timeout for the job creation step
+- `job_retry_deadline_seconds` (Number) Total number of seconds to wait while retrying the same query
+- `location` (String) Location to create new Datasets in
+- `maximum_bytes_billed` (Number) Max number of bytes that can be billed for a given BigQuery query
+- `priority` (String) The priority with which to execute BigQuery queries (batch or interactive)
+- `private_key` (String, Sensitive) Private Key for the Service Account
+- `private_key_id` (String) Private Key ID for the Service Account
+- `retries` (Number) Number of retries for queries
+- `scopes` (Set of String) OAuth scopes for the BigQuery connection
+- `timeout_seconds` (Number) Timeout in seconds for queries
+- `token_uri` (String) Token URI for the Service Account
+
+
+<a id="nestedatt--databricks"></a>
+### Nested Schema for `databricks`
+
+Read-Only:
+
+- `catalog` (String) Catalog name if Unity Catalog is enabled in your Databricks workspace.
+- `client_id` (String) Required to enable Databricks OAuth authentication for IDE developers.
+- `client_secret` (String) Required to enable Databricks OAuth authentication for IDE developers.
+- `host` (String) The hostname of the Databricks cluster or SQL warehouse.
+- `http_path` (String) The HTTP path of the Databricks cluster or SQL warehouse.
+
+
+<a id="nestedatt--fabric"></a>
+### Nested Schema for `fabric`
+
+Read-Only:
+
+- `database` (String) The database to connect to for this connection.
+- `login_timeout` (Number) The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+- `port` (Number) The port to connect to for this connection. Default=1433
+- `query_timeout` (Number) The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+- `retries` (Number) The number of times to automatically retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.
+- `server` (String) The server hostname.
+
+
+<a id="nestedatt--postgres"></a>
+### Nested Schema for `postgres`
+
+Read-Only:
+
+- `dbname` (String) The database name for this connection.
+- `hostname` (String) The hostname of the database.
+- `port` (Number) The port to connect to for this connection. Default=5432
+- `ssh_tunnel` (Attributes) PostgreSQL SSH Tunnel configuration (see [below for nested schema](#nestedatt--postgres--ssh_tunnel))
+
+<a id="nestedatt--postgres--ssh_tunnel"></a>
+### Nested Schema for `postgres.ssh_tunnel`
+
+Read-Only:
+
+- `hostname` (String) The hostname for the SSH tunnel.
+- `id` (Number) The ID of the SSH tunnel connection.
+- `port` (Number) The port for the SSH tunnel.
+- `public_key` (String) The SSH public key generated to allow connecting via SSH tunnel.
+- `username` (String) The username to use for the SSH tunnel.
+
+
+
+<a id="nestedatt--redshift"></a>
+### Nested Schema for `redshift`
+
+Required:
+
+- `hostname` (String) The hostname of the data warehouse.
+
+Read-Only:
+
+- `dbname` (String) The database name for this connection.
+- `port` (Number) The port to connect to for this connection. Default=5432
+- `ssh_tunnel` (Attributes) Redshift SSH Tunnel configuration (see [below for nested schema](#nestedatt--redshift--ssh_tunnel))
+
+<a id="nestedatt--redshift--ssh_tunnel"></a>
+### Nested Schema for `redshift.ssh_tunnel`
+
+Read-Only:
+
+- `hostname` (String) The hostname for the SSH tunnel.
+- `id` (Number) The ID of the SSH tunnel connection.
+- `port` (Number) The port for the SSH tunnel.
+- `public_key` (String) The SSH public key generated to allow connecting via SSH tunnel.
+- `username` (String) The username to use for the SSH tunnel.
+
+
+
+<a id="nestedatt--snowflake"></a>
+### Nested Schema for `snowflake`
+
+Read-Only:
+
+- `account` (String) The Snowflake account name
+- `allow_sso` (Boolean) Whether to allow Snowflake OAuth for the connection. If true, the `oauth_client_id` and `oauth_client_secret` fields must be set
+- `client_session_keep_alive` (Boolean) If true, the Snowflake client will keep connections open for longer than the default 4 hours. This is helpful when particularly long-running queries are executing (> 4 hours)
+- `database` (String) The default database for the connection
+- `oauth_client_id` (String, Sensitive) OAuth Client ID. Required to allow OAuth between dbt Cloud and Snowflake
+- `oauth_client_secret` (String, Sensitive) OAuth Client Secret. Required to allow OAuth between dbt Cloud and Snowflake
+- `role` (String) The Snowflake role to use when running queries on the connection
+- `warehouse` (String) The default Snowflake Warehouse to use for the connection
+
+
+<a id="nestedatt--starburst"></a>
+### Nested Schema for `starburst`
+
+Read-Only:
+
+- `host` (String) The hostname of the account to connect to.
+- `method` (String) The authentication method. Only LDAP for now.
+- `port` (Number) The port to connect to for this connection. Default=443
+
+
+<a id="nestedatt--synapse"></a>
+### Nested Schema for `synapse`
+
+Read-Only:
+
+- `database` (String) The database to connect to for this connection.
+- `host` (String) The server hostname.
+- `login_timeout` (Number) The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+- `port` (Number) The port to connect to for this connection. Default=1433
+- `query_timeout` (Number) The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+- `retries` (Number) The number of times to automatically retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.
diff --git a/docs/data-sources/global_connections.md b/docs/data-sources/global_connections.md
new file mode 100644
index 0000000..ca18821
--- /dev/null
+++ b/docs/data-sources/global_connections.md
@@ -0,0 +1,40 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "dbtcloud_global_connections Data Source - dbtcloud"
+subcategory: ""
+description: |-
+  All the connections created on the account with some summary information, like their name, type, when they were created/updated and the number of environments using them.
+---
+
+# dbtcloud_global_connections (Data Source)
+
+All the connections created on the account with some summary information, like their name, type, when they were created/updated and the number of environments using them.
+
+## Example Usage
+
+```terraform
+data dbtcloud_global_connections my_connections {
+}
+```
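+
+As a usage sketch (the check and local names are illustrative), the returned set can be combined with a `check` block to assert that no two connections share a name:
+
+```terraform
+locals {
+  connection_names = [for c in data.dbtcloud_global_connections.my_connections.connections : c.name]
+}
+
+check "no_duplicate_connection_names" {
+  assert {
+    condition = length(local.connection_names) == length(distinct(local.connection_names))
+    error_message = "Two or more connections share the same name."
+  }
+}
+```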
+
+<!-- schema generated by tfplugindocs -->
+## Schema
+
+### Read-Only
+
+- `connections` (Attributes Set) A set of all the connections (see [below for nested schema](#nestedatt--connections))
+
+<a id="nestedatt--connections"></a>
+### Nested Schema for `connections`
+
+Read-Only:
+
+- `adapter_version` (String) Version of the adapter used for the connection. Indicates the connection type
+- `created_at` (String) When the connection was created
+- `environment__count` (Number) Number of environments using this connection
+- `id` (Number) Connection Identifier
+- `is_ssh_tunnel_enabled` (Boolean) Whether the connection can use an SSH tunnel
+- `name` (String) Connection name
+- `oauth_configuration_id` (Number)
+- `private_link_endpoint_id` (Number) Private Link Endpoint ID.
+- `updated_at` (String) When the connection was updated
diff --git a/docs/data-sources/projects.md b/docs/data-sources/projects.md
new file mode 100644
index 0000000..9c82682
--- /dev/null
+++ b/docs/data-sources/projects.md
@@ -0,0 +1,88 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "dbtcloud_projects Data Source - dbtcloud"
+subcategory: ""
+description: |-
+  Retrieve all the projects created in dbt Cloud with an optional filter on parts of the project name.
+---
+
+# dbtcloud_projects (Data Source)
+
+Retrieve all the projects created in dbt Cloud with an optional filter on parts of the project name.
+
+## Example Usage
+
+```terraform
+// can be filtered by parts of the project name
+data dbtcloud_projects my_acme_projects {
+  name_contains = "acme"
+}
+
+// or it can return all projects when no filter is set
+data dbtcloud_projects my_projects {
+}
+
+// this can be used, for example, to make sure that no two distinct projects share the same name
+
+locals {
+  name_occurrences = {
+    for project in data.dbtcloud_projects.my_projects.projects : project.name => project.id...
+  }
+  duplicates_with_id = [
+    for name, project_id in local.name_occurrences : "'${name}':${join(",", project_id)}" if length(project_id) > 1
+  ]
+}
+
+check "no_different_projects_with_same_name" {
+  assert {
+    condition = length(local.duplicates_with_id) == 0
+    error_message = "There are duplicate project names: ${join(" ; ", local.duplicates_with_id)}"
+  }
+}
+```
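+
+The `...` after `project.id` enables Terraform's grouping mode for the `for` expression: each project name maps to the list of project IDs sharing it, and any list longer than one flags a duplicate name.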
+
+<!-- schema generated by tfplugindocs -->
+## Schema
+
+### Optional
+
+- `name_contains` (String) Used to filter projects by name. Optional
+
+### Read-Only
+
+- `projects` (Attributes Set) Set of projects with their details (see [below for nested schema](#nestedatt--projects))
+
+<a id="nestedatt--projects"></a>
+### Nested Schema for `projects`
+
+Read-Only:
+
+- `connection` (Attributes) Details for the connection linked to the project (see [below for nested schema](#nestedatt--projects--connection))
+- `created_at` (String) When the project was created
+- `dbt_project_subdirectory` (String) Subdirectory for the dbt project inside the git repo
+- `description` (String) Project description
+- `id` (Number) Project ID
+- `name` (String) Project name
+- `repository` (Attributes) Details for the repository linked to the project (see [below for nested schema](#nestedatt--projects--repository))
+- `semantic_layer_config_id` (Number) Semantic layer config ID
+- `updated_at` (String) When the project was last updated
+
+<a id="nestedatt--projects--connection"></a>
+### Nested Schema for `projects.connection`
+
+Read-Only:
+
+- `adapter_version` (String) Version of the adapter for the connection. Indicates which connection type it is
+- `id` (Number) Connection ID
+- `name` (String) Connection name
+
+
+<a id="nestedatt--projects--repository"></a>
+### Nested Schema for `projects.repository`
+
+Read-Only:
+
+- `id` (Number) Repository ID
+- `pull_request_url_template` (String) URL template for PRs
+- `remote_url` (String) URL of the git repo remote
diff --git a/examples/data-sources/dbtcloud_global_connection/data-source.tf b/examples/data-sources/dbtcloud_global_connection/data-source.tf
new file mode 100644
index 0000000..5b23d8c
--- /dev/null
+++ b/examples/data-sources/dbtcloud_global_connection/data-source.tf
@@ -0,0 +1,3 @@
+data dbtcloud_global_connection my_connection {
+  id = 1234
+}
\ No newline at end of file
diff --git a/examples/data-sources/dbtcloud_global_connections/data-source.tf b/examples/data-sources/dbtcloud_global_connections/data-source.tf
new file mode 100644
index 0000000..6028b28
--- /dev/null
+++ b/examples/data-sources/dbtcloud_global_connections/data-source.tf
@@ -0,0 +1,2 @@
+data dbtcloud_global_connections my_connections {
+}
\ No newline at end of file
diff --git a/examples/data-sources/dbtcloud_projects/data-source.tf b/examples/data-sources/dbtcloud_projects/data-source.tf
new file mode 100644
index 0000000..3d9a1ef
--- /dev/null
+++ b/examples/data-sources/dbtcloud_projects/data-source.tf
@@ -0,0 +1,28 @@
+
+// can be filtered by parts of the project name
+data dbtcloud_projects my_acme_projects {
+  name_contains = "acme"
+}
+
+// or it can return all projects when no filter is set
+data dbtcloud_projects my_projects {
+}
+
+// this can be used, for example, to make sure that no two distinct projects share the same name
+
+locals {
+  name_occurrences = {
+    for project in data.dbtcloud_projects.my_projects.projects : project.name => project.id...
+  }
+  duplicates_with_id = [
+    for name, project_id in local.name_occurrences : "'${name}':${join(",", project_id)}" if length(project_id) > 1
+  ]
+}
+
+check "no_different_projects_with_same_name" {
+  assert {
+    condition = length(local.duplicates_with_id) == 0
+    error_message = "There are duplicate project names: ${join(" ; ", local.duplicates_with_id)}"
+  }
+}
\ No newline at end of file
diff --git a/pkg/dbt_cloud/global_connection.go b/pkg/dbt_cloud/global_connection.go
index 1eb78b6..406c9ed 100644
--- a/pkg/dbt_cloud/global_connection.go
+++ b/pkg/dbt_cloud/global_connection.go
@@ -345,6 +345,13 @@ func (c *GlobalConnectionClient[T]) CreateUpdateEncryption(
 	return &resp.Data, nil
 }
 
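+// EmptyConfig is a placeholder adapter config, used when decoding a connection
+// payload where only the common fields matter and the adapter-specific config
+// can be ignored.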
+type EmptyConfig struct{}
+
+func (EmptyConfig) AdapterVersion() string {
+	return "n/a"
+}
+
 type SnowflakeConfig struct {
 	Account                *string                   `json:"account,omitempty"`
 	Database               *string                   `json:"database,omitempty"`
diff --git a/pkg/dbt_cloud/global_connections.go b/pkg/dbt_cloud/global_connections.go
new file mode 100644
index 0000000..6ab982a
--- /dev/null
+++ b/pkg/dbt_cloud/global_connections.go
@@ -0,0 +1,43 @@
+package dbt_cloud
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
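+// GlobalConnectionSummary is the summary representation of a connection
+// returned by the list endpoint, including the number of environments using it.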
+type GlobalConnectionSummary struct {
+	ID                    int64  `json:"id"`
+	CreatedAt             string `json:"created_at"`
+	UpdatedAt             string `json:"updated_at"`
+	AccountID             int64  `json:"account_id"`
+	Name                  string `json:"name"`
+	AdapterVersion        string `json:"adapter_version"`
+	PrivateLinkEndpointID *int64 `json:"private_link_endpoint_id"`
+	IsSSHTunnelEnabled    bool   `json:"is_ssh_tunnel_enabled"`
+	OauthConfigurationID  *int64 `json:"oauth_configuration_id"`
+	EnvironmentCount      int64  `json:"environment__count"`
+}
+
+func (c *Client) GetAllConnections() ([]GlobalConnectionSummary, error) {
+
+	url := fmt.Sprintf(
+		`%s/v3/accounts/%d/connections/`,
+		c.HostURL,
+		c.AccountID,
+	)
+
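+	// GetData returns the raw API results, one untyped JSON object per connection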
+	allConnectionsRaw := c.GetData(url)
+
+	allConnections := []GlobalConnectionSummary{}
+	for _, connection := range allConnectionsRaw {
+
+		// round-trip through JSON to decode the untyped object into the summary struct
+		data, err := json.Marshal(connection)
+		if err != nil {
+			return nil, err
+		}
+		currentConnection := GlobalConnectionSummary{}
+		err = json.Unmarshal(data, &currentConnection)
+		if err != nil {
+			return nil, err
+		}
+		allConnections = append(allConnections, currentConnection)
+	}
+	return allConnections, nil
+}
diff --git a/pkg/dbt_cloud/projects.go b/pkg/dbt_cloud/projects.go
new file mode 100644
index 0000000..b0bea42
--- /dev/null
+++ b/pkg/dbt_cloud/projects.go
@@ -0,0 +1,63 @@
+package dbt_cloud
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
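+// ProjectConnectionRepository is a project payload that embeds summary details
+// for the related connection and repository (fetched via include_related).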
+type ProjectConnectionRepository struct {
+	Name                   string                                `json:"name,omitempty"`
+	AccountID              int64                                 `json:"account_id,omitempty"`
+	Description            string                                `json:"description,omitempty"`
+	ConnectionID           int64                                 `json:"connection_id,omitempty"`
+	RepositoryID           int64                                 `json:"repository_id,omitempty"`
+	SemanticLayerConfigID  *int64                                `json:"semantic_layer_config_id,omitempty"`
+	SkippedSetup           bool                                  `json:"skipped_setup,omitempty"`
+	State                  int64                                 `json:"state,omitempty"`
+	DbtProjectSubdirectory string                                `json:"dbt_project_subdirectory,omitempty"`
+	DocsJobID              *int64                                `json:"docs_job_id,omitempty"`
+	FreshnessJobID         *int64                                `json:"freshness_job_id,omitempty"`
+	ID                     int64                                 `json:"id,omitempty"`
+	CreatedAt              string                                `json:"created_at,omitempty"`
+	UpdatedAt              string                                `json:"updated_at,omitempty"`
+	Connection             *globalConnectionPayload[EmptyConfig] `json:"connection,omitempty"`
+	Environments           any                                   `json:"environments,omitempty"`
+	Repository             *Repository                           `json:"repository,omitempty"`
+	GroupPermissions       any                                   `json:"group_permissions,omitempty"`
+	DocsJob                any                                   `json:"docs_job,omitempty"`
+	FreshnessJob           any                                   `json:"freshness_job,omitempty"`
+}
+
+func (c *Client) GetAllProjects(nameContains string) ([]ProjectConnectionRepository, error) {
+	var url string
+
+	if nameContains == "" {
+		url = fmt.Sprintf(
+			`%s/v3/accounts/%d/projects/?limit=100&order_by=name&include_related=["repository","connection"]`,
+			c.HostURL,
+			c.AccountID,
+		)
+	} else {
+		url = fmt.Sprintf(
+			`%s/v3/accounts/%d/projects/?name__icontains=%s&limit=100&order_by=name&include_related=["repository","connection"]`,
+			c.HostURL,
+			c.AccountID,
+			nameContains,
+		)
+	}
+
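+	// GetData pages through the API results, so accounts with more than 100 projects are still fully returned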
+	allProjectsRaw := c.GetData(url)
+
+	allProjects := []ProjectConnectionRepository{}
+	for _, project := range allProjectsRaw {
+
+		// round-trip through JSON to decode the untyped object into a typed project struct
+		data, err := json.Marshal(project)
+		if err != nil {
+			return nil, err
+		}
+		currentProject := ProjectConnectionRepository{}
+		err = json.Unmarshal(data, &currentProject)
+		if err != nil {
+			return nil, err
+		}
+		allProjects = append(allProjects, currentProject)
+	}
+	return allProjects, nil
+}
diff --git a/pkg/framework/objects/global_connection/common.go b/pkg/framework/objects/global_connection/common.go
new file mode 100644
index 0000000..1f4ee78
--- /dev/null
+++ b/pkg/framework/objects/global_connection/common.go
@@ -0,0 +1,638 @@
+package global_connection
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/dbt_cloud"
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/helper"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
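+// readGeneric reads a global connection of any supported adapter type into the
+// given state. It is shared by the resource and the datasource, and returns the
+// action "removeFromState" when the connection no longer exists upstream.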
+func readGeneric(
+	client *dbt_cloud.Client,
+	state *GlobalConnectionResourceModel,
+	adapter string,
+) (*GlobalConnectionResourceModel, string, error) {
+
+	connectionID := state.ID.ValueInt64()
+
+	switch {
+	case state.SnowflakeConfig != nil || strings.HasPrefix(adapter, "snowflake_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.SnowflakeConfig == nil {
+			state.SnowflakeConfig = &SnowflakeConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.SnowflakeConfig](client)
+
+		common, snowflakeCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(snowflakeCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// snowflake settings
+		state.SnowflakeConfig.Account = types.StringPointerValue(snowflakeCfg.Account)
+		state.SnowflakeConfig.Database = types.StringPointerValue(snowflakeCfg.Database)
+		state.SnowflakeConfig.Warehouse = types.StringPointerValue(snowflakeCfg.Warehouse)
+		state.SnowflakeConfig.ClientSessionKeepAlive = types.BoolPointerValue(
+			snowflakeCfg.ClientSessionKeepAlive,
+		)
+		state.SnowflakeConfig.AllowSso = types.BoolPointerValue(snowflakeCfg.AllowSso)
+
+		// nullable optional fields
+		// TODO: decide if it is better to read it as string, *string or nullable.Nullable[string] on the dbt_cloud side
+		// in this case role can never be empty so this works but we might have cases where null and empty are different
+		if !snowflakeCfg.Role.IsNull() {
+			state.SnowflakeConfig.Role = types.StringValue(snowflakeCfg.Role.MustGet())
+		} else {
+			state.SnowflakeConfig.Role = types.StringNull()
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: OauthClientID, OauthClientSecret
+
+	case state.BigQueryConfig != nil || strings.HasPrefix(adapter, "bigquery_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.BigQueryConfig == nil {
+			state.BigQueryConfig = &BigQueryConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.BigQueryConfig](client)
+
+		common, bigqueryCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(bigqueryCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// BigQuery settings
+		state.BigQueryConfig.GCPProjectID = types.StringPointerValue(bigqueryCfg.ProjectID)
+		state.BigQueryConfig.TimeoutSeconds = types.Int64PointerValue(bigqueryCfg.TimeoutSeconds)
+		state.BigQueryConfig.PrivateKeyID = types.StringPointerValue(bigqueryCfg.PrivateKeyID)
+		state.BigQueryConfig.ClientEmail = types.StringPointerValue(bigqueryCfg.ClientEmail)
+		state.BigQueryConfig.ClientID = types.StringPointerValue(bigqueryCfg.ClientID)
+		state.BigQueryConfig.AuthURI = types.StringPointerValue(bigqueryCfg.AuthURI)
+		state.BigQueryConfig.TokenURI = types.StringPointerValue(bigqueryCfg.TokenURI)
+		state.BigQueryConfig.AuthProviderX509CertURL = types.StringPointerValue(
+			bigqueryCfg.AuthProviderX509CertURL,
+		)
+		state.BigQueryConfig.ClientX509CertURL = types.StringPointerValue(
+			bigqueryCfg.ClientX509CertURL,
+		)
+		state.BigQueryConfig.Retries = types.Int64PointerValue(bigqueryCfg.Retries)
+		state.BigQueryConfig.Scopes = helper.SliceStringToSliceTypesString(bigqueryCfg.Scopes)
+
+		// nullable optional fields
+		if !bigqueryCfg.Priority.IsNull() {
+			state.BigQueryConfig.Priority = types.StringValue(bigqueryCfg.Priority.MustGet())
+		} else {
+			state.BigQueryConfig.Priority = types.StringNull()
+		}
+
+		if !bigqueryCfg.Location.IsNull() {
+			state.BigQueryConfig.Location = types.StringValue(bigqueryCfg.Location.MustGet())
+		} else {
+			state.BigQueryConfig.Location = types.StringNull()
+		}
+
+		if !bigqueryCfg.MaximumBytesBilled.IsNull() {
+			state.BigQueryConfig.MaximumBytesBilled = types.Int64Value(
+				bigqueryCfg.MaximumBytesBilled.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.MaximumBytesBilled = types.Int64Null()
+		}
+
+		if !bigqueryCfg.ExecutionProject.IsNull() {
+			state.BigQueryConfig.ExecutionProject = types.StringValue(
+				bigqueryCfg.ExecutionProject.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.ExecutionProject = types.StringNull()
+		}
+
+		if !bigqueryCfg.ImpersonateServiceAccount.IsNull() {
+			state.BigQueryConfig.ImpersonateServiceAccount = types.StringValue(
+				bigqueryCfg.ImpersonateServiceAccount.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.ImpersonateServiceAccount = types.StringNull()
+		}
+
+		if !bigqueryCfg.JobRetryDeadlineSeconds.IsNull() {
+			state.BigQueryConfig.JobRetryDeadlineSeconds = types.Int64Value(
+				bigqueryCfg.JobRetryDeadlineSeconds.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.JobRetryDeadlineSeconds = types.Int64Null()
+		}
+
+		if !bigqueryCfg.JobCreationTimeoutSeconds.IsNull() {
+			state.BigQueryConfig.JobCreationTimeoutSeconds = types.Int64Value(
+				bigqueryCfg.JobCreationTimeoutSeconds.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.JobCreationTimeoutSeconds = types.Int64Null()
+		}
+
+		if !bigqueryCfg.GcsBucket.IsNull() {
+			state.BigQueryConfig.GcsBucket = types.StringValue(bigqueryCfg.GcsBucket.MustGet())
+		} else {
+			state.BigQueryConfig.GcsBucket = types.StringNull()
+		}
+
+		if !bigqueryCfg.DataprocRegion.IsNull() {
+			state.BigQueryConfig.DataprocRegion = types.StringValue(
+				bigqueryCfg.DataprocRegion.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.DataprocRegion = types.StringNull()
+		}
+
+		if !bigqueryCfg.DataprocClusterName.IsNull() {
+			state.BigQueryConfig.DataprocClusterName = types.StringValue(
+				bigqueryCfg.DataprocClusterName.MustGet(),
+			)
+		} else {
+			state.BigQueryConfig.DataprocClusterName = types.StringNull()
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: ApplicationID, ApplicationSecret, PrivateKey
+
+	case state.DatabricksConfig != nil || strings.HasPrefix(adapter, "databricks_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.DatabricksConfig == nil {
+			state.DatabricksConfig = &DatabricksConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.DatabricksConfig](client)
+
+		common, databricksCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(databricksCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Databricks settings
+		state.DatabricksConfig.Host = types.StringPointerValue(databricksCfg.Host)
+		state.DatabricksConfig.HTTPPath = types.StringPointerValue(databricksCfg.HTTPPath)
+
+		// nullable optional fields
+		if !databricksCfg.Catalog.IsNull() {
+			state.DatabricksConfig.Catalog = types.StringValue(databricksCfg.Catalog.MustGet())
+		} else {
+			state.DatabricksConfig.Catalog = types.StringNull()
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: ClientID, ClientSecret
+
+	case state.RedshiftConfig != nil || strings.HasPrefix(adapter, "redshift_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.RedshiftConfig == nil {
+			state.RedshiftConfig = &RedshiftConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.RedshiftConfig](client)
+
+		common, redshiftCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		sshTunnel, err := c.GetEncryptionsForConnection(connectionID)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(redshiftCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Redshift settings
+		state.RedshiftConfig.HostName = types.StringPointerValue(redshiftCfg.HostName)
+		state.RedshiftConfig.Port = types.Int64PointerValue(redshiftCfg.Port)
+
+		// nullable optional fields
+		if !redshiftCfg.DBName.IsNull() {
+			state.RedshiftConfig.DBName = types.StringValue(redshiftCfg.DBName.MustGet())
+		} else {
+			state.RedshiftConfig.DBName = types.StringNull()
+		}
+
+		// SSH tunnel settings
+		if len(*sshTunnel) > 0 {
+
+			state.RedshiftConfig.SSHTunnel = &SSHTunnelConfig{
+				ID:        types.Int64PointerValue((*sshTunnel)[0].ID),
+				HostName:  types.StringValue((*sshTunnel)[0].HostName),
+				Port:      types.Int64Value((*sshTunnel)[0].Port),
+				Username:  types.StringValue((*sshTunnel)[0].Username),
+				PublicKey: types.StringValue((*sshTunnel)[0].PublicKey),
+			}
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Redshift
+
+	case state.PostgresConfig != nil || strings.HasPrefix(adapter, "postgres_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.PostgresConfig == nil {
+			state.PostgresConfig = &PostgresConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.PostgresConfig](client)
+
+		common, postgresCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		sshTunnel, err := c.GetEncryptionsForConnection(connectionID)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(postgresCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Postgres settings
+		state.PostgresConfig.HostName = types.StringPointerValue(postgresCfg.HostName)
+		state.PostgresConfig.Port = types.Int64PointerValue(postgresCfg.Port)
+
+		// nullable optional fields
+		if !postgresCfg.DBName.IsNull() {
+			state.PostgresConfig.DBName = types.StringValue(postgresCfg.DBName.MustGet())
+		} else {
+			state.PostgresConfig.DBName = types.StringNull()
+		}
+
+		// SSH tunnel settings
+		if len(*sshTunnel) > 0 {
+
+			state.PostgresConfig.SSHTunnel = &SSHTunnelConfig{
+				ID:        types.Int64PointerValue((*sshTunnel)[0].ID),
+				HostName:  types.StringValue((*sshTunnel)[0].HostName),
+				Port:      types.Int64Value((*sshTunnel)[0].Port),
+				Username:  types.StringValue((*sshTunnel)[0].Username),
+				PublicKey: types.StringValue((*sshTunnel)[0].PublicKey),
+			}
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Postgres
+
+	case state.FabricConfig != nil || strings.HasPrefix(adapter, "fabric_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.FabricConfig == nil {
+			state.FabricConfig = &FabricConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.FabricConfig](client)
+
+		common, fabricCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(fabricCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Fabric settings
+		state.FabricConfig.Server = types.StringPointerValue(fabricCfg.Server)
+		state.FabricConfig.Port = types.Int64PointerValue(fabricCfg.Port)
+		state.FabricConfig.Database = types.StringPointerValue(fabricCfg.Database)
+		state.FabricConfig.Retries = types.Int64PointerValue(fabricCfg.Retries)
+		state.FabricConfig.LoginTimeout = types.Int64PointerValue(fabricCfg.LoginTimeout)
+		state.FabricConfig.QueryTimeout = types.Int64PointerValue(fabricCfg.QueryTimeout)
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Fabric
+
+	case state.SynapseConfig != nil || strings.HasPrefix(adapter, "synapse_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.SynapseConfig == nil {
+			state.SynapseConfig = &SynapseConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.SynapseConfig](client)
+
+		common, synapseCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(synapseCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Synapse settings
+		state.SynapseConfig.Host = types.StringPointerValue(synapseCfg.Host)
+		state.SynapseConfig.Port = types.Int64PointerValue(synapseCfg.Port)
+		state.SynapseConfig.Database = types.StringPointerValue(synapseCfg.Database)
+		state.SynapseConfig.Retries = types.Int64PointerValue(synapseCfg.Retries)
+		state.SynapseConfig.LoginTimeout = types.Int64PointerValue(synapseCfg.LoginTimeout)
+		state.SynapseConfig.QueryTimeout = types.Int64PointerValue(synapseCfg.QueryTimeout)
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Synapse
+
+	case state.StarburstConfig != nil || strings.HasPrefix(adapter, "trino_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.StarburstConfig == nil {
+			state.StarburstConfig = &StarburstConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.StarburstConfig](client)
+
+		common, starburstCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(starburstCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Starburst settings
+		state.StarburstConfig.Method = types.StringPointerValue(starburstCfg.Method)
+		state.StarburstConfig.Host = types.StringPointerValue(starburstCfg.Host)
+		state.StarburstConfig.Port = types.Int64PointerValue(starburstCfg.Port)
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Starburst
+
+	case state.AthenaConfig != nil || strings.HasPrefix(adapter, "athena_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.AthenaConfig == nil {
+			state.AthenaConfig = &AthenaConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.AthenaConfig](client)
+
+		common, athenaCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(athenaCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Athena settings
+		state.AthenaConfig.RegionName = types.StringPointerValue(athenaCfg.RegionName)
+		state.AthenaConfig.Database = types.StringPointerValue(athenaCfg.Database)
+		state.AthenaConfig.S3StagingDir = types.StringPointerValue(athenaCfg.S3StagingDir)
+
+		// nullable optional fields
+		if !athenaCfg.WorkGroup.IsNull() {
+			state.AthenaConfig.WorkGroup = types.StringValue(athenaCfg.WorkGroup.MustGet())
+		} else {
+			state.AthenaConfig.WorkGroup = types.StringNull()
+		}
+		if !athenaCfg.SparkWorkGroup.IsNull() {
+			state.AthenaConfig.SparkWorkGroup = types.StringValue(
+				athenaCfg.SparkWorkGroup.MustGet(),
+			)
+		} else {
+			state.AthenaConfig.SparkWorkGroup = types.StringNull()
+		}
+		if !athenaCfg.S3DataDir.IsNull() {
+			state.AthenaConfig.S3DataDir = types.StringValue(athenaCfg.S3DataDir.MustGet())
+		} else {
+			state.AthenaConfig.S3DataDir = types.StringNull()
+		}
+		if !athenaCfg.S3DataNaming.IsNull() {
+			state.AthenaConfig.S3DataNaming = types.StringValue(athenaCfg.S3DataNaming.MustGet())
+		} else {
+			state.AthenaConfig.S3DataNaming = types.StringNull()
+		}
+		if !athenaCfg.S3TmpTableDir.IsNull() {
+			state.AthenaConfig.S3TmpTableDir = types.StringValue(athenaCfg.S3TmpTableDir.MustGet())
+		} else {
+			state.AthenaConfig.S3TmpTableDir = types.StringNull()
+		}
+		if !athenaCfg.PollInterval.IsNull() {
+			state.AthenaConfig.PollInterval = types.Int64Value(athenaCfg.PollInterval.MustGet())
+		} else {
+			state.AthenaConfig.PollInterval = types.Int64Null()
+		}
+		if !athenaCfg.NumRetries.IsNull() {
+			state.AthenaConfig.NumRetries = types.Int64Value(athenaCfg.NumRetries.MustGet())
+		} else {
+			state.AthenaConfig.NumRetries = types.Int64Null()
+		}
+		if !athenaCfg.NumBoto3Retries.IsNull() {
+			state.AthenaConfig.NumBoto3Retries = types.Int64Value(
+				athenaCfg.NumBoto3Retries.MustGet(),
+			)
+		} else {
+			state.AthenaConfig.NumBoto3Retries = types.Int64Null()
+		}
+		if !athenaCfg.NumIcebergRetries.IsNull() {
+			state.AthenaConfig.NumIcebergRetries = types.Int64Value(
+				athenaCfg.NumIcebergRetries.MustGet(),
+			)
+		} else {
+			state.AthenaConfig.NumIcebergRetries = types.Int64Null()
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Athena
+
+	case state.ApacheSparkConfig != nil || strings.HasPrefix(adapter, "apache_spark_"):
+		// in case we use it for a datasource, we need to set the Config to not be nil
+		if state.ApacheSparkConfig == nil {
+			state.ApacheSparkConfig = &ApacheSparkConfig{}
+		}
+
+		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.ApacheSparkConfig](client)
+
+		common, sparkCfg, err := c.Get(connectionID)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "resource-not-found") {
+				return nil, "removeFromState", nil
+			}
+			return nil, "", err
+		}
+
+		// global settings
+		state.ID = types.Int64PointerValue(common.ID)
+		state.AdapterVersion = types.StringValue(sparkCfg.AdapterVersion())
+		state.Name = types.StringPointerValue(common.Name)
+		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
+		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
+
+		// nullable common fields
+		if !common.PrivateLinkEndpointId.IsNull() {
+			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
+		} else {
+			state.PrivateLinkEndpointId = types.StringNull()
+		}
+
+		// Spark settings
+		state.ApacheSparkConfig.Method = types.StringPointerValue(sparkCfg.Method)
+		state.ApacheSparkConfig.Host = types.StringPointerValue(sparkCfg.Host)
+		state.ApacheSparkConfig.Port = types.Int64PointerValue(sparkCfg.Port)
+		state.ApacheSparkConfig.Cluster = types.StringPointerValue(sparkCfg.Cluster)
+		state.ApacheSparkConfig.ConnectTimeout = types.Int64PointerValue(sparkCfg.ConnectTimeout)
+		state.ApacheSparkConfig.ConnectRetries = types.Int64PointerValue(sparkCfg.ConnectRetries)
+
+		// nullable optional fields
+		if !sparkCfg.Organization.IsNull() {
+			state.ApacheSparkConfig.Organization = types.StringValue(
+				sparkCfg.Organization.MustGet(),
+			)
+		} else {
+			state.ApacheSparkConfig.Organization = types.StringNull()
+		}
+		if !sparkCfg.User.IsNull() {
+			state.ApacheSparkConfig.User = types.StringValue(sparkCfg.User.MustGet())
+		} else {
+			state.ApacheSparkConfig.User = types.StringNull()
+		}
+		if !sparkCfg.Auth.IsNull() {
+			state.ApacheSparkConfig.Auth = types.StringValue(sparkCfg.Auth.MustGet())
+		} else {
+			state.ApacheSparkConfig.Auth = types.StringNull()
+		}
+
+		// We don't set the sensitive fields when we read because those are secret and never returned by the API
+		// sensitive fields: N/A for Spark
+
+	default:
+		return nil, "", fmt.Errorf("unknown connection type for adapter %s", adapter)
+	}
+
+	return state, "", nil
+}
diff --git a/pkg/framework/objects/global_connection/data_source.go.todo b/pkg/framework/objects/global_connection/data_source.go
similarity index 59%
rename from pkg/framework/objects/global_connection/data_source.go.todo
rename to pkg/framework/objects/global_connection/data_source.go
index 31ceac0..34be1b7 100644
--- a/pkg/framework/objects/global_connection/data_source.go.todo
+++ b/pkg/framework/objects/global_connection/data_source.go
@@ -5,7 +5,6 @@ import (
 
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/dbt_cloud"
 	"github.com/hashicorp/terraform-plugin-framework/datasource"
-	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
 )
 
 var (
@@ -29,25 +28,43 @@ func (d *globalConnectionDataSource) Metadata(
 	resp.TypeName = req.ProviderTypeName + "_global_connection"
 }
 
-func (d *globalConnectionDataSource) Schema(
-	_ context.Context,
-	_ datasource.SchemaRequest,
-	resp *datasource.SchemaResponse,
-) {
-	resp.Schema = schema.Schema{
-		Description: "Retrieve notification details",
-		Attributes:  map[string]schema.Attribute{
-			// TODO
-		},
-	}
-}
-
 func (d *globalConnectionDataSource) Read(
 	ctx context.Context,
 	req datasource.ReadRequest,
 	resp *datasource.ReadResponse,
 ) {
-	// TODO, similar to read resource
+	var state GlobalConnectionResourceModel
+
+	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	connectionID := state.ID.ValueInt64()
+
+	globalConnectionResponse, err := d.client.GetGlobalConnectionAdapter(connectionID)
+	if err != nil {
+		resp.Diagnostics.AddError("Error getting the connection type", err.Error())
+		return
+	}
+
+	newState, action, err := readGeneric(
+		d.client,
+		&state,
+		globalConnectionResponse.Data.AdapterVersion,
+	)
+	if err != nil {
+		resp.Diagnostics.AddError("Error reading the connection", err.Error())
+		return
+	}
+
+	if action == "removeFromState" {
+		resp.Diagnostics.AddWarning(
+			"Resource not found",
+			"The connection resource was not found and has been removed from the state.",
+		)
+		resp.State.RemoveResource(ctx)
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, newState)...)
 }
 
 func (d *globalConnectionDataSource) Configure(
diff --git a/pkg/framework/objects/global_connection/data_source_acceptance_test.go b/pkg/framework/objects/global_connection/data_source_acceptance_test.go
new file mode 100644
index 0000000..294c8a0
--- /dev/null
+++ b/pkg/framework/objects/global_connection/data_source_acceptance_test.go
@@ -0,0 +1,93 @@
+package global_connection_test
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/acctest_helper"
+	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+)
+
+func TestAccDbtCloudGlobalConnectionDatasource(t *testing.T) {
+	connectionName := strings.ToUpper(acctest.RandStringFromCharSet(10, acctest.CharSetAlpha))
+	oAuthClientID := strings.ToUpper(acctest.RandStringFromCharSet(10, acctest.CharSetAlpha))
+	oAuthClientSecret := strings.ToUpper(acctest.RandStringFromCharSet(10, acctest.CharSetAlpha))
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest_helper.TestAccPreCheck(t) },
+		ProtoV6ProviderFactories: acctest_helper.TestAccProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccDbtCloudGlobalConnectionDatasourceBasicConfig(
+					connectionName,
+					oAuthClientID,
+					oAuthClientSecret,
+				),
+				// we check the computed values; for the other attributes the test suite already checks that the plan and state match
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connection.test",
+						"id",
+					),
+					resource.TestCheckResourceAttr(
+						"data.dbtcloud_global_connection.test",
+						"adapter_version",
+						"snowflake_v0",
+					),
+					resource.TestCheckResourceAttr(
+						"data.dbtcloud_global_connection.test",
+						"name",
+						connectionName,
+					),
+					resource.TestCheckResourceAttr(
+						"data.dbtcloud_global_connection.test",
+						"is_ssh_tunnel_enabled",
+						"false",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connection.test",
+						"snowflake.account",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connection.test",
+						"snowflake.database",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connection.test",
+						"snowflake.warehouse",
+					),
+				),
+			},
+		},
+	})
+
+}
+
+func testAccDbtCloudGlobalConnectionDatasourceBasicConfig(
+	connectionName, oAuthClientID, oAuthClientSecret string,
+) string {
+	return fmt.Sprintf(`
+
+resource dbtcloud_global_connection test {
+  name = "%s"
+
+  snowflake = {
+    account = "account"
+    warehouse = "warehouse"
+    database = "database"
+    allow_sso = true
+    oauth_client_id = "%s"
+    oauth_client_secret = "%s"
+    client_session_keep_alive = false
+    role = "role"
+  }
+}
+
+data dbtcloud_global_connection test {
+  id = dbtcloud_global_connection.test.id
+}
+
+`, connectionName, oAuthClientID, oAuthClientSecret)
+}
diff --git a/pkg/framework/objects/global_connection/data_source_acceptance_test.go.todo b/pkg/framework/objects/global_connection/data_source_acceptance_test.go.todo
deleted file mode 100644
index a85d7f1..0000000
--- a/pkg/framework/objects/global_connection/data_source_acceptance_test.go.todo
+++ /dev/null
@@ -1,95 +0,0 @@
-package global_connection_test
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/acctest_helper"
-	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
-)
-
-func TestAccDbtCloudGlobalConnectionDataSource(t *testing.T) {
-
-	var userID string
-	if acctest_helper.IsDbtCloudPR() {
-		userID = "1"
-	} else {
-		userID = "100"
-	}
-
-	currentTime := time.Now().Unix()
-	notificationEmail := fmt.Sprintf("%d-datasource@nomail.com", currentTime)
-
-	randomProjectName := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)
-
-	config := notification(randomProjectName, userID, notificationEmail)
-
-	check := resource.ComposeTestCheckFunc(
-		resource.TestCheckResourceAttr(
-			"data.dbtcloud_notification.test_notification_external",
-			"notification_type",
-			"4",
-		),
-		resource.TestCheckResourceAttrSet(
-			"data.dbtcloud_notification.test_notification_external",
-			"on_failure.0",
-		),
-		resource.TestCheckResourceAttr(
-			"data.dbtcloud_notification.test_notification_external",
-			"external_email",
-			notificationEmail,
-		),
-	)
-
-	resource.ParallelTest(t, resource.TestCase{
-		ProtoV6ProviderFactories: acctest_helper.TestAccProtoV6ProviderFactories,
-		Steps: []resource.TestStep{
-			{
-				Config: config,
-				Check:  check,
-			},
-		},
-	})
-}
-
-func notification(projectName, userID, notificationEmail string) string {
-	return fmt.Sprintf(`
-	resource "dbtcloud_project" "test_notification_project" {
-		name = "%s"
-	}
-		
-	resource "dbtcloud_environment" "test_notification_environment" {
-		project_id  = dbtcloud_project.test_notification_project.id
-		name        = "Test Env Notification"
-		dbt_version = "%s"
-		type        = "development"
-	}
-		
-	resource "dbtcloud_job" "test_notification_job_1" {
-		name           = "Job 1 TF"
-		project_id     = dbtcloud_project.test_notification_project.id
-		environment_id = dbtcloud_environment.test_notification_environment.environment_id
-		execute_steps = [
-			"dbt compile"
-		]
-		triggers = {
-			"github_webhook" : false,
-			"git_provider_webhook" : false,
-			"schedule" : false,
-		}
-	}
-
-	resource "dbtcloud_notification" "test_notification_external" {
-		user_id           = %s
-		on_failure        = [dbtcloud_job.test_notification_job_1.id]
-		notification_type = 4
-		external_email    = "%s"
-	}
-
-	data "dbtcloud_notification" "test_notification_external" {
-		notification_id = dbtcloud_notification.test_notification_external.id
-	}
-    `, projectName, acctest_helper.DBT_CLOUD_VERSION, userID, notificationEmail)
-}
diff --git a/pkg/framework/objects/global_connection/data_source_all.go b/pkg/framework/objects/global_connection/data_source_all.go
new file mode 100644
index 0000000..36c9497
--- /dev/null
+++ b/pkg/framework/objects/global_connection/data_source_all.go
@@ -0,0 +1,87 @@
+package global_connection
+
+import (
+	"context"
+
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/dbt_cloud"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+var (
+	_ datasource.DataSource              = &globalConnectionsDataSource{}
+	_ datasource.DataSourceWithConfigure = &globalConnectionsDataSource{}
+)
+
+func GlobalConnectionsDataSource() datasource.DataSource {
+	return &globalConnectionsDataSource{}
+}
+
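+// globalConnectionsDataSource returns a summary of every global connection in
+// the account. A minimal usage sketch (attribute names from the model in this
+// package):
+//
+//	data "dbtcloud_global_connections" "all" {}
+//	// data.dbtcloud_global_connections.all.connections exposes id, name,
+//	// adapter_version, environment__count, etc.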
+type globalConnectionsDataSource struct {
+	client *dbt_cloud.Client
+}
+
+func (d *globalConnectionsDataSource) Metadata(
+	_ context.Context,
+	req datasource.MetadataRequest,
+	resp *datasource.MetadataResponse,
+) {
+	resp.TypeName = req.ProviderTypeName + "_global_connections"
+}
+
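+// Read fetches every connection from the API and stores the resulting
+// summaries in state.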
+func (d *globalConnectionsDataSource) Read(
+	ctx context.Context,
+	req datasource.ReadRequest,
+	resp *datasource.ReadResponse,
+) {
+	var state GlobalConnectionsDatasourceModel
+
+	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	apiAllConnections, err := d.client.GetAllConnections()
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Issue when retrieving connections",
+			err.Error(),
+		)
+		return
+	}
+
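+	// map the API payload onto the Terraform summary model; nullable IDs are
+	// carried over as pointers so missing values stay null in state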
+	allConnections := []GlobalConnectionSummary{}
+	for _, connection := range apiAllConnections {
+
+		currentConnection := GlobalConnectionSummary{}
+		currentConnection.ID = types.Int64Value(connection.ID)
+		currentConnection.Name = types.StringValue(connection.Name)
+		currentConnection.CreatedAt = types.StringValue(connection.CreatedAt)
+		currentConnection.UpdatedAt = types.StringValue(connection.UpdatedAt)
+		currentConnection.AdapterVersion = types.StringValue(connection.AdapterVersion)
+		currentConnection.PrivateLinkEndpointID = types.Int64PointerValue(
+			connection.PrivateLinkEndpointID,
+		)
+		currentConnection.IsSSHTunnelEnabled = types.BoolValue(connection.IsSSHTunnelEnabled)
+		currentConnection.OauthConfigurationID = types.Int64PointerValue(
+			connection.OauthConfigurationID,
+		)
+		currentConnection.EnvironmentCount = types.Int64Value(connection.EnvironmentCount)
+
+		allConnections = append(allConnections, currentConnection)
+	}
+	state.Connections = allConnections
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, state)...)
+}
+
+func (d *globalConnectionsDataSource) Configure(
+	_ context.Context,
+	req datasource.ConfigureRequest,
+	resp *datasource.ConfigureResponse,
+) {
+	switch c := req.ProviderData.(type) {
+	case nil: // do nothing
+	case *dbt_cloud.Client:
+		d.client = c
+	default:
+		resp.Diagnostics.AddError("Missing client", "A client is required to configure the global connection resource")
+	}
+}
diff --git a/pkg/framework/objects/global_connection/data_source_all_acceptance_test.go b/pkg/framework/objects/global_connection/data_source_all_acceptance_test.go
new file mode 100644
index 0000000..4a6741f
--- /dev/null
+++ b/pkg/framework/objects/global_connection/data_source_all_acceptance_test.go
@@ -0,0 +1,109 @@
+package global_connection_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/acctest_helper"
+	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+)
+
+func TestAccDbtCloudGlobalConnectionsDatasource(t *testing.T) {
+
+	connectionName := acctest.RandStringFromCharSet(19, acctest.CharSetAlphaNum)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest_helper.TestAccPreCheck(t) },
+		ProtoV6ProviderFactories: acctest_helper.TestAccProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccDbtCloudGlobalConnectionsDatasourceBasicConfig(
+					connectionName,
+				),
+				// we only check the computed values; the test suite already verifies that plan and state match for the other fields
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.0.id",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.0.name",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.0.adapter_version",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.0.environment__count",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.1.id",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.1.name",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.1.adapter_version",
+					),
+					resource.TestCheckResourceAttrSet(
+						"data.dbtcloud_global_connections.test",
+						"connections.1.environment__count",
+					),
+				),
+			},
+		},
+	})
+
+}
+
+func testAccDbtCloudGlobalConnectionsDatasourceBasicConfig(
+	connectionName string,
+) string {
+
+	return fmt.Sprintf(`
+
+resource dbtcloud_global_connection connection1 {
+  name = "%[1]s1"
+
+  snowflake = {
+    account = "account"
+    warehouse = "warehouse"
+    database = "database"
+    allow_sso = true
+    client_session_keep_alive = false
+    role = "role"
+  }
+}
+
+resource dbtcloud_global_connection connection2 {
+  name = "%[1]s2"
+
+  bigquery = {
+    gcp_project_id              = "my-gcp-project-id"
+    timeout_seconds             = 1000
+    private_key_id              = "my-private-key-id"
+    private_key                 = "ABCDEFGHIJKL"
+    client_email                = "my_client_email"
+    client_id                   = "my_client_id"
+    auth_uri                    = "my_auth_uri"
+    token_uri                   = "my_token_uri"
+    auth_provider_x509_cert_url = "my_auth_provider_x509_cert_url"
+    client_x509_cert_url        = "my_client_x509_cert_url"
+    application_id              = "oauth_application_id"
+    application_secret          = "oauth_secret_id"
+  }
+}
+
+data dbtcloud_global_connections test {
+  depends_on = [dbtcloud_global_connection.connection1, dbtcloud_global_connection.connection2]
+}
+
+`, connectionName)
+}
diff --git a/pkg/framework/objects/global_connection/model.go b/pkg/framework/objects/global_connection/model.go
index 90d3aa6..a3295ef 100644
--- a/pkg/framework/objects/global_connection/model.go
+++ b/pkg/framework/objects/global_connection/model.go
@@ -254,17 +254,24 @@ type ApacheSparkConfig struct {
 	Cluster        types.String `tfsdk:"cluster"`
 	ConnectTimeout types.Int64  `tfsdk:"connect_timeout"`
 	ConnectRetries types.Int64  `tfsdk:"connect_retries"`
-	// nullables
+	// nullable
 	Organization types.String `tfsdk:"organization"`
 	User         types.String `tfsdk:"user"`
 	Auth         types.String `tfsdk:"auth"`
 }
-type GlobalConnectionDataSourceModel struct {
-	// TBD, and do we use the same as the for the Resource model?
+
+type GlobalConnectionsDatasourceModel struct {
+	Connections []GlobalConnectionSummary `tfsdk:"connections"`
 }
 
-// func ConvertGlobalConnectionModelToData(
-// 	model GlobalConnectionResourceModel,
-// ) dbt_cloud.Notification {
-// TBD
-// }
+type GlobalConnectionSummary struct {
+	ID                    types.Int64  `tfsdk:"id"`
+	Name                  types.String `tfsdk:"name"`
+	CreatedAt             types.String `tfsdk:"created_at"`
+	UpdatedAt             types.String `tfsdk:"updated_at"`
+	AdapterVersion        types.String `tfsdk:"adapter_version"`
+	PrivateLinkEndpointID types.Int64  `tfsdk:"private_link_endpoint_id"`
+	IsSSHTunnelEnabled    types.Bool   `tfsdk:"is_ssh_tunnel_enabled"`
+	OauthConfigurationID  types.Int64  `tfsdk:"oauth_configuration_id"`
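+	// the double underscore in environment__count is intentional; the
+	// acceptance tests assert on this exact attribute name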
+	EnvironmentCount      types.Int64  `tfsdk:"environment__count"`
+}
diff --git a/pkg/framework/objects/global_connection/resource.go b/pkg/framework/objects/global_connection/resource.go
index 895cdb7..ab9fd48 100644
--- a/pkg/framework/objects/global_connection/resource.go
+++ b/pkg/framework/objects/global_connection/resource.go
@@ -102,649 +102,22 @@ func (r *globalConnectionResource) Read(
 
 	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
 
-	connectionID := state.ID.ValueInt64()
-
-	switch {
-	case state.SnowflakeConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.SnowflakeConfig](r.client)
-
-		common, snowflakeCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(snowflakeCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// snowflake settings
-		state.SnowflakeConfig.Account = types.StringPointerValue(snowflakeCfg.Account)
-		state.SnowflakeConfig.Database = types.StringPointerValue(snowflakeCfg.Database)
-		state.SnowflakeConfig.Warehouse = types.StringPointerValue(snowflakeCfg.Warehouse)
-		state.SnowflakeConfig.ClientSessionKeepAlive = types.BoolPointerValue(
-			snowflakeCfg.ClientSessionKeepAlive,
-		)
-		state.SnowflakeConfig.AllowSso = types.BoolPointerValue(snowflakeCfg.AllowSso)
-
-		// nullable optional fields
-		// TODO: decide if it is better to read it as string, *string or nullable.Nullable[string] on the dbt_cloud side
-		// in this case role can never be empty so this works but we might have cases where null and empty are different
-		if !snowflakeCfg.Role.IsNull() {
-			state.SnowflakeConfig.Role = types.StringValue(snowflakeCfg.Role.MustGet())
-		} else {
-			state.SnowflakeConfig.Role = types.StringNull()
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: OauthClientID, OauthClientSecret
-
-	case state.BigQueryConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.BigQueryConfig](r.client)
-
-		common, bigqueryCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(bigqueryCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
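+	// the per-adapter state mapping that used to live inline below is now
+	// handled by the readGeneric helper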
+	newState, action, err := readGeneric(r.client, &state, "")
+	if err != nil {
+		resp.Diagnostics.AddError("Error reading the connection", err.Error())
+		return
+	}
 
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// BigQuery settings
-		state.BigQueryConfig.GCPProjectID = types.StringPointerValue(bigqueryCfg.ProjectID)
-		state.BigQueryConfig.TimeoutSeconds = types.Int64PointerValue(bigqueryCfg.TimeoutSeconds)
-		state.BigQueryConfig.PrivateKeyID = types.StringPointerValue(bigqueryCfg.PrivateKeyID)
-		state.BigQueryConfig.ClientEmail = types.StringPointerValue(bigqueryCfg.ClientEmail)
-		state.BigQueryConfig.ClientID = types.StringPointerValue(bigqueryCfg.ClientID)
-		state.BigQueryConfig.AuthURI = types.StringPointerValue(bigqueryCfg.AuthURI)
-		state.BigQueryConfig.TokenURI = types.StringPointerValue(bigqueryCfg.TokenURI)
-		state.BigQueryConfig.AuthProviderX509CertURL = types.StringPointerValue(
-			bigqueryCfg.AuthProviderX509CertURL,
-		)
-		state.BigQueryConfig.ClientX509CertURL = types.StringPointerValue(
-			bigqueryCfg.ClientX509CertURL,
+	if action == "removeFromState" {
+		resp.Diagnostics.AddWarning(
+			"Resource not found",
+			"The connection resource was not found and has been removed from the state.",
 		)
-		state.BigQueryConfig.Retries = types.Int64PointerValue(bigqueryCfg.Retries)
-		state.BigQueryConfig.Scopes = helper.SliceStringToSliceTypesString(bigqueryCfg.Scopes)
-
-		// nullable optional fields
-		if !bigqueryCfg.Priority.IsNull() {
-			state.BigQueryConfig.Priority = types.StringValue(bigqueryCfg.Priority.MustGet())
-		} else {
-			state.BigQueryConfig.Priority = types.StringNull()
-		}
-
-		if !bigqueryCfg.Location.IsNull() {
-			state.BigQueryConfig.Location = types.StringValue(bigqueryCfg.Location.MustGet())
-		} else {
-			state.BigQueryConfig.Location = types.StringNull()
-		}
-
-		if !bigqueryCfg.MaximumBytesBilled.IsNull() {
-			state.BigQueryConfig.MaximumBytesBilled = types.Int64Value(
-				bigqueryCfg.MaximumBytesBilled.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.MaximumBytesBilled = types.Int64Null()
-		}
-
-		if !bigqueryCfg.ExecutionProject.IsNull() {
-			state.BigQueryConfig.ExecutionProject = types.StringValue(
-				bigqueryCfg.ExecutionProject.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.ExecutionProject = types.StringNull()
-		}
-
-		if !bigqueryCfg.ImpersonateServiceAccount.IsNull() {
-			state.BigQueryConfig.ImpersonateServiceAccount = types.StringValue(
-				bigqueryCfg.ImpersonateServiceAccount.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.ImpersonateServiceAccount = types.StringNull()
-		}
-
-		if !bigqueryCfg.JobRetryDeadlineSeconds.IsNull() {
-			state.BigQueryConfig.JobRetryDeadlineSeconds = types.Int64Value(
-				bigqueryCfg.JobRetryDeadlineSeconds.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.JobRetryDeadlineSeconds = types.Int64Null()
-		}
-
-		if !bigqueryCfg.JobCreationTimeoutSeconds.IsNull() {
-			state.BigQueryConfig.JobCreationTimeoutSeconds = types.Int64Value(
-				bigqueryCfg.JobCreationTimeoutSeconds.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.JobCreationTimeoutSeconds = types.Int64Null()
-		}
-
-		if !bigqueryCfg.GcsBucket.IsNull() {
-			state.BigQueryConfig.GcsBucket = types.StringValue(bigqueryCfg.GcsBucket.MustGet())
-		} else {
-			state.BigQueryConfig.GcsBucket = types.StringNull()
-		}
-
-		if !bigqueryCfg.DataprocRegion.IsNull() {
-			state.BigQueryConfig.DataprocRegion = types.StringValue(
-				bigqueryCfg.DataprocRegion.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.DataprocRegion = types.StringNull()
-		}
-
-		if !bigqueryCfg.DataprocClusterName.IsNull() {
-			state.BigQueryConfig.DataprocClusterName = types.StringValue(
-				bigqueryCfg.DataprocClusterName.MustGet(),
-			)
-		} else {
-			state.BigQueryConfig.DataprocClusterName = types.StringNull()
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: ApplicationID, ApplicationSecret, PrivateKey
-
-	case state.DatabricksConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.DatabricksConfig](r.client)
-
-		common, databricksCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(databricksCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Databricks settings
-		state.DatabricksConfig.Host = types.StringPointerValue(databricksCfg.Host)
-		state.DatabricksConfig.HTTPPath = types.StringPointerValue(databricksCfg.HTTPPath)
-
-		// nullable optional fields
-		if !databricksCfg.Catalog.IsNull() {
-			state.DatabricksConfig.Catalog = types.StringValue(databricksCfg.Catalog.MustGet())
-		} else {
-			state.DatabricksConfig.Catalog = types.StringNull()
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: ClientID, ClientSecret
-
-	case state.RedshiftConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.RedshiftConfig](r.client)
-
-		common, redshiftCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		sshTunnel, err := c.GetEncryptionsForConnection(connectionID)
-		if err != nil {
-			resp.Diagnostics.AddError("Error getting the SSH Tunnel details", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(redshiftCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Redshift settings
-		state.RedshiftConfig.HostName = types.StringPointerValue(redshiftCfg.HostName)
-		state.RedshiftConfig.Port = types.Int64PointerValue(redshiftCfg.Port)
-
-		// nullable optional fields
-		if !redshiftCfg.DBName.IsNull() {
-			state.RedshiftConfig.DBName = types.StringValue(redshiftCfg.DBName.MustGet())
-		} else {
-			state.RedshiftConfig.DBName = types.StringNull()
-		}
-
-		// SSH tunnel settings
-		if len(*sshTunnel) > 0 {
-
-			state.RedshiftConfig.SSHTunnel = &SSHTunnelConfig{
-				ID:        types.Int64PointerValue((*sshTunnel)[0].ID),
-				HostName:  types.StringValue((*sshTunnel)[0].HostName),
-				Port:      types.Int64Value((*sshTunnel)[0].Port),
-				Username:  types.StringValue((*sshTunnel)[0].Username),
-				PublicKey: types.StringValue((*sshTunnel)[0].PublicKey),
-			}
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Redshift
-
-	case state.PostgresConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.PostgresConfig](r.client)
-
-		common, postgresCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		sshTunnel, err := c.GetEncryptionsForConnection(connectionID)
-		if err != nil {
-			resp.Diagnostics.AddError("Error getting the SSH Tunnel details", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(postgresCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Postgres settings
-		state.PostgresConfig.HostName = types.StringPointerValue(postgresCfg.HostName)
-		state.PostgresConfig.Port = types.Int64PointerValue(postgresCfg.Port)
-
-		// nullable optional fields
-		if !postgresCfg.DBName.IsNull() {
-			state.PostgresConfig.DBName = types.StringValue(postgresCfg.DBName.MustGet())
-		} else {
-			state.PostgresConfig.DBName = types.StringNull()
-		}
-
-		// SSH tunnel settings
-		if len(*sshTunnel) > 0 {
-
-			state.PostgresConfig.SSHTunnel = &SSHTunnelConfig{
-				ID:        types.Int64PointerValue((*sshTunnel)[0].ID),
-				HostName:  types.StringValue((*sshTunnel)[0].HostName),
-				Port:      types.Int64Value((*sshTunnel)[0].Port),
-				Username:  types.StringValue((*sshTunnel)[0].Username),
-				PublicKey: types.StringValue((*sshTunnel)[0].PublicKey),
-			}
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Postgres
-
-	case state.FabricConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.FabricConfig](r.client)
-
-		common, fabricCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(fabricCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Fabric settings
-		state.FabricConfig.Server = types.StringPointerValue(fabricCfg.Server)
-		state.FabricConfig.Port = types.Int64PointerValue(fabricCfg.Port)
-		state.FabricConfig.Database = types.StringPointerValue(fabricCfg.Database)
-		state.FabricConfig.Retries = types.Int64PointerValue(fabricCfg.Retries)
-		state.FabricConfig.LoginTimeout = types.Int64PointerValue(fabricCfg.LoginTimeout)
-		state.FabricConfig.QueryTimeout = types.Int64PointerValue(fabricCfg.QueryTimeout)
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Fabric
-
-	case state.SynapseConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.SynapseConfig](r.client)
-
-		common, synapseCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(synapseCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Synapse settings
-		state.SynapseConfig.Host = types.StringPointerValue(synapseCfg.Host)
-		state.SynapseConfig.Port = types.Int64PointerValue(synapseCfg.Port)
-		state.SynapseConfig.Database = types.StringPointerValue(synapseCfg.Database)
-		state.SynapseConfig.Retries = types.Int64PointerValue(synapseCfg.Retries)
-		state.SynapseConfig.LoginTimeout = types.Int64PointerValue(synapseCfg.LoginTimeout)
-		state.SynapseConfig.QueryTimeout = types.Int64PointerValue(synapseCfg.QueryTimeout)
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Synapse
-
-	case state.StarburstConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.StarburstConfig](r.client)
-
-		common, starburstCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(starburstCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Starburst settings
-		state.StarburstConfig.Method = types.StringPointerValue(starburstCfg.Method)
-		state.StarburstConfig.Host = types.StringPointerValue(starburstCfg.Host)
-		state.StarburstConfig.Port = types.Int64PointerValue(starburstCfg.Port)
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Starburst
-
-	case state.AthenaConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.AthenaConfig](r.client)
-
-		common, athenaCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(athenaCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Athena settings
-		state.AthenaConfig.RegionName = types.StringPointerValue(athenaCfg.RegionName)
-		state.AthenaConfig.Database = types.StringPointerValue(athenaCfg.Database)
-		state.AthenaConfig.S3StagingDir = types.StringPointerValue(athenaCfg.S3StagingDir)
-
-		// nullable optional fields
-		if !athenaCfg.WorkGroup.IsNull() {
-			state.AthenaConfig.WorkGroup = types.StringValue(athenaCfg.WorkGroup.MustGet())
-		} else {
-			state.AthenaConfig.WorkGroup = types.StringNull()
-		}
-		if !athenaCfg.SparkWorkGroup.IsNull() {
-			state.AthenaConfig.SparkWorkGroup = types.StringValue(
-				athenaCfg.SparkWorkGroup.MustGet(),
-			)
-		} else {
-			state.AthenaConfig.SparkWorkGroup = types.StringNull()
-		}
-		if !athenaCfg.S3DataDir.IsNull() {
-			state.AthenaConfig.S3DataDir = types.StringValue(athenaCfg.S3DataDir.MustGet())
-		} else {
-			state.AthenaConfig.S3DataDir = types.StringNull()
-		}
-		if !athenaCfg.S3DataNaming.IsNull() {
-			state.AthenaConfig.S3DataNaming = types.StringValue(athenaCfg.S3DataNaming.MustGet())
-		} else {
-			state.AthenaConfig.S3DataNaming = types.StringNull()
-		}
-		if !athenaCfg.S3TmpTableDir.IsNull() {
-			state.AthenaConfig.S3TmpTableDir = types.StringValue(athenaCfg.S3TmpTableDir.MustGet())
-		} else {
-			state.AthenaConfig.S3TmpTableDir = types.StringNull()
-		}
-		if !athenaCfg.PollInterval.IsNull() {
-			state.AthenaConfig.PollInterval = types.Int64Value(athenaCfg.PollInterval.MustGet())
-		} else {
-			state.AthenaConfig.PollInterval = types.Int64Null()
-		}
-		if !athenaCfg.NumRetries.IsNull() {
-			state.AthenaConfig.NumRetries = types.Int64Value(athenaCfg.NumRetries.MustGet())
-		} else {
-			state.AthenaConfig.NumRetries = types.Int64Null()
-		}
-		if !athenaCfg.NumBoto3Retries.IsNull() {
-			state.AthenaConfig.NumBoto3Retries = types.Int64Value(
-				athenaCfg.NumBoto3Retries.MustGet(),
-			)
-		} else {
-			state.AthenaConfig.NumBoto3Retries = types.Int64Null()
-		}
-		if !athenaCfg.NumIcebergRetries.IsNull() {
-			state.AthenaConfig.NumIcebergRetries = types.Int64Value(
-				athenaCfg.NumIcebergRetries.MustGet(),
-			)
-		} else {
-			state.AthenaConfig.NumIcebergRetries = types.Int64Null()
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Athena
-
-	case state.ApacheSparkConfig != nil:
-
-		c := dbt_cloud.NewGlobalConnectionClient[dbt_cloud.ApacheSparkConfig](r.client)
-
-		common, sparkCfg, err := c.Get(connectionID)
-		if err != nil {
-			if strings.HasPrefix(err.Error(), "resource-not-found") {
-				resp.Diagnostics.AddWarning(
-					"Resource not found",
-					"The connection resource was not found and has been removed from the state.",
-				)
-				resp.State.RemoveResource(ctx)
-				return
-			}
-			resp.Diagnostics.AddError("Error getting the connection", err.Error())
-			return
-		}
-
-		// global settings
-		state.ID = types.Int64PointerValue(common.ID)
-		state.AdapterVersion = types.StringValue(sparkCfg.AdapterVersion())
-		state.Name = types.StringPointerValue(common.Name)
-		state.IsSshTunnelEnabled = types.BoolPointerValue(common.IsSshTunnelEnabled)
-		state.OauthConfigurationId = types.Int64PointerValue(common.OauthConfigurationId)
-
-		// nullable common fields
-		if !common.PrivateLinkEndpointId.IsNull() {
-			state.PrivateLinkEndpointId = types.StringValue(common.PrivateLinkEndpointId.MustGet())
-		} else {
-			state.PrivateLinkEndpointId = types.StringNull()
-		}
-
-		// Spark settings
-		state.ApacheSparkConfig.Method = types.StringPointerValue(sparkCfg.Method)
-		state.ApacheSparkConfig.Host = types.StringPointerValue(sparkCfg.Host)
-		state.ApacheSparkConfig.Port = types.Int64PointerValue(sparkCfg.Port)
-		state.ApacheSparkConfig.Cluster = types.StringPointerValue(sparkCfg.Cluster)
-		state.ApacheSparkConfig.ConnectTimeout = types.Int64PointerValue(sparkCfg.ConnectTimeout)
-		state.ApacheSparkConfig.ConnectRetries = types.Int64PointerValue(sparkCfg.ConnectRetries)
-
-		// nullable optional fields
-		if !sparkCfg.Organization.IsNull() {
-			state.ApacheSparkConfig.Organization = types.StringValue(
-				sparkCfg.Organization.MustGet(),
-			)
-		} else {
-			state.ApacheSparkConfig.Organization = types.StringNull()
-		}
-		if !sparkCfg.User.IsNull() {
-			state.ApacheSparkConfig.User = types.StringValue(sparkCfg.User.MustGet())
-		} else {
-			state.ApacheSparkConfig.User = types.StringNull()
-		}
-		if !sparkCfg.Auth.IsNull() {
-			state.ApacheSparkConfig.Auth = types.StringValue(sparkCfg.Auth.MustGet())
-		} else {
-			state.ApacheSparkConfig.Auth = types.StringNull()
-		}
-
-		// We don't set the sensitive fields when we read because those are secret and never returned by the API
-		// sensitive fields: N/A for Spark
-
-	default:
-		panic("Unknown connection type")
+		resp.State.RemoveResource(ctx)
+		return
 	}
 
-	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+	resp.Diagnostics.Append(resp.State.Set(ctx, newState)...)
 
 }
 
diff --git a/pkg/framework/objects/global_connection/schema.go b/pkg/framework/objects/global_connection/schema.go
index 548d10d..1f7a108 100644
--- a/pkg/framework/objects/global_connection/schema.go
+++ b/pkg/framework/objects/global_connection/schema.go
@@ -6,8 +6,10 @@ import (
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/helper"
 	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
 	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	datasource_schema "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
 	"github.com/hashicorp/terraform-plugin-framework/resource"
-	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	resource_schema "github.com/hashicorp/terraform-plugin-framework/resource/schema"
 	"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
 	"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
 	"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
@@ -24,7 +26,7 @@ func (r *globalConnectionResource) Schema(
 	resp *resource.SchemaResponse,
 ) {
 
-	resp.Schema = schema.Schema{
+	resp.Schema = resource_schema.Schema{
 		Description: helper.DocString(
 			`This resource can be used to create global connections as introduced in dbt Cloud in August 2024.
 
@@ -32,139 +34,139 @@ func (r *globalConnectionResource) Schema(
 			
 			All connections types are supported, and the old resources ~~~dbtcloud_connection~~~, ~~~dbtcloud_bigquery_connection~~~ and ~~~dbtcloud_fabric_connection~~~ are now flagged as deprecated and will be removed from the next major version of the provider.`,
 		),
-		Attributes: map[string]schema.Attribute{
-			"id": schema.Int64Attribute{
+		Attributes: map[string]resource_schema.Attribute{
+			"id": resource_schema.Int64Attribute{
 				Computed:    true,
 				Description: "Connection Identifier",
 				PlanModifiers: []planmodifier.Int64{
 					int64planmodifier.UseStateForUnknown(),
 				},
 			},
-			"adapter_version": schema.StringAttribute{
+			"adapter_version": resource_schema.StringAttribute{
 				Computed:    true,
 				Description: "Version of the adapter",
 			},
-			"name": schema.StringAttribute{
+			"name": resource_schema.StringAttribute{
 				Required:    true,
 				Description: "Connection name",
 			},
-			"is_ssh_tunnel_enabled": schema.BoolAttribute{
+			"is_ssh_tunnel_enabled": resource_schema.BoolAttribute{
 				Computed:    true,
 				Description: "Whether the connection can use an SSH tunnel",
 			},
-			"private_link_endpoint_id": schema.StringAttribute{
+			"private_link_endpoint_id": resource_schema.StringAttribute{
 				Optional:    true,
 				Description: "Private Link Endpoint ID. This ID can be found using the `privatelink_endpoint` data source",
 			},
-			"oauth_configuration_id": schema.Int64Attribute{
+			"oauth_configuration_id": resource_schema.Int64Attribute{
 				Computed: true,
 			},
-			"bigquery": schema.SingleNestedAttribute{
+			"bigquery": resource_schema.SingleNestedAttribute{
 				Optional: true,
-				Attributes: map[string]schema.Attribute{
-					"gcp_project_id": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"gcp_project_id": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The GCP project ID to use for the connection",
 					},
-					"timeout_seconds": schema.Int64Attribute{
+					"timeout_seconds": resource_schema.Int64Attribute{
 						Optional:    true,
 						Computed:    true,
 						Default:     int64default.StaticInt64(300),
 						Description: "Timeout in seconds for queries",
 					},
-					"private_key_id": schema.StringAttribute{
+					"private_key_id": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Private Key ID for the Service Account",
 					},
-					"private_key": schema.StringAttribute{
+					"private_key": resource_schema.StringAttribute{
 						Required:    true,
 						Sensitive:   true,
 						Description: "Private Key for the Service Account",
 					},
-					"client_email": schema.StringAttribute{
+					"client_email": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Service Account email",
 					},
-					"client_id": schema.StringAttribute{
+					"client_id": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Client ID of the Service Account",
 					},
-					"auth_uri": schema.StringAttribute{
+					"auth_uri": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Auth URI for the Service Account",
 					},
-					"token_uri": schema.StringAttribute{
+					"token_uri": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Token URI for the Service Account",
 					},
-					"auth_provider_x509_cert_url": schema.StringAttribute{
+					"auth_provider_x509_cert_url": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Auth Provider X509 Cert URL for the Service Account",
 					},
-					"client_x509_cert_url": schema.StringAttribute{
+					"client_x509_cert_url": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Client X509 Cert URL for the Service Account",
 					},
-					"priority": schema.StringAttribute{
+					"priority": resource_schema.StringAttribute{
 						Optional: true,
 						Validators: []validator.String{
 							stringvalidator.OneOf([]string{"batch", "interactive"}...),
 						},
 						Description: "The priority with which to execute BigQuery queries (batch or interactive)",
 					},
-					"retries": schema.Int64Attribute{
+					"retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Computed:    true,
 						Default:     int64default.StaticInt64(1),
 						Description: "Number of retries for queries",
 					},
-					"location": schema.StringAttribute{
+					"location": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Location to create new Datasets in",
 					},
-					"maximum_bytes_billed": schema.Int64Attribute{
+					"maximum_bytes_billed": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Max number of bytes that can be billed for a given BigQuery query",
 					},
-					"execution_project": schema.StringAttribute{
+					"execution_project": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Project to bill for query execution",
 					},
-					"impersonate_service_account": schema.StringAttribute{
+					"impersonate_service_account": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Service Account to impersonate when running queries",
 					},
-					"job_retry_deadline_seconds": schema.Int64Attribute{
+					"job_retry_deadline_seconds": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Total number of seconds to wait while retrying the same query",
 					},
-					"job_creation_timeout_seconds": schema.Int64Attribute{
+					"job_creation_timeout_seconds": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Maximum timeout for the job creation step",
 					},
-					"application_id": schema.StringAttribute{
+					"application_id": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "OAuth Client ID",
 						Sensitive:   true,
 					},
-					"application_secret": schema.StringAttribute{
+					"application_secret": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "OAuth Client Secret",
 						Sensitive:   true,
 					},
-					"gcs_bucket": schema.StringAttribute{
+					"gcs_bucket": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "URI for a Google Cloud Storage bucket to host Python code executed via Datapro",
 					},
-					"dataproc_region": schema.StringAttribute{
+					"dataproc_region": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Google Cloud region for PySpark workloads on Dataproc",
 					},
-					"dataproc_cluster_name": schema.StringAttribute{
+					"dataproc_cluster_name": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Dataproc cluster name for PySpark workloads",
 					},
-					"scopes": schema.SetAttribute{
+					"scopes": resource_schema.SetAttribute{
 						Optional:    true,
 						Computed:    true,
 						ElementType: types.StringType,
@@ -185,44 +187,44 @@ func (r *globalConnectionResource) Schema(
 				},
 			},
 			// this feels bad, but there is no error/warning when people add extra fields https://github.com/hashicorp/terraform/issues/33570
-			"snowflake": schema.SingleNestedAttribute{
+			"snowflake": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Snowflake connection configuration",
-				Attributes: map[string]schema.Attribute{
-					"account": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"account": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The Snowflake account name",
 					},
-					"database": schema.StringAttribute{
+					"database": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The default database for the connection",
 					},
-					"warehouse": schema.StringAttribute{
+					"warehouse": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The default Snowflake Warehouse to use for the connection",
 					},
-					"allow_sso": schema.BoolAttribute{
+					"allow_sso": resource_schema.BoolAttribute{
 						Optional:    true,
 						Computed:    true,
 						Default:     booldefault.StaticBool(false),
 						Description: "Whether to allow Snowflake OAuth for the connection. If true, the `oauth_client_id` and `oauth_client_secret` fields must be set",
 					},
 					// TODO: required if allow_sso is true
-					"oauth_client_id": schema.StringAttribute{
+					"oauth_client_id": resource_schema.StringAttribute{
 						Optional:    true,
 						Sensitive:   true,
 						Description: "OAuth Client ID. Required to allow OAuth between dbt Cloud and Snowflake",
 					},
-					"oauth_client_secret": schema.StringAttribute{
+					"oauth_client_secret": resource_schema.StringAttribute{
 						Optional:    true,
 						Sensitive:   true,
 						Description: "OAuth Client Secret. Required to allow OAuth between dbt Cloud and Snowflake",
 					},
-					"role": schema.StringAttribute{
+					"role": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "The Snowflake role to use when running queries on the connection",
 					},
-					"client_session_keep_alive": schema.BoolAttribute{
+					"client_session_keep_alive": resource_schema.BoolAttribute{
 						Optional:    true,
 						Computed:    true,
 						Default:     booldefault.StaticBool(false),
@@ -230,72 +232,72 @@ func (r *globalConnectionResource) Schema(
 					},
 				},
 			},
-			"databricks": schema.SingleNestedAttribute{
+			"databricks": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Databricks connection configuration",
-				Attributes: map[string]schema.Attribute{
-					"host": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"host": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The hostname of the Databricks cluster or SQL warehouse.",
 					},
-					"http_path": schema.StringAttribute{
+					"http_path": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The HTTP path of the Databricks cluster or SQL warehouse.",
 					},
-					"catalog": schema.StringAttribute{
+					"catalog": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Catalog name if Unity Catalog is enabled in your Databricks workspace.",
 					},
-					"client_id": schema.StringAttribute{
+					"client_id": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Required to enable Databricks OAuth authentication for IDE developers.",
 					},
-					"client_secret": schema.StringAttribute{
+					"client_secret": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Required to enable Databricks OAuth authentication for IDE developers.",
 					},
 				},
 			},
-			"redshift": schema.SingleNestedAttribute{
+			"redshift": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Redshift connection configuration",
-				Attributes: map[string]schema.Attribute{
-					"hostname": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"hostname": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The hostname of the data warehouse.",
 					},
-					"port": schema.Int64Attribute{
+					"port": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(5432),
 						Computed:    true,
 						Description: "The port to connect to for this connection. Default=5432",
 					},
-					"dbname": schema.StringAttribute{
+					"dbname": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "The database name for this connection.",
 					},
 					// for SSH tunnel details
-					"ssh_tunnel": schema.SingleNestedAttribute{
+					"ssh_tunnel": resource_schema.SingleNestedAttribute{
 						Optional:    true,
 						Description: "Redshift SSH Tunnel configuration",
-						Attributes: map[string]schema.Attribute{
-							"username": schema.StringAttribute{
+						Attributes: map[string]resource_schema.Attribute{
+							"username": resource_schema.StringAttribute{
 								Required:    true,
 								Description: "The username to use for the SSH tunnel.",
 							},
-							"port": schema.Int64Attribute{
+							"port": resource_schema.Int64Attribute{
 								Required:    true,
 								Description: "The HTTP port for the SSH tunnel.",
 							},
-							"hostname": schema.StringAttribute{
+							"hostname": resource_schema.StringAttribute{
 								Required:    true,
 								Description: "The hostname for the SSH tunnel.",
 							},
-							"public_key": schema.StringAttribute{
+							"public_key": resource_schema.StringAttribute{
 								Computed:    true,
 								Description: "The SSH public key generated to allow connecting via SSH tunnel.",
 							},
-							"id": schema.Int64Attribute{
+							"id": resource_schema.Int64Attribute{
 								Computed:    true,
 								Description: "The ID of the SSH tunnel connection.",
 							},
@@ -303,46 +305,46 @@ func (r *globalConnectionResource) Schema(
 					},
 				},
 			},
-			"postgres": schema.SingleNestedAttribute{
+			"postgres": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "PostgreSQL connection configuration.",
-				Attributes: map[string]schema.Attribute{
-					"hostname": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"hostname": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The hostname of the database.",
 					},
-					"port": schema.Int64Attribute{
+					"port": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(5432),
 						Computed:    true,
 						Description: "The port to connect to for this connection. Default=5432",
 					},
-					"dbname": schema.StringAttribute{
+					"dbname": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "The database name for this connection.",
 					},
 					// for SSH tunnel details
-					"ssh_tunnel": schema.SingleNestedAttribute{
+					"ssh_tunnel": resource_schema.SingleNestedAttribute{
 						Optional:    true,
 						Description: "PostgreSQL SSH Tunnel configuration",
-						Attributes: map[string]schema.Attribute{
-							"username": schema.StringAttribute{
+						Attributes: map[string]resource_schema.Attribute{
+							"username": resource_schema.StringAttribute{
 								Required:    true,
 								Description: "The username to use for the SSH tunnel.",
 							},
-							"port": schema.Int64Attribute{
+							"port": resource_schema.Int64Attribute{
 								Required:    true,
 								Description: "The HTTP port for the SSH tunnel.",
 							},
-							"hostname": schema.StringAttribute{
+							"hostname": resource_schema.StringAttribute{
 								Required:    true,
 								Description: "The hostname for the SSH tunnel.",
 							},
-							"public_key": schema.StringAttribute{
+							"public_key": resource_schema.StringAttribute{
 								Computed:    true,
 								Description: "The SSH public key generated to allow connecting via SSH tunnel.",
 							},
-							"id": schema.Int64Attribute{
+							"id": resource_schema.Int64Attribute{
 								Computed:    true,
 								Description: "The ID of the SSH tunnel connection.",
 							},
@@ -350,37 +352,37 @@ func (r *globalConnectionResource) Schema(
 					},
 				},
 			},
-			"fabric": schema.SingleNestedAttribute{
+			"fabric": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Microsoft Fabric connection configuration.",
-				Attributes: map[string]schema.Attribute{
-					"server": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"server": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The server hostname.",
 					},
-					"port": schema.Int64Attribute{
+					"port": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(1433),
 						Computed:    true,
 						Description: "The port to connect to for this connection. Default=1433",
 					},
-					"database": schema.StringAttribute{
+					"database": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The database to connect to for this connection.",
 					},
-					"retries": schema.Int64Attribute{
+					"retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(1),
 						Computed:    true,
 						Description: "The number of automatic times to retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.",
 					},
-					"login_timeout": schema.Int64Attribute{
+					"login_timeout": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(0),
 						Computed:    true,
 						Description: "The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.",
 					},
-					"query_timeout": schema.Int64Attribute{
+					"query_timeout": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(0),
 						Computed:    true,
@@ -388,37 +390,37 @@ func (r *globalConnectionResource) Schema(
 					},
 				},
 			},
-			"synapse": schema.SingleNestedAttribute{
+			"synapse": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Azure Synapse Analytics connection configuration.",
-				Attributes: map[string]schema.Attribute{
-					"host": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"host": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The server hostname.",
 					},
-					"port": schema.Int64Attribute{
+					"port": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(1433),
 						Computed:    true,
 						Description: "The port to connect to for this connection. Default=1433",
 					},
-					"database": schema.StringAttribute{
+					"database": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The database to connect to for this connection.",
 					},
-					"retries": schema.Int64Attribute{
+					"retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(1),
 						Computed:    true,
 						Description: "The number of automatic times to retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.",
 					},
-					"login_timeout": schema.Int64Attribute{
+					"login_timeout": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(0),
 						Computed:    true,
 						Description: "The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.",
 					},
-					"query_timeout": schema.Int64Attribute{
+					"query_timeout": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(0),
 						Computed:    true,
@@ -426,12 +428,12 @@ func (r *globalConnectionResource) Schema(
 					},
 				},
 			},
-			"starburst": schema.SingleNestedAttribute{
+			"starburst": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Starburst/Trino connection configuration.",
-				Attributes: map[string]schema.Attribute{
+				Attributes: map[string]resource_schema.Attribute{
 					// not too useful now, but should be easy to modify if we add support for other authentication methods
-					"method": schema.StringAttribute{
+					"method": resource_schema.StringAttribute{
 						Optional:    true,
 						Computed:    true,
 						Description: "The authentication method. Only LDAP for now.",
@@ -440,117 +442,117 @@ func (r *globalConnectionResource) Schema(
 							stringvalidator.OneOf([]string{"ldap"}...),
 						},
 					},
-					"host": schema.StringAttribute{
+					"host": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "The hostname of the account to connect to.",
 					},
-					"port": schema.Int64Attribute{
+					"port": resource_schema.Int64Attribute{
 						Optional:    true,
 						Default:     int64default.StaticInt64(443),
 						Computed:    true,
 						Description: "The port to connect to for this connection. Default=443",
 					},
 				},
-			}, "athena": schema.SingleNestedAttribute{
+			}, "athena": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Athena connection configuration.",
-				Attributes: map[string]schema.Attribute{
-					"region_name": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"region_name": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "AWS region of your Athena instance.",
 					},
-					"database": schema.StringAttribute{
+					"database": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Specify the database (data catalog) to build models into (lowercase only).",
 					},
-					"s3_staging_dir": schema.StringAttribute{
+					"s3_staging_dir": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "S3 location to store Athena query results and metadata.",
 					},
-					"work_group": schema.StringAttribute{
+					"work_group": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Identifier of Athena workgroup.",
 					},
-					"spark_work_group": schema.StringAttribute{
+					"spark_work_group": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Identifier of Athena Spark workgroup for running Python models.",
 					},
-					"s3_data_dir": schema.StringAttribute{
+					"s3_data_dir": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Prefix for storing tables, if different from the connection's S3 staging directory.",
 					},
-					"s3_data_naming": schema.StringAttribute{
+					"s3_data_naming": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "How to generate table paths in the S3 data directory.",
 					},
-					"s3_tmp_table_dir": schema.StringAttribute{
+					"s3_tmp_table_dir": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Prefix for storing temporary tables, if different from the connection's S3 data directory.",
 					},
-					"poll_interval": schema.Int64Attribute{
+					"poll_interval": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Interval in seconds to use for polling the status of query results in Athena.",
 					},
-					"num_retries": schema.Int64Attribute{
+					"num_retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Number of times to retry a failing query.",
 					},
-					"num_boto3_retries": schema.Int64Attribute{
+					"num_boto3_retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Number of times to retry boto3 requests (e.g. deleting S3 files for materialized tables).",
 					},
-					"num_iceberg_retries": schema.Int64Attribute{
+					"num_iceberg_retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Number of times to retry iceberg commit queries to fix ICEBERG_COMMIT_ERROR.",
 					},
 				},
 			},
-			"apache_spark": schema.SingleNestedAttribute{
+			"apache_spark": resource_schema.SingleNestedAttribute{
 				Optional:    true,
 				Description: "Apache Spark connection configuration.",
-				Attributes: map[string]schema.Attribute{
-					"method": schema.StringAttribute{
+				Attributes: map[string]resource_schema.Attribute{
+					"method": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Authentication method for the connection (http or thrift).",
 						Validators: []validator.String{
 							stringvalidator.OneOf([]string{"http", "thrift"}...),
 						},
 					},
-					"host": schema.StringAttribute{
+					"host": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Hostname of the connection",
 					},
-					"port": schema.Int64Attribute{
+					"port": resource_schema.Int64Attribute{
 						Optional:    true,
 						Computed:    true,
 						Description: "Port for the connection. Default=443",
 						Default:     int64default.StaticInt64(443),
 					},
-					"cluster": schema.StringAttribute{
+					"cluster": resource_schema.StringAttribute{
 						Required:    true,
 						Description: "Spark cluster for the connection",
 					},
-					"connect_timeout": schema.Int64Attribute{
+					"connect_timeout": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Connection time out in seconds. Default=10",
 						Computed:    true,
 						Default:     int64default.StaticInt64(10),
 					},
-					"connect_retries": schema.Int64Attribute{
+					"connect_retries": resource_schema.Int64Attribute{
 						Optional:    true,
 						Description: "Connection retries. Default=0",
 						Computed:    true,
 						Default:     int64default.StaticInt64(0),
 					},
-					"organization": schema.StringAttribute{
+					"organization": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Organization ID",
 					},
-					"user": schema.StringAttribute{
+					"user": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "User",
 					},
-					"auth": schema.StringAttribute{
+					"auth": resource_schema.StringAttribute{
 						Optional:    true,
 						Description: "Auth",
 					},
@@ -559,3 +561,530 @@ func (r *globalConnectionResource) Schema(
 		},
 	}
 }
+
+func (r *globalConnectionDataSource) Schema(
+	_ context.Context,
+	_ datasource.SchemaRequest,
+	resp *datasource.SchemaResponse,
+) {
+
+	resp.Schema = datasource_schema.Schema{
+		Attributes: map[string]datasource_schema.Attribute{
+			"id": datasource_schema.Int64Attribute{
+				Required:    true,
+				Description: "Connection Identifier",
+			},
+			"adapter_version": datasource_schema.StringAttribute{
+				Computed:    true,
+				Description: "Version of the adapter",
+			},
+			"name": datasource_schema.StringAttribute{
+				Computed:    true,
+				Description: "Connection name",
+			},
+			"is_ssh_tunnel_enabled": datasource_schema.BoolAttribute{
+				Computed:    true,
+				Description: "Whether the connection can use an SSH tunnel",
+			},
+			"private_link_endpoint_id": datasource_schema.StringAttribute{
+				Computed:    true,
+				Description: "Private Link Endpoint ID. This ID can be found using the `privatelink_endpoint` data source",
+			},
+			"oauth_configuration_id": datasource_schema.Int64Attribute{
+				Computed: true,
+			},
+			"bigquery": datasource_schema.SingleNestedAttribute{
+				Computed: true,
+				Attributes: map[string]datasource_schema.Attribute{
+					"gcp_project_id": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The GCP project ID to use for the connection",
+					},
+					"timeout_seconds": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Timeout in seconds for queries",
+					},
+					"private_key_id": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Private Key ID for the Service Account",
+					},
+					"private_key": datasource_schema.StringAttribute{
+						Computed:    true,
+						Sensitive:   true,
+						Description: "Private Key for the Service Account",
+					},
+					"client_email": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Service Account email",
+					},
+					"client_id": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Client ID of the Service Account",
+					},
+					"auth_uri": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Auth URI for the Service Account",
+					},
+					"token_uri": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Token URI for the Service Account",
+					},
+					"auth_provider_x509_cert_url": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Auth Provider X509 Cert URL for the Service Account",
+					},
+					"client_x509_cert_url": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Client X509 Cert URL for the Service Account",
+					},
+					"priority": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The priority with which to execute BigQuery queries (batch or interactive)",
+					},
+					"retries": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Number of retries for queries",
+					},
+					"location": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Location to create new Datasets in",
+					},
+					"maximum_bytes_billed": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Max number of bytes that can be billed for a given BigQuery query",
+					},
+					"execution_project": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Project to bill for query execution",
+					},
+					"impersonate_service_account": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Service Account to impersonate when running queries",
+					},
+					"job_retry_deadline_seconds": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Total number of seconds to wait while retrying the same query",
+					},
+					"job_creation_timeout_seconds": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Maximum timeout for the job creation step",
+					},
+					"application_id": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "OAuth Client ID",
+						Sensitive:   true,
+					},
+					"application_secret": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "OAuth Client Secret",
+						Sensitive:   true,
+					},
+					"gcs_bucket": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "URI for a Google Cloud Storage bucket to host Python code executed via Dataproc",
+					},
+					"dataproc_region": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Google Cloud region for PySpark workloads on Dataproc",
+					},
+					"dataproc_cluster_name": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Dataproc cluster name for PySpark workloads",
+					},
+					"scopes": datasource_schema.SetAttribute{
+						Computed:    true,
+						ElementType: types.StringType,
+						Description: "OAuth scopes for the BigQuery connection",
+					},
+				},
+			},
+			// this is not ideal, but there is no error/warning when users set extra fields here https://github.com/hashicorp/terraform/issues/33570
+			"snowflake": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Snowflake connection configuration",
+				Attributes: map[string]datasource_schema.Attribute{
+					"account": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The Snowflake account name",
+					},
+					"database": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The default database for the connection",
+					},
+					"warehouse": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The default Snowflake Warehouse to use for the connection",
+					},
+					"allow_sso": datasource_schema.BoolAttribute{
+						Computed:    true,
+						Description: "Whether to allow Snowflake OAuth for the connection. If true, the `oauth_client_id` and `oauth_client_secret` fields must be set",
+					},
+					"oauth_client_id": datasource_schema.StringAttribute{
+						Computed:    true,
+						Sensitive:   true,
+						Description: "OAuth Client ID. Required to allow OAuth between dbt Cloud and Snowflake",
+					},
+					"oauth_client_secret": datasource_schema.StringAttribute{
+						Computed:    true,
+						Sensitive:   true,
+						Description: "OAuth Client Secret. Required to allow OAuth between dbt Cloud and Snowflake",
+					},
+					"role": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The Snowflake role to use when running queries on the connection",
+					},
+					"client_session_keep_alive": datasource_schema.BoolAttribute{
+						Computed:    true,
+						Description: "If true, the snowflake client will keep connections for longer than the default 4 hours. This is helpful when particularly long-running queries are executing (> 4 hours)",
+					},
+				},
+			},
+			"databricks": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Databricks connection configuration",
+				Attributes: map[string]datasource_schema.Attribute{
+					"host": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The hostname of the Databricks cluster or SQL warehouse.",
+					},
+					"http_path": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The HTTP path of the Databricks cluster or SQL warehouse.",
+					},
+					"catalog": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Catalog name if Unity Catalog is enabled in your Databricks workspace.",
+					},
+					"client_id": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Required to enable Databricks OAuth authentication for IDE developers.",
+					},
+					"client_secret": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Required to enable Databricks OAuth authentication for IDE developers.",
+					},
+				},
+			},
+			"redshift": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Redshift connection configuration",
+				Attributes: map[string]datasource_schema.Attribute{
+					"hostname": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The hostname of the data warehouse.",
+					},
+					"port": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The port to connect to for this connection. Default=5432",
+					},
+					"dbname": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The database name for this connection.",
+					},
+					// for SSH tunnel details
+					"ssh_tunnel": datasource_schema.SingleNestedAttribute{
+						Computed:    true,
+						Description: "Redshift SSH Tunnel configuration",
+						Attributes: map[string]datasource_schema.Attribute{
+							"username": datasource_schema.StringAttribute{
+								Computed:    true,
+								Description: "The username to use for the SSH tunnel.",
+							},
+							"port": datasource_schema.Int64Attribute{
+								Computed:    true,
+								Description: "The port for the SSH tunnel.",
+							},
+							"hostname": datasource_schema.StringAttribute{
+								Computed:    true,
+								Description: "The hostname for the SSH tunnel.",
+							},
+							"public_key": datasource_schema.StringAttribute{
+								Computed:    true,
+								Description: "The SSH public key generated to allow connecting via SSH tunnel.",
+							},
+							"id": datasource_schema.Int64Attribute{
+								Computed:    true,
+								Description: "The ID of the SSH tunnel connection.",
+							},
+						},
+					},
+				},
+			},
+			"postgres": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "PostgreSQL connection configuration.",
+				Attributes: map[string]datasource_schema.Attribute{
+					"hostname": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The hostname of the database.",
+					},
+					"port": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The port to connect to for this connection. Default=5432",
+					},
+					"dbname": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The database name for this connection.",
+					},
+					// for SSH tunnel details
+					"ssh_tunnel": datasource_schema.SingleNestedAttribute{
+						Computed:    true,
+						Description: "PostgreSQL SSH Tunnel configuration",
+						Attributes: map[string]datasource_schema.Attribute{
+							"username": datasource_schema.StringAttribute{
+								Computed:    true,
+								Description: "The username to use for the SSH tunnel.",
+							},
+							"port": datasource_schema.Int64Attribute{
+								Computed:    true,
+								Description: "The port for the SSH tunnel.",
+							},
+							"hostname": datasource_schema.StringAttribute{
+								Computed:    true,
+								Description: "The hostname for the SSH tunnel.",
+							},
+							"public_key": datasource_schema.StringAttribute{
+								Computed:    true,
+								Description: "The SSH public key generated to allow connecting via SSH tunnel.",
+							},
+							"id": datasource_schema.Int64Attribute{
+								Computed:    true,
+								Description: "The ID of the SSH tunnel connection.",
+							},
+						},
+					},
+				},
+			},
+			"fabric": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Microsoft Fabric connection configuration.",
+				Attributes: map[string]datasource_schema.Attribute{
+					"server": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The server hostname.",
+					},
+					"port": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The port to connect to for this connection. Default=1433",
+					},
+					"database": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The database to connect to for this connection.",
+					},
+					"retries": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The number of times to automatically retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.",
+					},
+					"login_timeout": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.",
+					},
+					"query_timeout": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.",
+					},
+				},
+			},
+			"synapse": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Azure Synapse Analytics connection configuration.",
+				Attributes: map[string]datasource_schema.Attribute{
+					"host": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The server hostname.",
+					},
+					"port": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The port to connect to for this connection. Default=1433",
+					},
+					"database": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The database to connect to for this connection.",
+					},
+					"retries": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The number of times to automatically retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.",
+					},
+					"login_timeout": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.",
+					},
+					"query_timeout": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.",
+					},
+				},
+			},
+			"starburst": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Starburst/Trino connection configuration.",
+				Attributes: map[string]datasource_schema.Attribute{
+					// not too useful now, but should be easy to extend if we add support for more authentication methods
+					"method": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The authentication method. Only LDAP for now.",
+					},
+					"host": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "The hostname of the account to connect to.",
+					},
+					"port": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "The port to connect to for this connection. Default=443",
+					},
+				},
+			},
+			"athena": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Athena connection configuration.",
+				Attributes: map[string]datasource_schema.Attribute{
+					"region_name": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "AWS region of your Athena instance.",
+					},
+					"database": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Specify the database (data catalog) to build models into (lowercase only).",
+					},
+					"s3_staging_dir": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "S3 location to store Athena query results and metadata.",
+					},
+					"work_group": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Identifier of Athena workgroup.",
+					},
+					"spark_work_group": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Identifier of Athena Spark workgroup for running Python models.",
+					},
+					"s3_data_dir": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Prefix for storing tables, if different from the connection's S3 staging directory.",
+					},
+					"s3_data_naming": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "How to generate table paths in the S3 data directory.",
+					},
+					"s3_tmp_table_dir": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Prefix for storing temporary tables, if different from the connection's S3 data directory.",
+					},
+					"poll_interval": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Interval in seconds to use for polling the status of query results in Athena.",
+					},
+					"num_retries": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Number of times to retry a failing query.",
+					},
+					"num_boto3_retries": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Number of times to retry boto3 requests (e.g. deleting S3 files for materialized tables).",
+					},
+					"num_iceberg_retries": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Number of times to retry iceberg commit queries to fix ICEBERG_COMMIT_ERROR.",
+					},
+				},
+			},
+			"apache_spark": datasource_schema.SingleNestedAttribute{
+				Computed:    true,
+				Description: "Apache Spark connection configuration.",
+				Attributes: map[string]datasource_schema.Attribute{
+					"method": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Authentication method for the connection (http or thrift).",
+					},
+					"host": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Hostname of the connection",
+					},
+					"port": datasource_schema.Int64Attribute{
+						Computed:    true,
+						Description: "Port for the connection. Default=443",
+					},
+					"cluster": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Spark cluster for the connection",
+					},
+					"connect_timeout": datasource_schema.Int64Attribute{
+						Description: "Connection time out in seconds. Default=10",
+						Computed:    true,
+					},
+					"connect_retries": datasource_schema.Int64Attribute{
+						Description: "Connection retries. Default=0",
+						Computed:    true,
+					},
+					"organization": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Organization ID",
+					},
+					"user": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "User",
+					},
+					"auth": datasource_schema.StringAttribute{
+						Computed:    true,
+						Description: "Auth",
+					},
+				},
+			},
+		},
+	}
+}
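+
+// Usage sketch (hypothetical HCL; the ID and names here are illustrative).
+// Nested warehouse blocks such as `snowflake` are read-only on the data source:
+//
+//	data "dbtcloud_global_connection" "snowflake_conn" {
+//	  id = 5678
+//	}
+//
+//	output "warehouse" {
+//	  value = data.dbtcloud_global_connection.snowflake_conn.snowflake.warehouse
+//	}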
+
+func (r *globalConnectionsDataSource) Schema(
+	_ context.Context,
+	_ datasource.SchemaRequest,
+	resp *datasource.SchemaResponse,
+) {
+
+	resp.Schema = datasource_schema.Schema{
+		Description: "All the connections created on the account, with summary information such as their name, type, when they were created/updated, and the number of environments using them.",
+		Attributes: map[string]datasource_schema.Attribute{
+			"connections": datasource_schema.SetNestedAttribute{
+				Computed:    true,
+				Description: "A set of all the connections",
+				NestedObject: datasource_schema.NestedAttributeObject{
+					Attributes: map[string]datasource_schema.Attribute{
+						"id": datasource_schema.Int64Attribute{
+							Computed:    true,
+							Description: "Connection Identifier",
+						},
+						"created_at": datasource_schema.StringAttribute{
+							Computed:    true,
+							Description: "When the connection was created",
+						},
+						"updated_at": datasource_schema.StringAttribute{
+							Computed:    true,
+							Description: "When the connection was updated",
+						},
+						"name": datasource_schema.StringAttribute{
+							Computed:    true,
+							Description: "Connection name",
+						},
+						"adapter_version": datasource_schema.StringAttribute{
+							Computed:    true,
+							Description: "Version of the adapter used for the connection. It indicates the connection type",
+						},
+						"private_link_endpoint_id": datasource_schema.Int64Attribute{
+							Computed:    true,
+							Description: "Private Link Endpoint ID.",
+						},
+						"is_ssh_tunnel_enabled": datasource_schema.BoolAttribute{
+							Computed: true,
+						},
+						"oauth_configuration_id": datasource_schema.Int64Attribute{
+							Computed: true,
+						},
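+						// the double underscore presumably mirrors the field name returned by the dbt Cloud API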
+						"environment__count": datasource_schema.Int64Attribute{
+							Computed:    true,
+							Description: "Number of environments using this connection",
+						},
+					},
+				},
+			},
+		},
+	}
+}
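+
+// Usage sketch (hypothetical HCL): the `connections` set can feed a Terraform
+// `check` block (Terraform >= 1.5), e.g. to assert that connection names are
+// unique on the account:
+//
+//	data "dbtcloud_global_connections" "all" {}
+//
+//	check "unique_connection_names" {
+//	  assert {
+//	    condition     = length(distinct(data.dbtcloud_global_connections.all.connections[*].name)) == length(data.dbtcloud_global_connections.all.connections)
+//	    error_message = "Two or more global connections share the same name."
+//	  }
+//	}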
diff --git a/pkg/framework/objects/project/data_source_all.go b/pkg/framework/objects/project/data_source_all.go
new file mode 100644
index 0000000..09d034b
--- /dev/null
+++ b/pkg/framework/objects/project/data_source_all.go
@@ -0,0 +1,113 @@
+package project
+
+import (
+	"context"
+
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/dbt_cloud"
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/helper"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+var (
+	_ datasource.DataSource              = &projectsDataSource{}
+	_ datasource.DataSourceWithConfigure = &projectsDataSource{}
+)
+
+func ProjectsDataSource() datasource.DataSource {
+	return &projectsDataSource{}
+}
+
+type projectsDataSource struct {
+	client *dbt_cloud.Client
+}
+
+func (d *projectsDataSource) Metadata(
+	_ context.Context,
+	req datasource.MetadataRequest,
+	resp *datasource.MetadataResponse,
+) {
+	resp.TypeName = req.ProviderTypeName + "_projects"
+}
+
+func (d *projectsDataSource) Read(
+	ctx context.Context,
+	req datasource.ReadRequest,
+	resp *datasource.ReadResponse,
+) {
+	var config ProjectsDataSourceModel
+
+	resp.Diagnostics.Append(req.Config.Get(ctx, &config)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	projectNameContains := config.NameContains.ValueString()
+
+	apiProjects, err := d.client.GetAllProjects(projectNameContains)
+
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Issue when retrieving projects",
+			err.Error(),
+		)
+		return
+	}
+
+	state := config
+
+	allProjects := []ProjectConnectionRepository{}
+	for _, project := range apiProjects {
+
+		currentProject := ProjectConnectionRepository{}
+		currentProject.ID = types.Int64Value(project.ID)
+		currentProject.Name = types.StringValue(project.Name)
+		currentProject.Description = types.StringValue(project.Description)
+		currentProject.SemanticLayerConfigID = types.Int64PointerValue(
+			project.SemanticLayerConfigID,
+		)
+		currentProject.DbtProjectSubdirectory = types.StringValue(
+			project.DbtProjectSubdirectory,
+		)
+		currentProject.CreatedAt = types.StringValue(project.CreatedAt)
+		currentProject.UpdatedAt = types.StringValue(project.UpdatedAt)
+
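+		// Connection and Repository are optional in the API response; keeping
+		// them nil renders the nested attributes as null rather than as empty objects.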
+		if project.Connection != nil {
+			currentProject.Connection = &ProjectConnection{
+				ID:             types.Int64PointerValue(project.Connection.ID),
+				Name:           types.StringPointerValue(project.Connection.Name),
+				AdapterVersion: types.StringPointerValue(project.Connection.AdapterVersion),
+			}
+		}
+
+		if project.Repository != nil {
+			currentProject.Repository = &ProjectRepository{
+				ID: types.Int64PointerValue(
+					helper.IntPointerToInt64Pointer(project.Repository.ID),
+				),
+				RemoteUrl: types.StringValue(project.Repository.RemoteUrl),
+				PullRequestURLTemplate: types.StringValue(
+					project.Repository.PullRequestURLTemplate,
+				),
+			}
+		}
+
+		allProjects = append(allProjects, currentProject)
+	}
+	state.Projects = allProjects
+
+	diags := resp.State.Set(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+func (d *projectsDataSource) Configure(
+	_ context.Context,
+	req datasource.ConfigureRequest,
+	resp *datasource.ConfigureResponse,
+) {
+	if req.ProviderData == nil {
+		return
+	}
+
+	// guard the type assertion so a provider wiring mistake surfaces as a diagnostic instead of a panic
+	client, ok := req.ProviderData.(*dbt_cloud.Client)
+	if !ok {
+		resp.Diagnostics.AddError(
+			"Unexpected Data Source Configure Type",
+			"Expected *dbt_cloud.Client. Please report this issue to the provider developers.",
+		)
+		return
+	}
+
+	d.client = client
+}
diff --git a/pkg/framework/objects/project/data_source_all_acceptance_test.go b/pkg/framework/objects/project/data_source_all_acceptance_test.go
new file mode 100644
index 0000000..11ee74e
--- /dev/null
+++ b/pkg/framework/objects/project/data_source_all_acceptance_test.go
@@ -0,0 +1,59 @@
+package project_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/acctest_helper"
+	"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+)
+
+func TestDbtCloudProjectsDataSource(t *testing.T) {
+
+	projectName := acctest.RandStringFromCharSet(19, acctest.CharSetAlphaNum)
+	projectName1 := fmt.Sprintf("%s1", projectName)
+	projectName2 := fmt.Sprintf("%s2", projectName)
+
+	config := projectsConfig(projectName, projectName1, projectName2)
+
+	check := resource.ComposeAggregateTestCheckFunc(
+		resource.TestCheckResourceAttr("data.dbtcloud_projects.test", "projects.#", "2"),
+		resource.TestCheckResourceAttrSet("data.dbtcloud_projects.test", "projects.0.id"),
+		resource.TestCheckResourceAttrSet("data.dbtcloud_projects.test", "projects.0.name"),
+		resource.TestCheckResourceAttrSet("data.dbtcloud_projects.test", "projects.1.id"),
+		resource.TestCheckResourceAttrSet("data.dbtcloud_projects.test", "projects.1.name"),
+	)
+
+	resource.ParallelTest(t, resource.TestCase{
+		ProtoV6ProviderFactories: acctest_helper.TestAccProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+				Check:  check,
+			},
+		},
+	})
+}
+
+func projectsConfig(projectName string, projectName1 string, projectName2 string) string {
+	return fmt.Sprintf(`
+	resource "dbtcloud_project" "test_project1" {
+		name = "%s"
+	}
+
+	resource "dbtcloud_project" "test_project2" {
+		name = "%s"
+	}
+
+	data dbtcloud_projects test {
+		name_contains = "%s"
+
+		depends_on = [
+			dbtcloud_project.test_project1,
+			dbtcloud_project.test_project2,
+		]
+	}
+	`, projectName1, projectName2, projectName)
+}
diff --git a/pkg/framework/objects/project/model.go b/pkg/framework/objects/project/model.go
new file mode 100644
index 0000000..2508bef
--- /dev/null
+++ b/pkg/framework/objects/project/model.go
@@ -0,0 +1,32 @@
+package project
+
+import "github.com/hashicorp/terraform-plugin-framework/types"
+
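+// The tfsdk tags below must match the attribute names declared in schema.go:
+// the plugin framework maps struct fields to state by tag, not by field name.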
+type ProjectsDataSourceModel struct {
+	NameContains types.String                  `tfsdk:"name_contains"`
+	Projects     []ProjectConnectionRepository `tfsdk:"projects"`
+}
+
+type ProjectConnectionRepository struct {
+	ID                     types.Int64        `tfsdk:"id"`
+	Name                   types.String       `tfsdk:"name"`
+	Description            types.String       `tfsdk:"description"`
+	SemanticLayerConfigID  types.Int64        `tfsdk:"semantic_layer_config_id"`
+	DbtProjectSubdirectory types.String       `tfsdk:"dbt_project_subdirectory"`
+	CreatedAt              types.String       `tfsdk:"created_at"`
+	UpdatedAt              types.String       `tfsdk:"updated_at"`
+	Connection             *ProjectConnection `tfsdk:"connection"`
+	Repository             *ProjectRepository `tfsdk:"repository"`
+}
+
+type ProjectRepository struct {
+	ID                     types.Int64  `tfsdk:"id"`
+	RemoteUrl              types.String `tfsdk:"remote_url"`
+	PullRequestURLTemplate types.String `tfsdk:"pull_request_url_template"`
+}
+
+type ProjectConnection struct {
+	ID             types.Int64  `tfsdk:"id"`
+	Name           types.String `tfsdk:"name"`
+	AdapterVersion types.String `tfsdk:"adapter_version"`
+}
diff --git a/pkg/framework/objects/project/schema.go b/pkg/framework/objects/project/schema.go
new file mode 100644
index 0000000..4572511
--- /dev/null
+++ b/pkg/framework/objects/project/schema.go
@@ -0,0 +1,97 @@
+package project
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+)
+
+func (d *projectsDataSource) Schema(
+	ctx context.Context,
+	req datasource.SchemaRequest,
+	resp *datasource.SchemaResponse,
+) {
+	resp.Schema = schema.Schema{
+		Description: "Retrieve all the projects created in dbt Cloud with an optional filter on parts of the project name.",
+		Attributes: map[string]schema.Attribute{
+			"name_contains": schema.StringAttribute{
+				Optional:    true,
+				Computed:    true,
+				Description: "Used to filter projects by name. Optional",
+			},
+			"projects": schema.SetNestedAttribute{
+				Computed:    true,
+				Description: "Set of projects with their details",
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"id": schema.Int64Attribute{
+							Computed:    true,
+							Description: "Project ID",
+						},
+						"name": schema.StringAttribute{
+							Computed:    true,
+							Description: "Project name",
+						},
+						"description": schema.StringAttribute{
+							Computed:    true,
+							Description: "Project description",
+						},
+						"semantic_layer_config_id": schema.Int64Attribute{
+							Computed:    true,
+							Description: "Semantic layer config ID",
+						},
+						"dbt_project_subdirectory": schema.StringAttribute{
+							Computed:    true,
+							Description: "Subdirectory for the dbt project inside the git repo",
+						},
+						"created_at": schema.StringAttribute{
+							Computed:    true,
+							Description: "When the project was created",
+						},
+						"updated_at": schema.StringAttribute{
+							Computed:    true,
+							Description: "When the project was last updated",
+						},
+						"repository": schema.SingleNestedAttribute{
+							Computed:    true,
+							Description: "Details for the repository linked to the project",
+							Attributes: map[string]schema.Attribute{
+								"id": schema.Int64Attribute{
+									Computed:    true,
+									Description: "Repository ID",
+								},
+								"remote_url": schema.StringAttribute{
+									Computed:    true,
+									Description: "URL of the git repo remote",
+								},
+								"pull_request_url_template": schema.StringAttribute{
+									Computed:    true,
+									Description: "URL template for PRs",
+								},
+							},
+						},
+						"connection": schema.SingleNestedAttribute{
+							Computed:    true,
+							Description: "Details for the connection linked to the project",
+							Attributes: map[string]schema.Attribute{
+								"id": schema.Int64Attribute{
+									Computed:    true,
+									Description: "Connection ID",
+								},
+								"name": schema.StringAttribute{
+									Computed:    true,
+									Description: "Connection name",
+								},
+								"adapter_version": schema.StringAttribute{
+									Computed:    true,
+									Description: "Version of the adapter for the connection. It indicates the connection type",
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
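+
+// Usage sketch (hypothetical HCL; the name fragment is illustrative):
+//
+//	data "dbtcloud_projects" "analytics" {
+//	  name_contains = "analytics"
+//	}
+//
+//	output "project_names" {
+//	  value = data.dbtcloud_projects.analytics.projects[*].name
+//	}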
diff --git a/pkg/provider/framework_provider.go b/pkg/provider/framework_provider.go
index 9a82ee9..98e04b2 100644
--- a/pkg/provider/framework_provider.go
+++ b/pkg/provider/framework_provider.go
@@ -14,6 +14,7 @@ import (
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/objects/notification"
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/objects/partial_license_map"
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/objects/partial_notification"
+	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/objects/project"
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/objects/service_token"
 	"github.com/dbt-labs/terraform-provider-dbtcloud/pkg/framework/objects/user"
 
@@ -184,6 +185,9 @@ func (p *dbtCloudProvider) DataSources(_ context.Context) []func() datasource.Da
 		group.GroupDataSource,
 		job.JobsDataSource,
 		service_token.ServiceTokenDataSource,
+		project.ProjectsDataSource,
+		global_connection.GlobalConnectionDataSource,
+		global_connection.GlobalConnectionsDataSource,
 	}
 }