diff --git a/internal/provider/atlas_migration_data_source.go b/internal/provider/atlas_migration_data_source.go index 30813e5..faf35c7 100644 --- a/internal/provider/atlas_migration_data_source.go +++ b/internal/provider/atlas_migration_data_source.go @@ -46,8 +46,9 @@ type ( // Ensure provider defined types fully satisfy framework interfaces var ( - _ datasource.DataSource = &MigrationDataSource{} - _ datasource.DataSourceWithConfigure = &MigrationDataSource{} + _ datasource.DataSource = &MigrationDataSource{} + _ datasource.DataSourceWithConfigure = &MigrationDataSource{} + _ datasource.DataSourceWithValidateConfig = &MigrationDataSource{} ) var ( latestVersion = "Already at latest version" @@ -82,6 +83,21 @@ func (d *MigrationDataSource) Configure(ctx context.Context, req datasource.Conf resp.Diagnostics.Append(d.configure(req.ProviderData)...) } +// ValidateConfig implements datasource.DataSourceWithValidateConfig. +func (r MigrationDataSource) ValidateConfig(ctx context.Context, req datasource.ValidateConfigRequest, resp *datasource.ValidateConfigResponse) { + var data MigrationDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + if data.Config.ValueString() != "" && !data.EnvName.IsUnknown() && data.EnvName.ValueString() == "" { + resp.Diagnostics.AddError( + "env_name is empty", + "env_name is required when config is set", + ) + } +} + // GetSchema implements datasource.DataSource. 
func (d *MigrationDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { resp.Schema = schema.Schema{ @@ -167,7 +183,7 @@ func (d *MigrationDataSource) Read(ctx context.Context, req datasource.ReadReque }) } }() - c, err := d.client(wd.Path()) + c, err := d.client(wd.Path(), cfg.Cloud) if err != nil { resp.Diagnostics.AddError("Failed to create client", err.Error()) return @@ -228,8 +244,8 @@ func (d *MigrationDataSourceModel) projectConfig(cloud *AtlasCloudBlock) (*proje if err != nil { return nil, err } - cfg := projectConfig{ - Config: defaultString(d.Config, baseAtlasHCL), + cfg := &projectConfig{ + Config: defaultString(d.Config, ""), EnvName: defaultString(d.EnvName, "tf"), Env: &envConfig{ URL: dbURL, @@ -243,16 +259,11 @@ func (d *MigrationDataSourceModel) projectConfig(cloud *AtlasCloudBlock) (*proje cloud = d.Cloud } if cloud.Valid() { - cfg.Cloud = &cloudConfig{ - Token: cloud.Token.ValueString(), - Project: cloud.Project.ValueStringPointer(), - URL: cloud.URL.ValueStringPointer(), + cfg.Cloud = &CloudConfig{ + Token: cloud.Token.ValueString(), } } if rd := d.RemoteDir; rd != nil { - if cfg.Cloud == nil { - return nil, fmt.Errorf("cloud configuration is not set") - } cfg.Env.Migration.DirURL, err = rd.AtlasURL() } else { cfg.Env.Migration.DirURL, err = absoluteFileURL( @@ -266,7 +277,7 @@ func (d *MigrationDataSourceModel) projectConfig(cloud *AtlasCloudBlock) (*proje return nil, fmt.Errorf("failed to parse variables: %w", err) } } - return &cfg, nil + return cfg, nil } // AtlasURL returns the atlas URL for the remote directory. 
diff --git a/internal/provider/atlas_migration_data_source_test.go b/internal/provider/atlas_migration_data_source_test.go index af61c12..d561d8a 100644 --- a/internal/provider/atlas_migration_data_source_test.go +++ b/internal/provider/atlas_migration_data_source_test.go @@ -69,8 +69,9 @@ func TestAccMigrationDataSource(t *testing.T) { data "atlas_migration" "hello" { # The dir attribute is required to be set, and # can't be supplied from the atlas.hcl - dir = "file://migrations?format=atlas" - config = <<-HCL + dir = "file://migrations?format=atlas" + env_name = "tf" + config = <<-HCL variable "schema_name" { type = string } @@ -122,12 +123,17 @@ func TestAccMigrationDataSource_AtlasURL(t *testing.T) { })) config = fmt.Sprintf(` data "atlas_migration" "hello" { - url = "%s" - dir = "atlas://test" - cloud { - token = "aci_bearer_token" - url = "%s" - } + url = "%s" + dir = "atlas://test" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%s" + } +} +HCL }`, dbURL, srv.URL) ) t.Cleanup(srv.Close) @@ -140,14 +146,19 @@ data "atlas_migration" "hello" { { Config: fmt.Sprintf(` data "atlas_migration" "hello" { - url = "%s" + url = "%s" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%s" + } +} +HCL remote_dir { name = "test" } - cloud { - token = "aci_bearer_token" - url = "%s" - } }`, dbURL, srv.URL), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("data.atlas_migration.hello", "id", "remote_dir://test"), diff --git a/internal/provider/atlas_migration_resource.go b/internal/provider/atlas_migration_resource.go index 8e318f8..8933202 100644 --- a/internal/provider/atlas_migration_resource.go +++ b/internal/provider/atlas_migration_resource.go @@ -307,13 +307,6 @@ func (r MigrationResource) ValidateConfig(ctx context.Context, req resource.Vali resp.Diagnostics.AddError("url is invalid", err.Error()) return case u.Scheme == SchemaTypeAtlas: - // Remote 
dir, validate config for cloud - // providerData.client is set when the provider is configured - if data.Cloud == nil && (r.cloud == nil && r.providerData.client != nil) { - resp.Diagnostics.AddError( - "cloud is unset", "cloud is required when using atlas:// URL", - ) - } if f := data.ProtectedFlows; f != nil { if d := f.MigrateDown; d != nil { if d.Allow.ValueBool() && d.AutoApprove.ValueBool() { @@ -385,6 +378,12 @@ func (r MigrationResource) ValidateConfig(ctx context.Context, req resource.Vali "`atlas_migration.next` or `atlas_migration.latest`\n", ) } + if data.Config.ValueString() != "" && !data.EnvName.IsUnknown() && data.EnvName.ValueString() == "" { + resp.Diagnostics.AddError( + "env_name is empty", + "env_name is required when config is set", + ) + } } // ModifyPlan implements resource.ResourceWithModifyPlan. @@ -416,7 +415,7 @@ func (r *MigrationResource) ModifyPlan(ctx context.Context, req resource.ModifyP }) } }() - c, err := r.client(wd.Path()) + c, err := r.client(wd.Path(), cfg.Cloud) if err != nil { resp.Diagnostics.AddError("Failed to create client", err.Error()) return @@ -513,7 +512,7 @@ func (r *MigrationResource) migrate(ctx context.Context, data *MigrationResource fmt.Sprintf("Failed to create atlas.hcl: %s", err.Error())) return } - c, err := r.client(wd.Path()) + c, err := r.client(wd.Path(), cfg.Cloud) if err != nil { diags.AddError("Failed to create client", err.Error()) return @@ -629,7 +628,7 @@ func (r *MigrationResource) buildStatus(ctx context.Context, data *MigrationReso }) } }() - c, err := r.client(wd.Path()) + c, err := r.client(wd.Path(), cfg.Cloud) if err != nil { diags.AddError("Failed to create client", err.Error()) return @@ -814,8 +813,8 @@ func (d *MigrationResourceModel) projectConfig(cloud *AtlasCloudBlock, devURL st if err != nil { return nil, err } - cfg := projectConfig{ - Config: defaultString(d.Config, baseAtlasHCL), + cfg := &projectConfig{ + Config: defaultString(d.Config, ""), EnvName: defaultString(d.EnvName, 
"tf"), Env: &envConfig{ URL: dbURL, @@ -832,16 +831,11 @@ func (d *MigrationResourceModel) projectConfig(cloud *AtlasCloudBlock, devURL st cloud = d.Cloud } if cloud.Valid() { - cfg.Cloud = &cloudConfig{ - Token: cloud.Token.ValueString(), - Project: cloud.Project.ValueStringPointer(), - URL: cloud.URL.ValueStringPointer(), + cfg.Cloud = &CloudConfig{ + Token: cloud.Token.ValueString(), } } if rd := d.RemoteDir; rd != nil { - if cfg.Cloud == nil { - return nil, fmt.Errorf("cloud configuration is not set") - } cfg.Env.Migration.DirURL, err = rd.AtlasURL() } else { cfg.Env.Migration.DirURL, err = absoluteFileURL( @@ -869,5 +863,5 @@ func (d *MigrationResourceModel) projectConfig(cloud *AtlasCloudBlock, devURL st return nil, fmt.Errorf("failed to parse variables: %w", err) } } - return &cfg, nil + return cfg, nil } diff --git a/internal/provider/atlas_migration_resource_test.go b/internal/provider/atlas_migration_resource_test.go index 5bd7977..8783e68 100644 --- a/internal/provider/atlas_migration_resource_test.go +++ b/internal/provider/atlas_migration_resource_test.go @@ -272,6 +272,7 @@ HCL # can't be supplied from the atlas.hcl dir = "file://migrations" config = local.config + env_name = "tf" variables = local.vars } resource "atlas_migration" "testdb" { @@ -279,6 +280,7 @@ HCL # can't be supplied from the atlas.hcl dir = "file://migrations" version = data.atlas_migration.hello.next + env_name = "tf" config = local.config variables = local.vars }`, mysqlURL) @@ -551,23 +553,23 @@ func TestAccMigrationResource_AtlasURL(t *testing.T) { })) config = fmt.Sprintf(` data "atlas_migration" "hello" { - url = "%[2]s" - dir = "atlas://test" - cloud { - token = "aci_bearer_token" - url = "%[1]s" - project = "test" - } + url = "%[2]s" + dir = "atlas://test" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[1]s" + } +} +HCL } resource "atlas_migration" "testdb" { url = "%[2]s" version = data.atlas_migration.hello.next dir = 
data.atlas_migration.hello.dir - cloud { - token = "aci_bearer_token" - url = "%[1]s" - project = "test" - } + config = data.atlas_migration.hello.config } `, srv.URL, dbURL) ) @@ -684,13 +686,17 @@ func TestAccMigrationResource_AtlasURL_WithTag(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } + url = "%[3]s" + dir = "atlas://test" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL } `, devURL, srv.URL, dbURL) resource.Test(t, resource.TestCase{ @@ -715,18 +721,22 @@ func TestAccMigrationResource_AtlasURL_WithTag(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test?tag=one-down" + url = "%[3]s" + dir = "atlas://test?tag=one-down" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL protected_flows { migrate_down { allow = true } } - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } } `, devURL, srv.URL, dbURL) resource.Test(t, resource.TestCase{ @@ -751,13 +761,17 @@ func TestAccMigrationResource_AtlasURL_WithTag(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test?tag=latest" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } + url = "%[3]s" + dir = "atlas://test?tag=latest" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL }`, devURL, srv.URL, dbURL) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -895,13 +909,17 @@ func TestAccMigrationResource_RequireApproval(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test?tag=latest" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = 
"test" - } + url = "%[3]s" + dir = "atlas://test?tag=latest" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL }`, devURL, srv.URL, dbURL), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("atlas_migration.hello", "id", "remote_dir://test"), @@ -923,13 +941,17 @@ func TestAccMigrationResource_RequireApproval(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test?tag=tag3" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } + url = "%[3]s" + dir = "atlas://test?tag=tag3" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL }`, devURL, srv.URL, dbURL), ExpectError: regexp.MustCompile("migrate down is not allowed, set `migrate_down.allow` to true to allow"), }, @@ -945,13 +967,17 @@ func TestAccMigrationResource_RequireApproval(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test?tag=tag3" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } + url = "%[3]s" + dir = "atlas://test?tag=tag3" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL protected_flows { migrate_down { allow = true @@ -976,13 +1002,17 @@ func TestAccMigrationResource_RequireApproval(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = "%[3]s" - dir = "atlas://test?tag=tag3" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } + url = "%[3]s" + dir = "atlas://test?tag=tag3" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL protected_flows { migrate_down { allow = true @@ -1010,13 +1040,17 @@ func TestAccMigrationResource_RequireApproval(t *testing.T) { dev_url = "%[1]s" } resource "atlas_migration" "hello" { - url = 
"%[3]s" - dir = "atlas://test?tag=tag2" - cloud { - token = "aci_bearer_token" - url = "%[2]s" - project = "test" - } + url = "%[3]s" + dir = "atlas://test?tag=tag2" + env_name = "tf" + config = <<-HCL +atlas { + cloud { + token = "aci_bearer_token" + url = "%[2]s" + } +} +HCL protected_flows { migrate_down { allow = true diff --git a/internal/provider/atlas_schema_data_source.go b/internal/provider/atlas_schema_data_source.go index 179be2b..533d15b 100644 --- a/internal/provider/atlas_schema_data_source.go +++ b/internal/provider/atlas_schema_data_source.go @@ -134,7 +134,7 @@ func (d *AtlasSchemaDataSource) Read(ctx context.Context, req datasource.ReadReq }) } }() - c, err := d.client(wd.Path()) + c, err := d.client(wd.Path(), cfg.Cloud) if err != nil { resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create client, got error: %s", err), @@ -158,7 +158,6 @@ func (d *AtlasSchemaDataSource) Read(ctx context.Context, req datasource.ReadReq func (d *AtlasSchemaDataSourceModel) projectConfig(cloud *AtlasCloudBlock, devURL string) (*projectConfig, *atlas.WorkingDir, error) { cfg := &projectConfig{ - Config: baseAtlasHCL, EnvName: "tf", Env: &envConfig{ URL: "file://schema.hcl", @@ -166,7 +165,7 @@ func (d *AtlasSchemaDataSourceModel) projectConfig(cloud *AtlasCloudBlock, devUR }, } if cloud.Valid() { - cfg.Cloud = &cloudConfig{ + cfg.Cloud = &CloudConfig{ Token: cloud.Token.ValueString(), } } diff --git a/internal/provider/atlas_schema_resource.go b/internal/provider/atlas_schema_resource.go index 281defc..09b21e2 100644 --- a/internal/provider/atlas_schema_resource.go +++ b/internal/provider/atlas_schema_resource.go @@ -286,7 +286,7 @@ func (r *AtlasSchemaResource) ModifyPlan(ctx context.Context, req resource.Modif resp.Diagnostics.Append(PrintPlanSQL(ctx, r.cloud, r.client, r.getDevURL(plan.DevURL), plan)...) 
} -func PrintPlanSQL(ctx context.Context, cloud *AtlasCloudBlock, fn func(string) (AtlasExec, error), devURL string, data *AtlasSchemaResourceModel) (diags diag.Diagnostics) { +func PrintPlanSQL(ctx context.Context, cloud *AtlasCloudBlock, fn func(string, *CloudConfig) (AtlasExec, error), devURL string, data *AtlasSchemaResourceModel) (diags diag.Diagnostics) { cfg, wd, err := data.projectConfig(cloud, devURL) if err != nil { diags.AddError("HCL Error", @@ -301,7 +301,7 @@ func PrintPlanSQL(ctx context.Context, cloud *AtlasCloudBlock, fn func(string) ( }) } }() - c, err := fn(wd.Path()) + c, err := fn(wd.Path(), cfg.Cloud) if err != nil { diags.AddError("Client Error", fmt.Sprintf("Unable to create client, got error: %s", err), @@ -346,7 +346,7 @@ func (r *AtlasSchemaResource) readSchema(ctx context.Context, data *AtlasSchemaR }) } }() - c, err := r.client(wd.Path()) + c, err := r.client(wd.Path(), cfg.Cloud) if err != nil { diags.AddError("Client Error", fmt.Sprintf("Unable to create client, got error: %s", err), @@ -382,7 +382,7 @@ func (r *AtlasSchemaResource) applySchema(ctx context.Context, data *AtlasSchema }) } }() - c, err := r.client(wd.Path()) + c, err := r.client(wd.Path(), cfg.Cloud) if err != nil { diags.AddError("Client Error", fmt.Sprintf("Unable to create client, got error: %s", err), @@ -418,7 +418,7 @@ func (r *AtlasSchemaResource) firstRunCheck(ctx context.Context, data *AtlasSche }) } }() - c, err := r.client(wd.Path()) + c, err := r.client(wd.Path(), cfg.Cloud) if err != nil { diags.AddError("Client Error", fmt.Sprintf("Unable to create client, got error: %s", err), @@ -459,7 +459,6 @@ func (data *AtlasSchemaResourceModel) projectConfig(cloud *AtlasCloudBlock, devd return nil, nil, err } cfg := &projectConfig{ - Config: baseAtlasHCL, EnvName: "tf", Env: &envConfig{ URL: dbURL, @@ -469,7 +468,7 @@ func (data *AtlasSchemaResourceModel) projectConfig(cloud *AtlasCloudBlock, devd }, } if cloud.Valid() { - cfg.Cloud = &cloudConfig{ + cfg.Cloud = 
&CloudConfig{ Token: cloud.Token.ValueString(), } } diff --git a/internal/provider/atlas_schema_resource_test.go b/internal/provider/atlas_schema_resource_test.go index b41c9ce..e23696f 100644 --- a/internal/provider/atlas_schema_resource_test.go +++ b/internal/provider/atlas_schema_resource_test.go @@ -650,7 +650,7 @@ table "orders" { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotDiags := provider.PrintPlanSQL(tt.args.ctx, nil, func(wd string) (provider.AtlasExec, error) { + gotDiags := provider.PrintPlanSQL(tt.args.ctx, nil, func(wd string, _ *provider.CloudConfig) (provider.AtlasExec, error) { return atlas.NewClient(wd, "atlas") }, mysqlDevURL, tt.args.data) require.Equal(t, tt.wantDiags, gotDiags) diff --git a/internal/provider/builder.go b/internal/provider/builder.go index f3a86ef..c8065cb 100644 --- a/internal/provider/builder.go +++ b/internal/provider/builder.go @@ -24,7 +24,7 @@ import ( type ( // projectConfig is the builder for the atlas.hcl file. projectConfig struct { - Cloud *cloudConfig + Cloud *CloudConfig Env *envConfig Config string // The base atlas.hcl to merge with, provided by the user @@ -41,10 +41,8 @@ type ( Diff *Diff Migration *migrationConfig } - cloudConfig struct { - Token string - Project *string - URL *string + CloudConfig struct { + Token string } migrationConfig struct { DirURL string @@ -54,97 +52,82 @@ type ( } ) -// we will allow the user configure the base atlas.hcl file -const baseAtlasHCL = "env {\n name = atlas.env\n}" - // Render writes the atlas config to the given writer. func (c *projectConfig) Render(w io.Writer) error { dst, diags := hclwrite.ParseConfig([]byte(c.Config), "atlas.hcl", hcl.InitialPos) if diags.HasErrors() { return diags } - mergeFile(dst, c.File()) + if err := mergeEnvBlock(dst.Body(), c.Env.AsBlock(), c.EnvName); err != nil { + return err + } _, err := dst.WriteTo(w) return err } -// File returns the HCL file representation of the project config. 
-func (c *projectConfig) File() *hclwrite.File { - f := hclwrite.NewEmptyFile() - r := f.Body() - if cloud := c.Cloud; cloud != nil { - a := r.AppendNewBlock("atlas", nil).Body() - c := a.AppendNewBlock("cloud", nil).Body() - c.SetAttributeValue("token", cty.StringVal(cloud.Token)) - if cloud.Project != nil { - c.SetAttributeValue("project", cty.StringVal(*cloud.Project)) - } - if cloud.URL != nil { - c.SetAttributeValue("url", cty.StringVal(*cloud.URL)) - } +// AsBlock returns the HCL block for the environment configuration. +func (env *envConfig) AsBlock() *hclwrite.Block { + blk := hclwrite.NewBlock("env", nil) + e := blk.Body() + if env.URL != "" { + e.SetAttributeValue("url", cty.StringVal(env.URL)) } - if env := c.Env; env != nil { - e := r.AppendNewBlock("env", nil).Body() - if env.URL != "" { - e.SetAttributeValue("url", cty.StringVal(env.URL)) - } - if env.DevURL != "" { - e.SetAttributeValue("dev", cty.StringVal(env.DevURL)) + if env.DevURL != "" { + e.SetAttributeValue("dev", cty.StringVal(env.DevURL)) + } + if env.Source != "" { + e.SetAttributeValue("src", cty.StringVal(env.Source)) + } + if l := deleteZero(env.Schemas); len(l) > 0 { + e.SetAttributeValue("schemas", listStringVal(l)) + } + if l := deleteZero(env.Exclude); len(l) > 0 { + e.SetAttributeValue("exclude", listStringVal(l)) + } + if md := env.Migration; md != nil { + m := e.AppendNewBlock("migration", nil).Body() + if md.DirURL != "" { + m.SetAttributeValue("dir", cty.StringVal(md.DirURL)) } - if env.Source != "" { - e.SetAttributeValue("src", cty.StringVal(env.Source)) + if md.Baseline != "" { + m.SetAttributeValue("baseline", cty.StringVal(md.Baseline)) } - if l := deleteZero(env.Schemas); len(l) > 0 { - e.SetAttributeValue("schemas", listStringVal(l)) + if md.ExecOrder != "" { + m.SetAttributeTraversal("exec_order", hcl.Traversal{ + hcl.TraverseRoot{Name: hclValue(md.ExecOrder)}, + }) } - if l := deleteZero(env.Exclude); len(l) > 0 { - e.SetAttributeValue("exclude", listStringVal(l)) + if 
md.RevisionsSchema != "" { + m.SetAttributeValue("revisions_schema", cty.StringVal(md.RevisionsSchema)) } - if md := env.Migration; md != nil { - m := e.AppendNewBlock("migration", nil).Body() - if md.DirURL != "" { - m.SetAttributeValue("dir", cty.StringVal(md.DirURL)) - } - if md.Baseline != "" { - m.SetAttributeValue("baseline", cty.StringVal(md.Baseline)) - } - if md.ExecOrder != "" { - m.SetAttributeTraversal("exec_order", hcl.Traversal{ - hcl.TraverseRoot{Name: hclValue(md.ExecOrder)}, - }) - } - if md.RevisionsSchema != "" { - m.SetAttributeValue("revisions_schema", cty.StringVal(md.RevisionsSchema)) - } + } + if dd := env.Diff; dd != nil { + d := e.AppendNewBlock("diff", nil).Body() + if v := dd.ConcurrentIndex; v != nil { + b := d.AppendNewBlock("concurrent_index", nil).Body() + attrBoolPtr(b, v.Create, "create") + attrBoolPtr(b, v.Drop, "drop") } - if dd := env.Diff; dd != nil { - d := e.AppendNewBlock("diff", nil).Body() - if v := dd.ConcurrentIndex; v != nil { - b := d.AppendNewBlock("concurrent_index", nil).Body() - attrBoolPtr(b, v.Create, "create") - attrBoolPtr(b, v.Drop, "drop") - } - if v := dd.Skip; v != nil { - b := d.AppendNewBlock("skip", nil).Body() - attrBoolPtr(b, v.AddSchema, "add_schema") - attrBoolPtr(b, v.DropSchema, "drop_schema") - attrBoolPtr(b, v.ModifySchema, "modify_schema") - attrBoolPtr(b, v.AddTable, "add_table") - attrBoolPtr(b, v.DropTable, "drop_table") - attrBoolPtr(b, v.ModifyTable, "modify_table") - attrBoolPtr(b, v.AddColumn, "add_column") - attrBoolPtr(b, v.DropColumn, "drop_column") - attrBoolPtr(b, v.ModifyColumn, "modify_column") - attrBoolPtr(b, v.AddIndex, "add_index") - attrBoolPtr(b, v.DropIndex, "drop_index") - attrBoolPtr(b, v.ModifyIndex, "modify_index") - attrBoolPtr(b, v.AddForeignKey, "add_foreign_key") - attrBoolPtr(b, v.DropForeignKey, "drop_foreign_key") - attrBoolPtr(b, v.ModifyForeignKey, "modify_foreign_key") - } + if v := dd.Skip; v != nil { + b := d.AppendNewBlock("skip", nil).Body() + 
attrBoolPtr(b, v.AddSchema, "add_schema") + attrBoolPtr(b, v.DropSchema, "drop_schema") + attrBoolPtr(b, v.ModifySchema, "modify_schema") + attrBoolPtr(b, v.AddTable, "add_table") + attrBoolPtr(b, v.DropTable, "drop_table") + attrBoolPtr(b, v.ModifyTable, "modify_table") + attrBoolPtr(b, v.AddColumn, "add_column") + attrBoolPtr(b, v.DropColumn, "drop_column") + attrBoolPtr(b, v.ModifyColumn, "modify_column") + attrBoolPtr(b, v.AddIndex, "add_index") + attrBoolPtr(b, v.DropIndex, "drop_index") + attrBoolPtr(b, v.ModifyIndex, "modify_index") + attrBoolPtr(b, v.AddForeignKey, "add_foreign_key") + attrBoolPtr(b, v.DropForeignKey, "drop_foreign_key") + attrBoolPtr(b, v.ModifyForeignKey, "modify_foreign_key") } } - return f + return blk } // DirURL returns the URL to the migration directory. @@ -229,33 +212,36 @@ func hclValue(s string) string { return strings.ReplaceAll(strings.ToUpper(s), "-", "_") } -func parseConfig(cfg string) (*hclwrite.File, error) { - f, diags := hclwrite.ParseConfig([]byte(cfg), "atlas.hcl", hcl.InitialPos) - if diags.HasErrors() { - return nil, diags +func mergeEnvBlock(dst *hclwrite.Body, blk *hclwrite.Block, name string) error { + blocks := dst.Blocks() + envBlocks := make([]*hclwrite.Block, 0, len(blocks)) + for _, b := range blocks { + if b.Type() == "env" { + envBlocks = append(envBlocks, b) + } } - return f, nil -} - -func mergeFile(dst, src *hclwrite.File) { - dstBody, srcBody := dst.Body(), src.Body() - dstBlocks := make(map[string]*hclwrite.Block) - for _, blk := range dstBody.Blocks() { - dstBlocks[address(blk)] = blk - } - for _, blk := range srcBody.Blocks() { - if dstBlk, ok := dstBlocks[address(blk)]; ok { - // Merge the blocks if they have the same address. - mergeBlock(dstBlk, blk) - } else { - appendBlock(dstBody, blk) + if len(envBlocks) == 0 { + // No env blocks found, create a new one. + mergeBlock(dst.AppendNewBlock("env", []string{name}), blk) + return nil + } + // Check if there is an env block with the given name. 
+ env := slices.IndexFunc(envBlocks, func(b *hclwrite.Block) bool { + labels := b.Labels() + return len(labels) == 1 && labels[0] == name + }) + if env == -1 { + // No block matched, check if there is an unnamed env block. + env = slices.IndexFunc(envBlocks, func(b *hclwrite.Block) bool { + return len(b.Labels()) == 0 + }) + if env == -1 { + return fmt.Errorf(`the env block %q was not found in the given config`, name) + } } -} - -func address(block *hclwrite.Block) string { - parts := append([]string{block.Type()}, block.Labels()...) - return strings.Join(parts, ".") + // Found the block to merge with. + mergeBlock(envBlocks[env], blk) + return nil } func mergeBlock(dst, src *hclwrite.Block) { diff --git a/internal/provider/builder_test.go b/internal/provider/builder_test.go index c09f931..9e66570 100644 --- a/internal/provider/builder_test.go +++ b/internal/provider/builder_test.go @@ -8,6 +8,8 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclwrite" "github.com/stretchr/testify/require" ) @@ -18,10 +20,7 @@ func TestTemplate(t *testing.T) { data projectConfig }{ {name: "token", data: projectConfig{ - Config: baseAtlasHCL, - Cloud: &cloudConfig{ - Token: "token+%=_-", - }, + EnvName: "tf", Env: &envConfig{ URL: "mysql://user:pass@localhost:3306/tf-db", Migration: &migrationConfig{ @@ -30,12 +29,7 @@ }, }}, {name: "cloud", data: projectConfig{ - Config: baseAtlasHCL, - Cloud: &cloudConfig{ - Token: "token", - URL: ptr("url"), - Project: ptr("project"), - }, + EnvName: "tf", Env: &envConfig{ URL: "mysql://user:pass@localhost:3306/tf-db", Migration: &migrationConfig{ @@ -44,7 +38,7 @@ }, }}, {name: "local", data: projectConfig{ - Config: baseAtlasHCL, + EnvName: "tf", Env: &envConfig{ URL: "mysql://user:pass@localhost:3306/tf-db", Migration: &migrationConfig{ @@ -53,7 +47,7 @@ }, }}, {name: "local-exec-order", data: 
projectConfig{ - Config: baseAtlasHCL, + EnvName: "tf", Env: &envConfig{ URL: "mysql://user:pass@localhost:3306/tf-db", Migration: &migrationConfig{ @@ -63,7 +57,7 @@ func TestTemplate(t *testing.T) { }, }}, {name: "baseline", data: projectConfig{ - Config: baseAtlasHCL, + EnvName: "tf", Env: &envConfig{ URL: "mysql://user:pass@localhost:3306/tf-db", Migration: &migrationConfig{ @@ -73,10 +67,7 @@ func TestTemplate(t *testing.T) { }, }}, {name: "cloud-tag", data: projectConfig{ - Config: baseAtlasHCL, - Cloud: &cloudConfig{ - Token: "token", - }, + EnvName: "tf", Env: &envConfig{ URL: "mysql://user:pass@localhost:3306/tf-db", Migration: &migrationConfig{ @@ -107,7 +98,8 @@ func TestTemplate(t *testing.T) { func Test_SchemaTemplate(t *testing.T) { data := &projectConfig{ - Config: baseAtlasHCL, + Config: "", + EnvName: "tf", Env: &envConfig{ Source: "file://schema.hcl", URL: "mysql://user:pass@localhost:3306/tf-db", @@ -126,11 +118,10 @@ func Test_SchemaTemplate(t *testing.T) { out := &bytes.Buffer{} require.NoError(t, data.Render(out)) - require.Equal(t, `env { - name = atlas.env - dev = "mysql://user:pass@localhost:3307/tf-db" - src = "file://schema.hcl" - url = "mysql://user:pass@localhost:3306/tf-db" + require.Equal(t, `env "tf" { + dev = "mysql://user:pass@localhost:3307/tf-db" + src = "file://schema.hcl" + url = "mysql://user:pass@localhost:3306/tf-db" diff { concurrent_index { create = true @@ -140,40 +131,98 @@ func Test_SchemaTemplate(t *testing.T) { add_index = true } } -}`, out.String()) } +`, out.String()) +} + +func Test_mergeEnv(t *testing.T) { + envBlock := (&envConfig{ + URL: "sqlite://file.db", + DevURL: "sqlite://file?mode=memory", + Migration: &migrationConfig{ + DirURL: "file://migrations", + }, + }).AsBlock() -func Test_mergeFile(t *testing.T) { + // Merge with existing env block. 
dst, err := parseConfig(` -atlas {} +env "foo" { +} +`) + require.NoError(t, err) + require.NoError(t, mergeEnvBlock(dst.Body(), envBlock, "foo")) + require.Equal(t, ` +env "foo" { + dev = "sqlite://file?mode=memory" + url = "sqlite://file.db" + migration { + dir = "file://migrations" + } +} +`, string(dst.Bytes())) + + // Merge with non-existing env block. + dst, err = parseConfig(` +env "bar" { +} +`) + require.NoError(t, err) + require.ErrorContains(t, mergeEnvBlock(dst.Body(), envBlock, "foo"), `the env block "foo" was not found in the given config`) + + // Merge with un-named env block. + dst, err = parseConfig(` env { name = atlas.env } `) require.NoError(t, err) - - src, err := parseConfig(` -atlas { - cloud { - token = "aci_token" + require.NoError(t, mergeEnvBlock(dst.Body(), envBlock, "foo")) + require.Equal(t, ` +env { + name = atlas.env + dev = "sqlite://file?mode=memory" + url = "sqlite://file.db" + migration { + dir = "file://migrations" } } +`, string(dst.Bytes())) + + // Merge with existing env block and un-named env block. + dst, err = parseConfig(` +env "foo" { +} env { - url = "sqlite://file.db" + name = atlas.env +} +`) + require.NoError(t, err) + require.NoError(t, mergeEnvBlock(dst.Body(), envBlock, "foo")) + require.Equal(t, ` +env "foo" { dev = "sqlite://file?mode=memory" + url = "sqlite://file.db" migration { - dir = "file://migrations" - } + dir = "file://migrations" + } +} +env { + name = atlas.env +} +`, string(dst.Bytes())) + + // Merge with un-named env block. 
+ dst, err = parseConfig(` +env "foo" { +} +env { + name = atlas.env } `) require.NoError(t, err) - mergeFile(dst, src) - + require.NoError(t, mergeEnvBlock(dst.Body(), envBlock, "bar")) require.Equal(t, ` -atlas { - cloud { - token = "aci_token" - } +env "foo" { } env { name = atlas.env @@ -199,3 +248,11 @@ func checkContent(t *testing.T, actual string, gen func(string) error) { func ptr[T any](s T) *T { return &s } + +func parseConfig(cfg string) (*hclwrite.File, error) { + f, diags := hclwrite.ParseConfig([]byte(cfg), "atlas.hcl", hcl.InitialPos) + if diags.HasErrors() { + return nil, diags + } + return f, nil +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 8f1d9b9..80bd195 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -62,7 +62,7 @@ type ( providerData struct { // client is the factory function to create a new AtlasExec client. // It is set during the provider configuration. - client func(wd string) (AtlasExec, error) + client func(wd string, c *CloudConfig) (AtlasExec, error) // devURL is the URL of the dev-db. devURL string // cloud is the Atlas Cloud configuration. 
@@ -149,19 +149,28 @@ func (p *AtlasProvider) Configure(ctx context.Context, req provider.ConfigureReq if s := model.BinaryPath.ValueString(); s != "" { binPath = s } - fnClient := func(wd string) (AtlasExec, error) { + fnClient := func(wd string, cloud *CloudConfig) (AtlasExec, error) { c, err := atlas.NewClient(wd, binPath) if err != nil { return nil, err } env := atlas.NewOSEnviron() env["ATLAS_INTEGRATION"] = fmt.Sprintf("terraform-provider-atlas/v%s", p.version) + if cloud != nil && cloud.Token != "" { + env["ATLAS_TOKEN"] = cloud.Token + } if err = c.SetEnv(env); err != nil { return nil, err } return c, nil } - c, err := fnClient("") + var cloud *CloudConfig + if model != nil && model.Cloud.Valid() { + cloud = &CloudConfig{ + Token: model.Cloud.Token.ValueString(), + } + } + c, err := fnClient("", cloud) if err != nil { resp.Diagnostics.AddError("Failed to create client", err.Error()) return @@ -178,14 +187,6 @@ func (p *AtlasProvider) Configure(ctx context.Context, req provider.ConfigureReq tflog.Debug(ctx, "found atlas-cli", map[string]any{ "version": version, }) - if model != nil && model.Cloud != nil && model.Cloud.Token.ValueString() != "" { - if err := c.Login(ctx, &atlas.LoginParams{ - Token: model.Cloud.Token.ValueString(), - }); err != nil { - resp.Diagnostics.AddError("Login failure", err.Error()) - return - } - } p.data = providerData{client: fnClient, cloud: model.Cloud, version: p.version} if model != nil { p.data.devURL = model.DevURL.ValueString() diff --git a/internal/provider/testdata/TestTemplate/baseline-cfg.hcl b/internal/provider/testdata/TestTemplate/baseline-cfg.hcl index c3d0da3..088d2eb 100644 --- a/internal/provider/testdata/TestTemplate/baseline-cfg.hcl +++ b/internal/provider/testdata/TestTemplate/baseline-cfg.hcl @@ -1,8 +1,7 @@ -env { - name = atlas.env - url = "mysql://user:pass@localhost:3306/tf-db" +env "tf" { + url = "mysql://user:pass@localhost:3306/tf-db" migration { dir = "file://migrations" baseline = "100000" } -} \ No 
newline at end of file +} diff --git a/internal/provider/testdata/TestTemplate/cloud-cfg.hcl b/internal/provider/testdata/TestTemplate/cloud-cfg.hcl index 2b7dcd4..3e5a391 100644 --- a/internal/provider/testdata/TestTemplate/cloud-cfg.hcl +++ b/internal/provider/testdata/TestTemplate/cloud-cfg.hcl @@ -1,14 +1,6 @@ -env { - name = atlas.env - url = "mysql://user:pass@localhost:3306/tf-db" +env "tf" { + url = "mysql://user:pass@localhost:3306/tf-db" migration { dir = "atlas://tf-dir?tag=latest" } } -atlas { - cloud { - token = "token" - project = "project" - url = "url" - } -} diff --git a/internal/provider/testdata/TestTemplate/cloud-tag-cfg.hcl b/internal/provider/testdata/TestTemplate/cloud-tag-cfg.hcl index 4d434a9..857115d 100644 --- a/internal/provider/testdata/TestTemplate/cloud-tag-cfg.hcl +++ b/internal/provider/testdata/TestTemplate/cloud-tag-cfg.hcl @@ -1,12 +1,6 @@ -env { - name = atlas.env - url = "mysql://user:pass@localhost:3306/tf-db" +env "tf" { + url = "mysql://user:pass@localhost:3306/tf-db" migration { dir = "atlas://tf-dir?tag=tag" } } -atlas { - cloud { - token = "token" - } -} diff --git a/internal/provider/testdata/TestTemplate/local-cfg.hcl b/internal/provider/testdata/TestTemplate/local-cfg.hcl index f625932..edfcc86 100644 --- a/internal/provider/testdata/TestTemplate/local-cfg.hcl +++ b/internal/provider/testdata/TestTemplate/local-cfg.hcl @@ -1,7 +1,6 @@ -env { - name = atlas.env - url = "mysql://user:pass@localhost:3306/tf-db" +env "tf" { + url = "mysql://user:pass@localhost:3306/tf-db" migration { dir = "file://migrations" } -} \ No newline at end of file +} diff --git a/internal/provider/testdata/TestTemplate/local-exec-order-cfg.hcl b/internal/provider/testdata/TestTemplate/local-exec-order-cfg.hcl index 125e475..3c11830 100644 --- a/internal/provider/testdata/TestTemplate/local-exec-order-cfg.hcl +++ b/internal/provider/testdata/TestTemplate/local-exec-order-cfg.hcl @@ -1,8 +1,7 @@ -env { - name = atlas.env - url = 
"mysql://user:pass@localhost:3306/tf-db" +env "tf" { + url = "mysql://user:pass@localhost:3306/tf-db" migration { dir = "file://migrations" exec_order = LINEAR_SKIP } -} \ No newline at end of file +} diff --git a/internal/provider/testdata/TestTemplate/token-cfg.hcl b/internal/provider/testdata/TestTemplate/token-cfg.hcl index fa649fc..edfcc86 100644 --- a/internal/provider/testdata/TestTemplate/token-cfg.hcl +++ b/internal/provider/testdata/TestTemplate/token-cfg.hcl @@ -1,12 +1,6 @@ -env { - name = atlas.env - url = "mysql://user:pass@localhost:3306/tf-db" +env "tf" { + url = "mysql://user:pass@localhost:3306/tf-db" migration { dir = "file://migrations" } } -atlas { - cloud { - token = "token+%=_-" - } -}