From c3c8d664e5046299fe4c906ebb7dddd70fb9922b Mon Sep 17 00:00:00 2001 From: Bohdan Siryk Date: Tue, 27 Feb 2024 16:03:19 +0200 Subject: [PATCH] zookeeper cr codebase was refactored --- .secrets.baseline | 30 +- apis/clusters/v1beta1/structs.go | 295 ------------------ apis/clusters/v1beta1/validation.go | 100 +----- apis/clusters/v1beta1/validation_test.go | 258 --------------- apis/clusters/v1beta1/zookeeper_types.go | 183 ++++++----- apis/clusters/v1beta1/zookeeper_webhook.go | 22 +- .../clusters/v1beta1/zz_generated.deepcopy.go | 143 +++------ .../clusters.instaclustr.com_zookeepers.yaml | 146 ++++++--- .../samples/clusters_v1beta1_cassandra.yaml | 2 +- .../samples/clusters_v1beta1_zookeeper.yaml | 6 +- .../awsencryptionkey_controller.go | 8 +- .../awsendpointserviceprincipal_controller.go | 8 +- ...awssecuritygroupfirewallrule_controller.go | 8 +- .../awsvpcpeering_controller.go | 10 +- .../azurevnetpeering_controller.go | 8 +- .../clusternetworkfirewallrule_controller.go | 4 +- .../gcpvpcpeering_controller.go | 8 +- controllers/clusters/cadence_controller.go | 12 +- controllers/clusters/cassandra_controller.go | 32 +- .../clusters/datatest/zookeeper_v1beta1.yaml | 6 +- controllers/clusters/helpers.go | 94 ------ controllers/clusters/kafka_controller.go | 12 +- .../clusters/kafkaconnect_controller.go | 8 +- controllers/clusters/opensearch_controller.go | 10 +- controllers/clusters/postgresql_controller.go | 12 +- controllers/clusters/redis_controller.go | 10 +- controllers/clusters/zookeeper_controller.go | 166 +++++----- .../kafkamanagement/mirror_controller.go | 6 +- pkg/instaclustr/client.go | 10 +- pkg/instaclustr/interfaces.go | 2 +- pkg/instaclustr/mock/client.go | 2 +- pkg/models/redis_apiv2.go | 4 +- pkg/models/zookeeper_apiv2.go | 22 +- pkg/scheduler/scheduler.go | 2 +- 34 files changed, 469 insertions(+), 1180 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 8c2f2d68e..17b680fc9 100644 --- a/.secrets.baseline +++ 
b/.secrets.baseline @@ -75,10 +75,6 @@ { "path": "detect_secrets.filters.allowlist.is_line_allowlisted" }, - { - "path": "detect_secrets.filters.common.is_baseline_file", - "filename": ".secrets.baseline" - }, { "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies", "min_level": 2 @@ -208,7 +204,7 @@ "filename": "apis/clusters/v1beta1/cassandra_webhook.go", "hashed_secret": "e0a46b27231f798fe22dc4d5d82b5feeb5dcf085", "is_verified": false, - "line_number": 235 + "line_number": 236 } ], "apis/clusters/v1beta1/kafka_types.go": [ @@ -351,7 +347,7 @@ "filename": "apis/clusters/v1beta1/redis_webhook.go", "hashed_secret": "bc1c5ae5fd4a238d86261f422e62c489de408c22", "is_verified": false, - "line_number": 322 + "line_number": 323 } ], "apis/clusters/v1beta1/zz_generated.deepcopy.go": [ @@ -360,7 +356,7 @@ "filename": "apis/clusters/v1beta1/zz_generated.deepcopy.go", "hashed_secret": "44e17306b837162269a410204daaa5ecee4ec22c", "is_verified": false, - "line_number": 1316 + "line_number": 665 } ], "apis/kafkamanagement/v1beta1/kafkauser_types.go": [ @@ -529,6 +525,22 @@ "line_number": 51 } ], + "controllers/clusters/helpers.go": [ + { + "type": "Secret Keyword", + "filename": "controllers/clusters/helpers.go", + "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", + "is_verified": false, + "line_number": 119 + }, + { + "type": "Secret Keyword", + "filename": "controllers/clusters/helpers.go", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 124 + } + ], "controllers/clusters/kafkaconnect_controller_test.go": [ { "type": "Secret Keyword", @@ -709,7 +721,7 @@ "filename": "pkg/instaclustr/client.go", "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 2078 + "line_number": 2084 } ], "pkg/instaclustr/mock/client.go": [ @@ -1116,5 +1128,5 @@ } ] }, - "generated_at": "2024-02-28T09:07:57Z" + "generated_at": "2024-02-28T12:21:11Z" } diff --git 
a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go index 164758108..ab4e0797d 100644 --- a/apis/clusters/v1beta1/structs.go +++ b/apis/clusters/v1beta1/structs.go @@ -32,18 +32,6 @@ type CloudProviderSettings struct { DisableSnapshotAutoExpiry bool `json:"disableSnapshotAutoExpiry,omitempty"` } -type DataCentre struct { - Name string `json:"name,omitempty"` - Region string `json:"region"` - CloudProvider string `json:"cloudProvider"` - ProviderAccountName string `json:"accountName,omitempty"` - CloudProviderSettings []*CloudProviderSettings `json:"cloudProviderSettings,omitempty"` - Network string `json:"network"` - NodeSize string `json:"nodeSize"` - NodesNumber int `json:"nodesNumber"` - Tags map[string]string `json:"tags,omitempty"` -} - type DataCentreStatus struct { Name string `json:"name,omitempty"` ID string `json:"id,omitempty"` @@ -66,41 +54,12 @@ type RestoreCustomVPCSettings struct { Network string `json:"network"` } -type Options struct { - DataNodeSize string `json:"dataNodeSize,omitempty"` - MasterNodeSize string `json:"masterNodeSize,omitempty"` - OpenSearchDashboardsNodeSize string `json:"openSearchDashboardsNodeSize,omitempty"` -} - -type Cluster struct { - // Name [ 3 .. 32 ] characters. - Name string `json:"name,omitempty"` - - Version string `json:"version,omitempty"` - - // The PCI compliance standards relate to the security of user data and transactional information. - // Can only be applied clusters provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch and Redis. - PCICompliance bool `json:"pciCompliance,omitempty"` - - PrivateNetworkCluster bool `json:"privateNetworkCluster,omitempty"` - - // Non-production clusters may receive lower priority support and reduced SLAs. - // Production tier is not available when using Developer class nodes. See SLA Tier for more information. - // Enum: "PRODUCTION" "NON_PRODUCTION". 
- SLATier string `json:"slaTier,omitempty"` - - TwoFactorDelete []*TwoFactorDelete `json:"twoFactorDelete,omitempty"` - - Description string `json:"description,omitempty"` -} - type ClusterStatus struct { ID string `json:"id,omitempty"` State string `json:"state,omitempty"` DataCentres []*DataCentreStatus `json:"dataCentres,omitempty"` CDCID string `json:"cdcid,omitempty"` TwoFactorDeleteEnabled bool `json:"twoFactorDeleteEnabled,omitempty"` - Options *Options `json:"options,omitempty"` CurrentClusterOperationStatus string `json:"currentClusterOperationStatus,omitempty"` MaintenanceEvents []*clusterresource.ClusteredMaintenanceEventStatus `json:"maintenanceEvents,omitempty"` NodeCount string `json:"nodeCount,omitempty"` @@ -223,10 +182,6 @@ func privateLinksToInstAPI(p []*PrivateLink) []*models.PrivateLink { return links } -type PrivateLinkV1 struct { - IAMPrincipalARNs []string `json:"iamPrincipalARNs"` -} - type immutableCluster struct { Name string Version string @@ -295,30 +250,6 @@ type ReplaceOperation struct { Status string `json:"status,omitempty"` } -func (c *Cluster) IsEqual(cluster Cluster) bool { - return c.Name == cluster.Name && - c.Version == cluster.Version && - c.PCICompliance == cluster.PCICompliance && - c.PrivateNetworkCluster == cluster.PrivateNetworkCluster && - c.SLATier == cluster.SLATier && - c.Description == cluster.Description && - c.IsTwoFactorDeleteEqual(cluster.TwoFactorDelete) -} - -func (c *Cluster) IsTwoFactorDeleteEqual(tfds []*TwoFactorDelete) bool { - if len(c.TwoFactorDelete) != len(tfds) { - return false - } - - for i, tfd := range tfds { - if *tfd != *c.TwoFactorDelete[i] { - return false - } - } - - return true -} - func (tfd *TwoFactorDelete) ToInstAPI() *models.TwoFactorDelete { return &models.TwoFactorDelete{ ConfirmationPhoneNumber: tfd.Phone, @@ -326,81 +257,6 @@ func (tfd *TwoFactorDelete) ToInstAPI() *models.TwoFactorDelete { } } -func (c *Cluster) TwoFactorDeletesToInstAPI() (TFDs []*models.TwoFactorDelete) { - for _, 
tfd := range c.TwoFactorDelete { - TFDs = append(TFDs, tfd.ToInstAPI()) - } - return -} - -func (c *Cluster) ClusterSettingsUpdateToInstAPI() *models.ClusterSettings { - settingsToAPI := &models.ClusterSettings{} - if c.TwoFactorDelete != nil { - iTFD := &models.TwoFactorDelete{} - for _, tfd := range c.TwoFactorDelete { - iTFD = tfd.ToInstAPI() - } - settingsToAPI.TwoFactorDelete = iTFD - } - settingsToAPI.Description = c.Description - - return settingsToAPI -} - -func (c *Cluster) TwoFactorDeleteToInstAPIv1() *models.TwoFactorDeleteV1 { - if len(c.TwoFactorDelete) == 0 { - return nil - } - - return &models.TwoFactorDeleteV1{ - DeleteVerifyEmail: c.TwoFactorDelete[0].Email, - DeleteVerifyPhone: c.TwoFactorDelete[0].Phone, - } -} - -func (dc *DataCentre) ToInstAPI() models.DataCentre { - providerSettings := dc.CloudProviderSettingsToInstAPI() - return models.DataCentre{ - Name: dc.Name, - Network: dc.Network, - NodeSize: dc.NodeSize, - NumberOfNodes: dc.NodesNumber, - AWSSettings: providerSettings.AWSSettings, - GCPSettings: providerSettings.GCPSettings, - AzureSettings: providerSettings.AzureSettings, - Tags: dc.TagsToInstAPI(), - CloudProvider: dc.CloudProvider, - Region: dc.Region, - ProviderAccountName: dc.ProviderAccountName, - } -} - -func (dc *DataCentre) CloudProviderSettingsToInstAPI() *models.CloudProviderSettings { - iSettings := &models.CloudProviderSettings{} - switch dc.CloudProvider { - case models.AWSVPC: - awsSettings := []*models.AWSSetting{} - for _, providerSettings := range dc.CloudProviderSettings { - awsSettings = append(awsSettings, providerSettings.AWSToInstAPI()) - } - iSettings.AWSSettings = awsSettings - case models.AZUREAZ: - azureSettings := []*models.AzureSetting{} - for _, providerSettings := range dc.CloudProviderSettings { - azureSettings = append(azureSettings, providerSettings.AzureToInstAPI()) - } - iSettings.AzureSettings = azureSettings - case models.GCP: - gcpSettings := []*models.GCPSetting{} - for _, providerSettings := 
range dc.CloudProviderSettings { - gcpSettings = append(gcpSettings, providerSettings.GCPToInstAPI()) - } - iSettings.GCPSettings = gcpSettings - } - - return iSettings -} - func (cps *CloudProviderSettings) AWSToInstAPI() *models.AWSSetting { return &models.AWSSetting{ EBSEncryptionKey: cps.DiskEncryptionKey, @@ -422,62 +278,6 @@ func (cps *CloudProviderSettings) GCPToInstAPI() *models.GCPSetting { } } -func (dc *DataCentre) TagsToInstAPI() (tags []*models.Tag) { - for key, value := range dc.Tags { - tags = append(tags, &models.Tag{ - Key: key, - Value: value, - }) - } - - return -} - -func (dc *DataCentre) IsEqual(iDC DataCentre) bool { - return iDC.Region == dc.Region && - iDC.CloudProvider == dc.CloudProvider && - iDC.ProviderAccountName == dc.ProviderAccountName && - dc.AreCloudProviderSettingsEqual(iDC.CloudProviderSettings) && - iDC.Network == dc.Network && - iDC.NodeSize == dc.NodeSize && - iDC.NodesNumber == dc.NodesNumber && - dc.AreTagsEqual(iDC.Tags) -} - -func (dc *DataCentre) AreCloudProviderSettingsEqual(settings []*CloudProviderSettings) bool { - if len(dc.CloudProviderSettings) != len(settings) { - return false - } - - for i, setting := range settings { - if *dc.CloudProviderSettings[i] != *setting { - return false - } - } - - return true -} - -func (dc *DataCentre) AreTagsEqual(tags map[string]string) bool { - if len(dc.Tags) != len(tags) { - return false - } - - for key, val := range tags { - if value, exists := dc.Tags[key]; !exists || value != val { - return false - } - } - - return true -} - -func (dc *DataCentre) SetDefaultValues() { - if dc.ProviderAccountName == "" { - dc.ProviderAccountName = models.DefaultAccountName - } -} - func (cs *ClusterStatus) AreMaintenanceEventStatusesEqual( iEventStatuses []*clusterresource.ClusteredMaintenanceEventStatus, ) bool { @@ -552,101 +352,6 @@ func (cs *ClusterStatus) DCFromInstAPI(iDC models.DataCentre) *DataCentreStatus } } -func (c *Cluster) TwoFactorDeleteFromInstAPI(iTFDs 
[]*models.TwoFactorDelete) (tfd []*TwoFactorDelete) { - for _, iTFD := range iTFDs { - tfd = append(tfd, &TwoFactorDelete{ - Email: iTFD.ConfirmationEmail, - Phone: iTFD.ConfirmationPhoneNumber, - }) - } - return -} - -func (c *Cluster) DCFromInstAPI(iDC models.DataCentre) DataCentre { - return DataCentre{ - Name: iDC.Name, - Region: iDC.Region, - CloudProvider: iDC.CloudProvider, - ProviderAccountName: iDC.ProviderAccountName, - CloudProviderSettings: c.CloudProviderSettingsFromInstAPI(iDC), - Network: iDC.Network, - NodeSize: iDC.NodeSize, - NodesNumber: iDC.NumberOfNodes, - Tags: c.TagsFromInstAPI(iDC.Tags), - } -} - -func (c *Cluster) TagsFromInstAPI(iTags []*models.Tag) map[string]string { - newTags := map[string]string{} - for _, iTag := range iTags { - newTags[iTag.Key] = iTag.Value - } - return newTags -} - -func (c *Cluster) ClusterSettingsNeedUpdate(iCluster Cluster) bool { - return len(c.TwoFactorDelete) != 0 && len(iCluster.TwoFactorDelete) == 0 || - c.Description != iCluster.Description -} - -func (c *Cluster) CloudProviderSettingsFromInstAPI(iDC models.DataCentre) (settings []*CloudProviderSettings) { - if isCloudProviderSettingsEmpty(iDC) { - return nil - } - - switch iDC.CloudProvider { - case models.AWSVPC: - for _, awsSetting := range iDC.AWSSettings { - settings = append(settings, &CloudProviderSettings{ - CustomVirtualNetworkID: awsSetting.CustomVirtualNetworkID, - DiskEncryptionKey: awsSetting.EBSEncryptionKey, - BackupBucket: awsSetting.BackupBucket, - }) - } - case models.GCP: - for _, gcpSetting := range iDC.GCPSettings { - settings = append(settings, &CloudProviderSettings{ - CustomVirtualNetworkID: gcpSetting.CustomVirtualNetworkID, - DisableSnapshotAutoExpiry: gcpSetting.DisableSnapshotAutoExpiry, - }) - } - case models.AZUREAZ: - for _, azureSetting := range iDC.AzureSettings { - settings = append(settings, &CloudProviderSettings{ - ResourceGroup: azureSetting.ResourceGroup, - }) - } - } - return -} - -func 
isCloudProviderSettingsEmpty(iDC models.DataCentre) bool { - var empty bool - - for i := range iDC.AWSSettings { - empty = *iDC.AWSSettings[i] == models.AWSSetting{} - if !empty { - return false - } - } - - for i := range iDC.AzureSettings { - empty = *iDC.AzureSettings[i] == models.AzureSetting{} - if !empty { - return false - } - } - - for i := range iDC.GCPSettings { - empty = *iDC.GCPSettings[i] == models.GCPSetting{} - if !empty { - return false - } - } - - return true -} - func (cs *ClusterStatus) NodesFromInstAPI(iNodes []*models.Node) (nodes []*Node) { for _, iNode := range iNodes { nodes = append(nodes, &Node{ diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go index f27248f82..91ed82011 100644 --- a/apis/clusters/v1beta1/validation.go +++ b/apis/clusters/v1beta1/validation.go @@ -32,77 +32,6 @@ import ( "github.com/instaclustr/operator/pkg/validation" ) -func (c *Cluster) ValidateCreation() error { - clusterNameMatched, err := regexp.Match(models.ClusterNameRegExp, []byte(c.Name)) - if !clusterNameMatched || err != nil { - return fmt.Errorf("cluster name should have lenght from 3 to 32 symbols and fit pattern: %s", - models.ClusterNameRegExp) - } - - if len(c.TwoFactorDelete) > 1 { - return fmt.Errorf("two factor delete should not have more than 1 item") - } - - if !validation.Contains(c.SLATier, models.SLATiers) { - return fmt.Errorf("cluster SLATier %s is unavailable, available values: %v", - c.SLATier, models.SLATiers) - } - - return nil -} - -func (dc *DataCentre) ValidateCreation() error { - if !validation.Contains(dc.CloudProvider, models.CloudProviders) { - return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available values: %v", - dc.CloudProvider, dc.Name, models.CloudProviders) - } - - switch dc.CloudProvider { - case models.AWSVPC: - if !validation.Contains(dc.Region, models.AWSRegions) { - return fmt.Errorf("AWS Region: %s is unavailable, available regions: %v", - dc.Region, 
models.AWSRegions) - } - case models.AZUREAZ: - if !validation.Contains(dc.Region, models.AzureRegions) { - return fmt.Errorf("azure Region: %s is unavailable, available regions: %v", - dc.Region, models.AzureRegions) - } - case models.GCP: - if !validation.Contains(dc.Region, models.GCPRegions) { - return fmt.Errorf("GCP Region: %s is unavailable, available regions: %v", - dc.Region, models.GCPRegions) - } - case models.ONPREMISES: - if dc.Region != models.CLIENTDC { - return fmt.Errorf("ONPREMISES Region: %s is unavailable, available regions: %v", - dc.Region, models.CLIENTDC) - } - } - - if dc.ProviderAccountName == models.DefaultAccountName && len(dc.CloudProviderSettings) != 0 { - return fmt.Errorf("cloud provider settings can be used only with RIYOA accounts") - } - - if len(dc.CloudProviderSettings) > 1 { - return fmt.Errorf("cloud provider settings should not have more than 1 item") - } - - for _, cp := range dc.CloudProviderSettings { - err := cp.ValidateCreation() - if err != nil { - return err - } - } - - networkMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(dc.Network)) - if !networkMatched || err != nil { - return fmt.Errorf("the provided CIDR: %s must contain four dot separated parts and form the Private IP address. All bits in the host part of the CIDR must be 0. Suffix must be between 16-28. 
%v", dc.Network, err) - } - - return nil -} - func (ops *OnPremisesSpec) ValidateCreation() error { if ops.StorageClassName == "" || ops.DataDiskSize == "" || ops.OSDiskSize == "" || ops.NodeCPU == 0 || ops.NodeMemory == "" || ops.OSImageURL == "" || ops.CloudInitScriptRef == nil { @@ -143,20 +72,6 @@ func (ops *OnPremisesSpec) ValidateSSHGatewayCreation() error { return nil } -func (dc *DataCentre) validateImmutableCloudProviderSettingsUpdate(oldSettings []*CloudProviderSettings) error { - if len(oldSettings) != len(dc.CloudProviderSettings) { - return models.ErrImmutableCloudProviderSettings - } - - for i, newProviderSettings := range dc.CloudProviderSettings { - if *newProviderSettings != *oldSettings[i] { - return models.ErrImmutableCloudProviderSettings - } - } - - return nil -} - func (cps *CloudProviderSettings) ValidateCreation() error { if (cps.ResourceGroup != "" && cps.DiskEncryptionKey != "") || (cps.ResourceGroup != "" && cps.CustomVirtualNetworkID != "") { @@ -279,20 +194,7 @@ func validateSingleConcurrentResize(concurrentResizes int) error { return nil } -func (dc *DataCentre) ValidateOnPremisesCreation() error { - if dc.CloudProvider != models.ONPREMISES { - return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available value: %s", - dc.CloudProvider, dc.Name, models.ONPREMISES) - } - - if dc.Region != models.CLIENTDC { - return fmt.Errorf("region %s is unavailable for data centre: %s, available value: %s", - dc.Region, dc.Name, models.CLIENTDC) - } - - return nil -} - +//nolint:unused func ContainsKubeVirtAddon(ctx context.Context, client client.Client) (bool, error) { namespaces := &k8scorev1.NamespaceList{} err := client.List(ctx, namespaces) diff --git a/apis/clusters/v1beta1/validation_test.go b/apis/clusters/v1beta1/validation_test.go index fe5c64e80..a912a1941 100644 --- a/apis/clusters/v1beta1/validation_test.go +++ b/apis/clusters/v1beta1/validation_test.go @@ -6,194 +6,6 @@ import ( 
"github.com/instaclustr/operator/pkg/models" ) -func TestCluster_ValidateCreation(t *testing.T) { - type fields struct { - Name string - Version string - PCICompliance bool - PrivateNetworkCluster bool - SLATier string - TwoFactorDelete []*TwoFactorDelete - Description string - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "empty cluster name", - fields: fields{ - Name: "", - }, - wantErr: true, - }, - { - name: "more than one two factor delete", - fields: fields{ - Name: "test", - TwoFactorDelete: []*TwoFactorDelete{ - { - Email: "test@mail.com", - Phone: "12345", - }, { - Email: "test@mail.com", - Phone: "12345", - }, - }, - }, - wantErr: true, - }, - { - name: "unsupported SLAtier", - fields: fields{ - Name: "test", - SLATier: "test", - }, - wantErr: true, - }, - { - name: "valid cluster", - fields: fields{ - Name: "test", - SLATier: "NON_PRODUCTION", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Cluster{ - Name: tt.fields.Name, - Version: tt.fields.Version, - PCICompliance: tt.fields.PCICompliance, - PrivateNetworkCluster: tt.fields.PrivateNetworkCluster, - SLATier: tt.fields.SLATier, - TwoFactorDelete: tt.fields.TwoFactorDelete, - Description: tt.fields.Description, - } - if err := c.ValidateCreation(); (err != nil) != tt.wantErr { - t.Errorf("ValidateCreation() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestDataCentre_ValidateCreation(t *testing.T) { - type fields struct { - Name string - Region string - CloudProvider string - ProviderAccountName string - CloudProviderSettings []*CloudProviderSettings - Network string - NodeSize string - NodesNumber int - Tags map[string]string - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "unavailable cloud provider", - fields: fields{ - CloudProvider: "some unavailable cloud provider", - }, - wantErr: true, - }, - { - name: "unavailable region for AWS cloud 
provider", - fields: fields{ - Region: models.AzureRegions[0], - CloudProvider: models.AWSVPC, - }, - wantErr: true, - }, - { - name: "unavailable region for Azure cloud provider", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AZUREAZ, - }, - wantErr: true, - }, - { - name: "unavailable region for GCP cloud provider", - fields: fields{ - Region: models.AzureRegions[0], - CloudProvider: models.GCP, - }, - wantErr: true, - }, - { - name: "unavailable region for ONPREMISES cloud provider", - fields: fields{ - Region: models.AzureRegions[0], - CloudProvider: models.ONPREMISES, - }, - wantErr: true, - }, - { - name: "cloud provider settings on not RIYOA account", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - ProviderAccountName: models.DefaultAccountName, - CloudProviderSettings: []*CloudProviderSettings{{}}, - }, - wantErr: true, - }, - { - name: "more than one cloud provider settings", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - ProviderAccountName: "custom", - CloudProviderSettings: []*CloudProviderSettings{{}, {}}, - }, - wantErr: true, - }, - { - name: "invalid network", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - Network: "test", - }, - wantErr: true, - }, - { - name: "valid DC", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - Network: "172.16.0.0/19", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dc := &DataCentre{ - Name: tt.fields.Name, - Region: tt.fields.Region, - CloudProvider: tt.fields.CloudProvider, - ProviderAccountName: tt.fields.ProviderAccountName, - CloudProviderSettings: tt.fields.CloudProviderSettings, - Network: tt.fields.Network, - NodeSize: tt.fields.NodeSize, - NodesNumber: tt.fields.NodesNumber, - Tags: tt.fields.Tags, - } - if err := dc.ValidateCreation(); (err != nil) != tt.wantErr { - 
t.Errorf("ValidateCreation() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - func TestCloudProviderSettings_ValidateCreation(t *testing.T) { type fields struct { CustomVirtualNetworkID string @@ -941,76 +753,6 @@ func TestGenericDataCentreSpec_hasCloudProviderSettings(t *testing.T) { } } -func TestDataCentre_validateImmutableCloudProviderSettingsUpdate(t *testing.T) { - type fields struct { - Name string - Region string - CloudProvider string - ProviderAccountName string - CloudProviderSettings []*CloudProviderSettings - Network string - NodeSize string - NodesNumber int - Tags map[string]string - } - type args struct { - oldSettings []*CloudProviderSettings - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "different len of the CloudProviderSettings", - fields: fields{ - CloudProviderSettings: []*CloudProviderSettings{{}, {}}, - }, - args: args{oldSettings: []*CloudProviderSettings{{}}}, - wantErr: true, - }, - { - name: "different CloudProviderSettings", - fields: fields{CloudProviderSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "new", - }}}, - args: args{oldSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "test", - }}}, - wantErr: true, - }, - { - name: "unchanged CloudProviderSettings", - fields: fields{CloudProviderSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "test", - }}}, - args: args{oldSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "test", - }}}, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dc := &DataCentre{ - Name: tt.fields.Name, - Region: tt.fields.Region, - CloudProvider: tt.fields.CloudProvider, - ProviderAccountName: tt.fields.ProviderAccountName, - CloudProviderSettings: tt.fields.CloudProviderSettings, - Network: tt.fields.Network, - NodeSize: tt.fields.NodeSize, - NodesNumber: tt.fields.NodesNumber, - Tags: tt.fields.Tags, - } - if err := 
dc.validateImmutableCloudProviderSettingsUpdate(tt.args.oldSettings); (err != nil) != tt.wantErr { - t.Errorf("validateImmutableCloudProviderSettingsUpdate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - func TestGenericClusterSpec_ValidateCreation(t *testing.T) { type fields struct { Name string diff --git a/apis/clusters/v1beta1/zookeeper_types.go b/apis/clusters/v1beta1/zookeeper_types.go index b5b7d1a9c..a3c6b5735 100644 --- a/apis/clusters/v1beta1/zookeeper_types.go +++ b/apis/clusters/v1beta1/zookeeper_types.go @@ -17,32 +17,41 @@ limitations under the License. package v1beta1 import ( - "encoding/json" "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/instaclustr/operator/pkg/models" + "github.com/instaclustr/operator/pkg/utils/slices" ) type ZookeeperDataCentre struct { - DataCentre `json:",inline"` + GenericDataCentreSpec `json:",inline"` + + NumberOfNodes int `json:"numberOfNodes"` + NodeSize string `json:"nodeSize"` ClientToServerEncryption bool `json:"clientToServerEncryption"` - EnforceAuthSchemes []string `json:"enforceAuthSchemes,omitempty"` EnforceAuthEnabled bool `json:"enforceAuthEnabled,omitempty"` + EnforceAuthSchemes []string `json:"enforceAuthSchemes,omitempty"` } // ZookeeperSpec defines the desired state of Zookeeper type ZookeeperSpec struct { - Cluster `json:",inline"` - DataCentres []*ZookeeperDataCentre `json:"dataCentres"` + GenericClusterSpec `json:",inline"` + DataCentres []*ZookeeperDataCentre `json:"dataCentres"` } // ZookeeperStatus defines the observed state of Zookeeper type ZookeeperStatus struct { - ClusterStatus `json:",inline"` - DefaultUserSecretRef *Reference `json:"defaultUserSecretRef,omitempty"` + GenericStatus `json:",inline"` + DataCentres []*ZookeeperDataCentreStatus `json:"dataCentres,omitempty"` + DefaultUserSecretRef *Reference `json:"defaultUserSecretRef,omitempty"` +} + +type ZookeeperDataCentreStatus struct { + GenericDataCentreStatus 
`json:",inline"` + Nodes []*Node `json:"nodes"` } //+kubebuilder:object:root=true @@ -85,19 +94,9 @@ func (z *Zookeeper) NewPatch() client.Patch { return client.MergeFrom(old) } -func (z *Zookeeper) FromInstAPI(iData []byte) (*Zookeeper, error) { - iZook := &models.ZookeeperCluster{} - err := json.Unmarshal(iData, iZook) - if err != nil { - return nil, err - } - - return &Zookeeper{ - TypeMeta: z.TypeMeta, - ObjectMeta: z.ObjectMeta, - Spec: z.Spec.FromInstAPI(iZook), - Status: z.Status.FromInstAPI(iZook), - }, nil +func (z *Zookeeper) FromInstAPI(instaModel *models.ZookeeperCluster) { + z.Spec.FromInstAPI(instaModel) + z.Status.FromInstAPI(instaModel) } func (z *Zookeeper) GetDataCentreID(cdcName string) string { @@ -116,31 +115,15 @@ func (z *Zookeeper) GetClusterID() string { return z.Status.ID } -func (zs *ZookeeperSpec) FromInstAPI(iZook *models.ZookeeperCluster) ZookeeperSpec { - return ZookeeperSpec{ - Cluster: Cluster{ - Name: iZook.Name, - Version: iZook.ZookeeperVersion, - Description: iZook.Description, - PrivateNetworkCluster: iZook.PrivateNetworkCluster, - SLATier: iZook.SLATier, - TwoFactorDelete: zs.Cluster.TwoFactorDeleteFromInstAPI(iZook.TwoFactorDelete), - }, - DataCentres: zs.DCsFromInstAPI(iZook.DataCentres), - } +func (zs *ZookeeperSpec) FromInstAPI(instaModel *models.ZookeeperCluster) { + zs.GenericClusterSpec.FromInstAPI(&instaModel.GenericClusterFields, instaModel.ZookeeperVersion) + zs.DCsFromInstAPI(instaModel.DataCentres) } -func (zs *ZookeeperStatus) FromInstAPI(iZook *models.ZookeeperCluster) ZookeeperStatus { - return ZookeeperStatus{ - ClusterStatus: ClusterStatus{ - ID: iZook.ID, - State: iZook.Status, - DataCentres: zs.DCsFromInstAPI(iZook.DataCentres), - CurrentClusterOperationStatus: iZook.CurrentClusterOperationStatus, - MaintenanceEvents: zs.MaintenanceEvents, - NodeCount: zs.GetNodeCount(iZook.DataCentres), - }, - } +func (zs *ZookeeperStatus) FromInstAPI(instaModel *models.ZookeeperCluster) { + 
zs.GenericStatus.FromInstAPI(&instaModel.GenericClusterFields) + zs.DCsFromInstAPI(instaModel.DataCentres) + zs.NodeCount = zs.GetNodeCount(instaModel.DataCentres) } func (zs *ZookeeperStatus) GetNodeCount(dcs []*models.ZookeeperDataCentre) string { @@ -156,34 +139,31 @@ func (zs *ZookeeperStatus) GetNodeCount(dcs []*models.ZookeeperDataCentre) strin return fmt.Sprintf("%v/%v", running, total) } -func (zs *ZookeeperSpec) DCsFromInstAPI(iDCs []*models.ZookeeperDataCentre) (dcs []*ZookeeperDataCentre) { - for _, iDC := range iDCs { - dcs = append(dcs, &ZookeeperDataCentre{ - DataCentre: zs.Cluster.DCFromInstAPI(iDC.DataCentre), - ClientToServerEncryption: iDC.ClientToServerEncryption, - EnforceAuthSchemes: iDC.EnforceAuthSchemes, - EnforceAuthEnabled: iDC.EnforceAuthEnabled, - }) +func (zs *ZookeeperSpec) DCsFromInstAPI(instaModels []*models.ZookeeperDataCentre) { + dcs := make([]*ZookeeperDataCentre, len(instaModels)) + for i, instaModel := range instaModels { + dc := &ZookeeperDataCentre{} + dc.FromInstAPI(instaModel) + dcs[i] = dc } - return + zs.DataCentres = dcs } -func (zs *ZookeeperStatus) DCsFromInstAPI(iDCs []*models.ZookeeperDataCentre) (dcs []*DataCentreStatus) { - for _, iDC := range iDCs { - dcs = append(dcs, zs.ClusterStatus.DCFromInstAPI(iDC.DataCentre)) +func (zs *ZookeeperStatus) DCsFromInstAPI(instaModels []*models.ZookeeperDataCentre) { + dcs := make([]*ZookeeperDataCentreStatus, len(instaModels)) + for i, instaModel := range instaModels { + dc := &ZookeeperDataCentreStatus{} + dc.FromInstAPI(instaModel) + dcs[i] = dc } - return + zs.DataCentres = dcs } func (zs *ZookeeperSpec) ToInstAPI() *models.ZookeeperCluster { return &models.ZookeeperCluster{ - Name: zs.Name, - ZookeeperVersion: zs.Version, - PrivateNetworkCluster: zs.PrivateNetworkCluster, - SLATier: zs.SLATier, - TwoFactorDelete: zs.Cluster.TwoFactorDeletesToInstAPI(), - DataCentres: zs.DCsToInstAPI(), - Description: zs.Description, + GenericClusterFields: zs.GenericClusterSpec.ToInstAPI(), 
+ ZookeeperVersion: zs.Version, + DataCentres: zs.DCsToInstAPI(), } } @@ -196,10 +176,12 @@ func (zs *ZookeeperSpec) DCsToInstAPI() (dcs []*models.ZookeeperDataCentre) { func (zdc *ZookeeperDataCentre) ToInstAPI() *models.ZookeeperDataCentre { return &models.ZookeeperDataCentre{ - DataCentre: zdc.DataCentre.ToInstAPI(), + GenericDataCentreFields: zdc.GenericDataCentreSpec.ToInstAPI(), ClientToServerEncryption: zdc.ClientToServerEncryption, EnforceAuthSchemes: zdc.EnforceAuthSchemes, EnforceAuthEnabled: zdc.EnforceAuthEnabled, + NumberOfNodes: zdc.NumberOfNodes, + NodeSize: zdc.NodeSize, } } @@ -208,26 +190,79 @@ func (z *Zookeeper) GetSpec() ZookeeperSpec { return z.Spec } func (z *Zookeeper) IsSpecEqual(spec ZookeeperSpec) bool { return z.Spec.IsEqual(spec) } func (a *ZookeeperSpec) IsEqual(b ZookeeperSpec) bool { - return a.Cluster.IsEqual(b.Cluster) && - a.areDCsEqual(b.DataCentres) + return a.GenericClusterSpec.Equals(&b.GenericClusterSpec) && + a.DCsEqual(b.DataCentres) } -func (rs *ZookeeperSpec) areDCsEqual(b []*ZookeeperDataCentre) bool { - a := rs.DataCentres - if len(a) != len(b) { +func (rs *ZookeeperSpec) DCsEqual(o []*ZookeeperDataCentre) bool { + if len(rs.DataCentres) != len(o) { return false } - for i := range b { - if a[i].Name != b[i].Name { - continue + m := map[string]*ZookeeperDataCentre{} + for _, dc := range rs.DataCentres { + m[dc.Name] = dc + } + + for _, iDC := range o { + dc, ok := m[iDC.Name] + if !ok || !dc.Equals(iDC) { + return false } + } + + return true +} - if !a[i].DataCentre.IsEqual(b[i].DataCentre) || - a[i].ClientToServerEncryption != b[i].ClientToServerEncryption { +func (zdc *ZookeeperDataCentre) FromInstAPI(instaModel *models.ZookeeperDataCentre) { + zdc.GenericDataCentreSpec.FromInstAPI(&instaModel.GenericDataCentreFields) + zdc.NodeSize = instaModel.NodeSize + zdc.NumberOfNodes = instaModel.NumberOfNodes + zdc.ClientToServerEncryption = instaModel.ClientToServerEncryption + zdc.EnforceAuthEnabled = 
instaModel.EnforceAuthEnabled + zdc.EnforceAuthSchemes = instaModel.EnforceAuthSchemes +} + +func (s *ZookeeperDataCentreStatus) FromInstAPI(instaModel *models.ZookeeperDataCentre) { + s.GenericDataCentreStatus.FromInstAPI(&instaModel.GenericDataCentreFields) + s.Nodes = nodesFromInstAPI(instaModel.Nodes) +} + +func (zdc *ZookeeperDataCentre) Equals(o *ZookeeperDataCentre) bool { + return zdc.GenericDataCentreSpec.Equals(&o.GenericDataCentreSpec) && + zdc.NumberOfNodes == o.NumberOfNodes && + zdc.NodeSize == o.NodeSize && + zdc.ClientToServerEncryption == o.ClientToServerEncryption && + zdc.EnforceAuthEnabled == o.EnforceAuthEnabled && + slices.Equals(zdc.EnforceAuthSchemes, o.EnforceAuthSchemes) +} + +func (zs *ZookeeperStatus) Equals(o *ZookeeperStatus) bool { + return zs.GenericStatus.Equals(&o.GenericStatus) && + zs.DCsEquals(o.DataCentres) +} + +func (zs *ZookeeperStatus) DCsEquals(o []*ZookeeperDataCentreStatus) bool { + if len(zs.DataCentres) != len(o) { + return false + } + + m := map[string]*ZookeeperDataCentreStatus{} + for _, dc := range zs.DataCentres { + m[dc.Name] = dc + } + + for _, iDC := range o { + dc, ok := m[iDC.Name] + if !ok || !dc.Equals(iDC) { return false } } return true } + +func (s *ZookeeperDataCentreStatus) Equals(o *ZookeeperDataCentreStatus) bool { + return s.GenericDataCentreStatus.Equals(&o.GenericDataCentreStatus) && + nodesEqual(s.Nodes, o.Nodes) +} diff --git a/apis/clusters/v1beta1/zookeeper_webhook.go b/apis/clusters/v1beta1/zookeeper_webhook.go index c12fb67db..a4c67fe18 100644 --- a/apis/clusters/v1beta1/zookeeper_webhook.go +++ b/apis/clusters/v1beta1/zookeeper_webhook.go @@ -61,10 +61,6 @@ func (z *Zookeeper) Default() { models.ResourceStateAnnotation: "", }) } - - for _, dataCentre := range z.Spec.DataCentres { - dataCentre.SetDefaultValues() - } } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
@@ -86,7 +82,7 @@ func (zv *zookeeperValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return err } - err = z.Spec.Cluster.ValidateCreation() + err = z.Spec.GenericClusterSpec.ValidateCreation() if err != nil { return err } @@ -107,7 +103,7 @@ func (zv *zookeeperValidator) ValidateCreate(ctx context.Context, obj runtime.Ob } for _, dc := range z.Spec.DataCentres { - err = dc.DataCentre.ValidateCreation() + err = dc.GenericDataCentreSpec.validateCreation() if err != nil { return err } @@ -127,17 +123,21 @@ func (zv *zookeeperValidator) ValidateUpdate(ctx context.Context, old runtime.Ob return fmt.Errorf("cannot assert object %v to zookeeper", new.GetObjectKind()) } - zookeeperlog.Info("validate update", "name", newZookeeper.Name) - - if newZookeeper.Status.ID == "" { - return zv.ValidateCreate(ctx, newZookeeper) + if newZookeeper.Annotations[models.ResourceStateAnnotation] == models.SyncingEvent { + return nil } if newZookeeper.Annotations[models.ExternalChangesAnnotation] == models.True { return nil } - if newZookeeper.Generation != oldZookeeper.Generation && !oldZookeeper.Spec.ClusterSettingsNeedUpdate(newZookeeper.Spec.Cluster) { + if newZookeeper.Status.ID == "" { + return zv.ValidateCreate(ctx, newZookeeper) + } + + zookeeperlog.Info("validate update", "name", newZookeeper.Name) + + if newZookeeper.Generation != oldZookeeper.Generation && !oldZookeeper.Spec.ClusterSettingsNeedUpdate(&newZookeeper.Spec.GenericClusterSpec) { return fmt.Errorf("update is not allowed") } diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index 0fc301951..4f263d365 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -705,32 +705,6 @@ func (in *CloudProviderSettings) DeepCopy() *CloudProviderSettings { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - if in.TwoFactorDelete != nil { - in, out := &in.TwoFactorDelete, &out.TwoFactorDelete - *out = make([]*TwoFactorDelete, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(TwoFactorDelete) - **out = **in - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. -func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterManagerNodes) DeepCopyInto(out *ClusterManagerNodes) { *out = *in @@ -760,11 +734,6 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { } } } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = new(Options) - **out = **in - } if in.MaintenanceEvents != nil { in, out := &in.MaintenanceEvents, &out.MaintenanceEvents *out = make([]*clusterresourcesv1beta1.ClusteredMaintenanceEventStatus, len(*in)) @@ -884,39 +853,6 @@ func (in *CustomConnectors) DeepCopy() *CustomConnectors { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataCentre) DeepCopyInto(out *DataCentre) { - *out = *in - if in.CloudProviderSettings != nil { - in, out := &in.CloudProviderSettings, &out.CloudProviderSettings - *out = make([]*CloudProviderSettings, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(CloudProviderSettings) - **out = **in - } - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCentre. 
-func (in *DataCentre) DeepCopy() *DataCentre { - if in == nil { - return nil - } - out := new(DataCentre) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataCentreStatus) DeepCopyInto(out *DataCentreStatus) { *out = *in @@ -2120,21 +2056,6 @@ func (in *OpenSearchStatus) DeepCopy() *OpenSearchStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Options) DeepCopyInto(out *Options) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Options. -func (in *Options) DeepCopy() *Options { - if in == nil { - return nil - } - out := new(Options) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PackagedProvisioning) DeepCopyInto(out *PackagedProvisioning) { *out = *in @@ -2558,26 +2479,6 @@ func (in PrivateLinkStatuses) DeepCopy() PrivateLinkStatuses { return *out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrivateLinkV1) DeepCopyInto(out *PrivateLinkV1) { - *out = *in - if in.IAMPrincipalARNs != nil { - in, out := &in.IAMPrincipalARNs, &out.IAMPrincipalARNs - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkV1. -func (in *PrivateLinkV1) DeepCopy() *PrivateLinkV1 { - if in == nil { - return nil - } - out := new(PrivateLinkV1) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Redis) DeepCopyInto(out *Redis) { *out = *in @@ -3109,7 +3010,7 @@ func (in *Zookeeper) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperDataCentre) DeepCopyInto(out *ZookeeperDataCentre) { *out = *in - in.DataCentre.DeepCopyInto(&out.DataCentre) + in.GenericDataCentreSpec.DeepCopyInto(&out.GenericDataCentreSpec) if in.EnforceAuthSchemes != nil { in, out := &in.EnforceAuthSchemes, &out.EnforceAuthSchemes *out = make([]string, len(*in)) @@ -3127,6 +3028,33 @@ func (in *ZookeeperDataCentre) DeepCopy() *ZookeeperDataCentre { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperDataCentreStatus) DeepCopyInto(out *ZookeeperDataCentreStatus) { + *out = *in + in.GenericDataCentreStatus.DeepCopyInto(&out.GenericDataCentreStatus) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperDataCentreStatus. +func (in *ZookeeperDataCentreStatus) DeepCopy() *ZookeeperDataCentreStatus { + if in == nil { + return nil + } + out := new(ZookeeperDataCentreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperList) DeepCopyInto(out *ZookeeperList) { *out = *in @@ -3162,7 +3090,7 @@ func (in *ZookeeperList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ZookeeperSpec) DeepCopyInto(out *ZookeeperSpec) { *out = *in - in.Cluster.DeepCopyInto(&out.Cluster) + in.GenericClusterSpec.DeepCopyInto(&out.GenericClusterSpec) if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*ZookeeperDataCentre, len(*in)) @@ -3189,7 +3117,18 @@ func (in *ZookeeperSpec) DeepCopy() *ZookeeperSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperStatus) DeepCopyInto(out *ZookeeperStatus) { *out = *in - in.ClusterStatus.DeepCopyInto(&out.ClusterStatus) + in.GenericStatus.DeepCopyInto(&out.GenericStatus) + if in.DataCentres != nil { + in, out := &in.DataCentres, &out.DataCentres + *out = make([]*ZookeeperDataCentreStatus, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ZookeeperDataCentreStatus) + (*in).DeepCopyInto(*out) + } + } + } if in.DefaultUserSecretRef != nil { in, out := &in.DefaultUserSecretRef, &out.DefaultUserSecretRef *out = new(apiextensions.ObjectReference) diff --git a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml index d0f887e7b..a13845efe 100644 --- a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml +++ b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml @@ -55,52 +55,138 @@ spec: items: properties: accountName: + default: INSTACLUSTR + description: For customers running in their own account. Your + provider account can be found on the Create Cluster page on + the Instaclustr Console, or the "Provider Account" property + on any existing cluster. For customers provisioning on Instaclustr's + cloud provider accounts, this property may be omitted. type: string - clientToServerEncryption: - type: boolean - cloudProvider: - type: string - cloudProviderSettings: + awsSettings: + description: AWS specific settings for the Data Centre. 
Cannot + be provided with GCP or Azure settings. items: properties: backupBucket: + description: Specify the S3 bucket to use for storing + backup data for the cluster data centre. Only available + for customers running in their own cloud provider accounts. + Currently supported for OpenSearch clusters only. type: string customVirtualNetworkId: + description: VPC ID into which the Data Centre will be + provisioned. The Data Centre's network allocation must + match the IPv4 CIDR block of the specified VPC. type: string - disableSnapshotAutoExpiry: - type: boolean - diskEncryptionKey: + encryptionKey: + description: ID of a KMS encryption key to encrypt data + on nodes. KMS encryption key must be set in Cluster + Resources through the Instaclustr Console before provisioning + an encrypted Data Centre. + type: string + type: object + maxItems: 1 + type: array + azureSettings: + description: Azure specific settings for the Data Centre. Cannot + be provided with AWS or GCP settings. + items: + properties: + customVirtualNetworkId: + description: VNet ID into which the Data Centre will be + provisioned. The VNet must have an available address + space for the Data Centre's network allocation to be + appended to the VNet. Currently supported for PostgreSQL + clusters only. type: string resourceGroup: + description: The name of the Azure Resource Group into + which the Data Centre will be provisioned. + type: string + storageNetwork: + description: 'The private network address block to be + used for the storage network. This is only used for + certain node sizes, currently limited to those which + use Azure NetApp Files: for all other node sizes, this + field should not be provided. The network must have + a prefix length between /16 and /28, and must be part + of a private address range.' type: string type: object + maxItems: 1 type: array + clientToServerEncryption: + type: boolean + cloudProvider: + description: Name of a cloud provider service. 
+ type: string enforceAuthEnabled: type: boolean enforceAuthSchemes: items: type: string type: array + gcpSettings: + description: GCP specific settings for the Data Centre. Cannot + be provided with AWS or Azure settings. + items: + properties: + customVirtualNetworkId: + description: "Network name or a relative Network or Subnetwork + URI. The Data Centre's network allocation must match + the IPv4 CIDR block of the specified subnet. \n Examples: + Network URI: projects/{riyoa-gcp-project-name}/global/networks/{network-name}. + Network name: {network-name}, equivalent to projects/{riyoa-gcp-project-name}/global/networks/{network-name}. + Same-project subnetwork URI: projects/{riyoa-gcp-project-name}/regions/{region-id}/subnetworks/{subnetwork-name}. + Shared VPC subnetwork URI: projects/{riyoa-gcp-host-project-name}/regions/{region-id}/subnetworks/{subnetwork-name}." + type: string + disableSnapshotAutoExpiry: + description: Specify whether the GCS backup bucket should + automatically expire data after 7 days or not. Setting + this to true will disable automatic expiry and will + allow for creation of custom snapshot repositories with + customisable retention using the Index Management Plugin. + The storage will have to be manually cleared after the + cluster is deleted. Only available for customers running + in their own cloud provider accounts. Currently supported + for OpenSearch clusters only. + type: boolean + type: object + maxItems: 1 + type: array name: + description: A logical name for the data centre within a cluster. + These names must be unique in the cluster. type: string network: + description: The private network address block for the Data + Centre specified using CIDR address notation. The network + must have a prefix length between /12 and /22 and must be + part of a private address space. type: string nodeSize: type: string - nodesNumber: + numberOfNodes: type: integer region: + description: Region of the Data Centre. 
type: string tags: additionalProperties: type: string + description: List of tags to apply to the Data Centre. Tags + are metadata labels which allow you to identify, categorize + and filter clusters. This can be useful for grouping together + clusters into applications, environments, or any category + that you require. type: object required: - clientToServerEncryption - cloudProvider + - name - network - nodeSize - - nodesNumber + - numberOfNodes - region type: object type: array @@ -109,13 +195,7 @@ spec: name: description: Name [ 3 .. 32 ] characters. type: string - pciCompliance: - description: The PCI compliance standards relate to the security of - user data and transactional information. Can only be applied clusters - provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch - and Redis. - type: boolean - privateNetworkCluster: + privateNetwork: type: boolean slaTier: description: 'Non-production clusters may receive lower priority support @@ -144,15 +224,11 @@ spec: status: description: ZookeeperStatus defines the observed state of Zookeeper properties: - cdcid: - type: string currentClusterOperationStatus: type: string dataCentres: items: properties: - encryptionKeyId: - type: string id: type: string name: @@ -178,21 +254,6 @@ spec: type: string type: object type: array - nodesNumber: - type: integer - privateLink: - items: - properties: - advertisedHostname: - type: string - endPointServiceId: - type: string - endPointServiceName: - type: string - required: - - advertisedHostname - type: object - type: array resizeOperations: items: properties: @@ -249,6 +310,8 @@ spec: type: array status: type: string + required: + - nodes type: object type: array defaultUserSecretRef: @@ -352,19 +415,8 @@ spec: type: array nodeCount: type: string - options: - properties: - dataNodeSize: - type: string - masterNodeSize: - type: string - openSearchDashboardsNodeSize: - type: string - type: object state: type: string - twoFactorDeleteEnabled: - type: boolean type: 
object type: object served: true diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml index 57a663df3..338ca2b85 100644 --- a/config/samples/clusters_v1beta1_cassandra.yaml +++ b/config/samples/clusters_v1beta1_cassandra.yaml @@ -4,7 +4,7 @@ metadata: name: cassandra-cluster spec: name: "username-cassandra" #(immutable) - version: "4.1.13" #(immutable) + version: "4.1.3" #(immutable) privateNetwork: false #(immutable) dataCentres: - name: "AWS_cassandra" #(mutable) diff --git a/config/samples/clusters_v1beta1_zookeeper.yaml b/config/samples/clusters_v1beta1_zookeeper.yaml index 570d4582e..7f6132325 100644 --- a/config/samples/clusters_v1beta1_zookeeper.yaml +++ b/config/samples/clusters_v1beta1_zookeeper.yaml @@ -3,7 +3,7 @@ kind: Zookeeper metadata: name: zookeeper-sample spec: - name: "username-zookeeper" + name: "example-zookeeper" # description: "some description" dataCentres: - clientToServerEncryption: false @@ -12,8 +12,8 @@ spec: network: "10.0.0.0/16" nodeSize: "zookeeper-developer-t3.small-20" # nodeSize: "zookeeper-production-m5.large-60" - nodesNumber: 3 + numberOfNodes: 3 region: "US_EAST_1" - privateNetworkCluster: false + privateNetwork: false slaTier: "NON_PRODUCTION" version: "3.8.2" diff --git a/controllers/clusterresources/awsencryptionkey_controller.go b/controllers/clusterresources/awsencryptionkey_controller.go index f19f2cd19..631963afd 100644 --- a/controllers/clusterresources/awsencryptionkey_controller.go +++ b/controllers/clusterresources/awsencryptionkey_controller.go @@ -228,7 +228,7 @@ func (r *AWSEncryptionKeyReconciler) handleDelete( ) } - r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.SyncJob)) patch := encryptionKey.NewPatch() controllerutil.RemoveFinalizer(encryptionKey, models.DeletionFinalizer) encryptionKey.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -265,7 +265,7 @@ 
func (r *AWSEncryptionKeyReconciler) handleDelete( func (r *AWSEncryptionKeyReconciler) startEncryptionKeyStatusJob(encryptionKey *v1beta1.AWSEncryptionKey) error { job := r.newWatchStatusJob(encryptionKey) - err := r.Scheduler.ScheduleJob(encryptionKey.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(encryptionKey.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -286,7 +286,7 @@ func (r *AWSEncryptionKeyReconciler) newWatchStatusJob(encryptionKey *v1beta1.AW "namespaced name", key, ) - r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.SyncJob)) return nil } @@ -333,7 +333,7 @@ func (r *AWSEncryptionKeyReconciler) handleExternalDelete(ctx context.Context, k l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/awsendpointserviceprincipal_controller.go b/controllers/clusterresources/awsendpointserviceprincipal_controller.go index 3f5c9b783..e4fc26b8d 100644 --- a/controllers/clusterresources/awsendpointserviceprincipal_controller.go +++ b/controllers/clusterresources/awsendpointserviceprincipal_controller.go @@ -167,7 +167,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleCreate(ctx context.Context return ctrl.Result{}, err } r.EventRecorder.Eventf(principal, models.Normal, models.Created, - "Status check job %s has been started", principal.GetJobID(scheduler.StatusChecker), + "Status check job %s has been started", principal.GetJobID(scheduler.SyncJob), ) return ctrl.Result{}, nil @@ -203,7 +203,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleDelete(ctx context.Context func (r 
*AWSEndpointServicePrincipalReconciler) startWatchStatusJob(ctx context.Context, resource *clusterresourcesv1beta1.AWSEndpointServicePrincipal) error { job := r.newWatchStatusJob(ctx, resource) - return r.Scheduler.ScheduleJob(resource.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + return r.Scheduler.ScheduleJob(resource.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) } func (r *AWSEndpointServicePrincipalReconciler) newWatchStatusJob(ctx context.Context, principal *clusterresourcesv1beta1.AWSEndpointServicePrincipal) scheduler.Job { @@ -218,7 +218,7 @@ func (r *AWSEndpointServicePrincipalReconciler) newWatchStatusJob(ctx context.Co "namespaced name", key, ) - r.Scheduler.RemoveJob(principal.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(principal.GetJobID(scheduler.SyncJob)) return nil } @@ -252,7 +252,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleExternalDelete(ctx context l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(principal, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(principal.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(principal.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go b/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go index 0818463f1..f035ca80c 100644 --- a/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go +++ b/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go @@ -256,7 +256,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( ) } - r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(firewallRule, models.DeletionFinalizer) firewallRule.Annotations[models.ResourceStateAnnotation] = 
models.DeletedEvent err = r.Patch(ctx, firewallRule, patch) @@ -292,7 +292,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( func (r *AWSSecurityGroupFirewallRuleReconciler) startFirewallRuleStatusJob(firewallRule *v1beta1.AWSSecurityGroupFirewallRule) error { job := r.newWatchStatusJob(firewallRule) - err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -313,7 +313,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) newWatchStatusJob(firewallRule "namespaced name", key, ) - r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.SyncJob)) return nil } @@ -360,7 +360,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleExternalDelete(ctx contex l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(rule, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(rule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(rule.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/awsvpcpeering_controller.go b/controllers/clusterresources/awsvpcpeering_controller.go index 9d6d20d5d..e1a7d7466 100644 --- a/controllers/clusterresources/awsvpcpeering_controller.go +++ b/controllers/clusterresources/awsvpcpeering_controller.go @@ -362,7 +362,7 @@ func (r *AWSVPCPeeringReconciler) handleDeletePeering( return ctrl.Result{}, err } - r.Scheduler.RemoveJob(aws.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(aws.GetJobID(scheduler.SyncJob)) patch := aws.NewPatch() controllerutil.RemoveFinalizer(aws, models.DeletionFinalizer) @@ -398,7 +398,7 @@ func (r *AWSVPCPeeringReconciler) handleDeletePeering( func (r *AWSVPCPeeringReconciler) 
startAWSVPCPeeringStatusJob(awsPeering *v1beta1.AWSVPCPeering) error { job := r.newWatchStatusJob(awsPeering) - err := r.Scheduler.ScheduleJob(awsPeering.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(awsPeering.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -418,7 +418,7 @@ func (r *AWSVPCPeeringReconciler) newWatchStatusJob(awsPeering *v1beta1.AWSVPCPe "namespaced name", namespacedName, ) - r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -459,7 +459,7 @@ func (r *AWSVPCPeeringReconciler) newWatchStatusJob(awsPeering *v1beta1.AWSVPCPe "The AWSPeering was deleted on AWS, stopping job...", ) - r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -546,7 +546,7 @@ func (r *AWSVPCPeeringReconciler) handleExternalDelete(ctx context.Context, key l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/azurevnetpeering_controller.go b/controllers/clusterresources/azurevnetpeering_controller.go index 233a22675..c92144b19 100644 --- a/controllers/clusterresources/azurevnetpeering_controller.go +++ b/controllers/clusterresources/azurevnetpeering_controller.go @@ -247,7 +247,7 @@ func (r *AzureVNetPeeringReconciler) handleDeletePeering( } if status != nil { - r.Scheduler.RemoveJob(azure.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(azure.GetJobID(scheduler.SyncJob)) err = r.API.DeletePeering(azure.Status.ID, instaclustr.AzurePeeringEndpoint) if err != nil { l.Error(err, "cannot update 
Azure VNet Peering resource status", @@ -312,7 +312,7 @@ func (r *AzureVNetPeeringReconciler) startAzureVNetPeeringStatusJob(azurePeering ) error { job := r.newWatchStatusJob(azurePeering) - err := r.Scheduler.ScheduleJob(azurePeering.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(azurePeering.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -334,7 +334,7 @@ func (r *AzureVNetPeeringReconciler) newWatchStatusJob(azureVNetPeering *v1beta1 "namespaced name", key, ) - r.Scheduler.RemoveJob(azureVNetPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(azureVNetPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -382,7 +382,7 @@ func (r *AzureVNetPeeringReconciler) handleExternalDelete(ctx context.Context, k l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/clusternetworkfirewallrule_controller.go b/controllers/clusterresources/clusternetworkfirewallrule_controller.go index 287cec570..2baca470d 100644 --- a/controllers/clusterresources/clusternetworkfirewallrule_controller.go +++ b/controllers/clusterresources/clusternetworkfirewallrule_controller.go @@ -276,7 +276,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( ) } - r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(firewallRule, models.DeletionFinalizer) firewallRule.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, firewallRule, patch) @@ -311,7 +311,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( func (r 
*ClusterNetworkFirewallRuleReconciler) startFirewallRuleStatusJob(firewallRule *v1beta1.ClusterNetworkFirewallRule) error { job := r.newWatchStatusJob(firewallRule) - err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } diff --git a/controllers/clusterresources/gcpvpcpeering_controller.go b/controllers/clusterresources/gcpvpcpeering_controller.go index 61b5b7eab..0c269a0e1 100644 --- a/controllers/clusterresources/gcpvpcpeering_controller.go +++ b/controllers/clusterresources/gcpvpcpeering_controller.go @@ -257,7 +257,7 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( } patch := gcp.NewPatch() - r.Scheduler.RemoveJob(gcp.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(gcp.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(gcp, models.DeletionFinalizer) gcp.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, gcp, patch) @@ -295,7 +295,7 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( func (r *GCPVPCPeeringReconciler) startGCPVPCPeeringStatusJob(gcpPeering *v1beta1.GCPVPCPeering) error { job := r.newWatchStatusJob(gcpPeering) - err := r.Scheduler.ScheduleJob(gcpPeering.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(gcpPeering.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -316,7 +316,7 @@ func (r *GCPVPCPeeringReconciler) newWatchStatusJob(gcpPeering *v1beta1.GCPVPCPe "namespaced name", key, ) - r.Scheduler.RemoveJob(gcpPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(gcpPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -364,7 +364,7 @@ func (r *GCPVPCPeeringReconciler) handleExternalDelete(ctx context.Context, key l.Info(instaclustr.MsgInstaclustrResourceNotFound) 
r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index 872513845..15cd446e5 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -247,7 +247,7 @@ func (r *CadenceReconciler) handleCreateCluster( } r.EventRecorder.Event(c, models.Normal, models.Created, - "Cluster status check job is started") + "Cluster sync job is started") } return ctrl.Result{}, nil @@ -424,7 +424,7 @@ func (r *CadenceReconciler) handleDeleteCluster( } } - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) patch := c.NewPatch() controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -689,7 +689,7 @@ func (r *CadenceReconciler) newCassandraSpec(c *v1beta1.Cadence, latestCassandra func (r *CadenceReconciler) startSyncJob(c *v1beta1.Cadence) error { job := r.newSyncJob(c) - err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -698,14 +698,14 @@ func (r *CadenceReconciler) startSyncJob(c *v1beta1.Cadence) error { } func (r *CadenceReconciler) newSyncJob(c *v1beta1.Cadence) scheduler.Job { - l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.StatusChecker), "clusterID", c.Status.ID) + l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.SyncJob), "clusterID", c.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(c) err := r.Get(context.Background(), namespacedName, c) if k8serrors.IsNotFound(err) { 
l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -1150,7 +1150,7 @@ func (r *CadenceReconciler) handleExternalDelete(ctx context.Context, c *v1beta1 r.EventRecorder.Eventf(c, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index 80db278ae..6f7f02487 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -239,6 +239,11 @@ func (r *CassandraReconciler) createCluster(ctx context.Context, c *v1beta1.Cass return fmt.Errorf("failed to update cassandra status, err: %w", err) } + err = r.createDefaultSecret(ctx, c, l) + if err != nil { + return fmt.Errorf("failed to create default cassandra user secret, err: %w", err) + } + l.Info( "Cluster has been created", "cluster name", c.Spec.Name, @@ -264,7 +269,7 @@ func (r *CassandraReconciler) startClusterJobs(c *v1beta1.Cassandra, l logr.Logg r.EventRecorder.Eventf( c, models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) err = r.startClusterBackupsJob(c) @@ -316,21 +321,6 @@ func (r *CassandraReconciler) handleCreateCluster( } } - err := r.createDefaultSecret(ctx, c, l) - if err != nil { - l.Error(err, "Cannot create default secret for Cassandra", - "cluster name", c.Spec.Name, - "clusterID", c.Status.ID, - ) - r.EventRecorder.Eventf( - c, models.Warning, models.CreationFailed, - "Default user secret creation on the Instaclustr is failed. 
Reason: %v", - err, - ) - - return reconcile.Result{}, err - } - if c.Status.State != models.DeletedStatus { patch := c.NewPatch() c.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent @@ -532,7 +522,7 @@ func (r *CassandraReconciler) handleDeleteCluster( r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) l.Info("Deleting cluster backup resources", "cluster ID", c.Status.ID) @@ -615,7 +605,7 @@ func (r *CassandraReconciler) handleDeleteCluster( func (r *CassandraReconciler) startSyncJob(c *v1beta1.Cassandra) error { job := r.newSyncJob(c) - err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -658,7 +648,7 @@ func (r *CassandraReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cassandra, } func (r *CassandraReconciler) newSyncJob(c *v1beta1.Cassandra) scheduler.Job { - l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.StatusChecker), "clusterID", c.Status.ID) + l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.SyncJob), "clusterID", c.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(c) @@ -668,7 +658,7 @@ func (r *CassandraReconciler) newSyncJob(c *v1beta1.Cassandra) scheduler.Job { "namespaced name", namespacedName) r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } @@ -1083,7 +1073,7 @@ func (r *CassandraReconciler) handleExternalDelete(ctx context.Context, c *v1bet r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) 
r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/datatest/zookeeper_v1beta1.yaml b/controllers/clusters/datatest/zookeeper_v1beta1.yaml index b82202173..32400e173 100644 --- a/controllers/clusters/datatest/zookeeper_v1beta1.yaml +++ b/controllers/clusters/datatest/zookeeper_v1beta1.yaml @@ -12,15 +12,15 @@ spec: name: "MyTestDataCentre1" network: "10.0.0.0/16" nodeSize: "zookeeper-developer-t3.small-20" - nodesNumber: 3 + numberOfNodes: 3 region: "US_EAST_1" # accountName: "Custrom" # cloudProviderSettings: # - customVirtualNetworkId: "vpc-12345678" # diskEncryptionKey: "123e4567-e89b-12d3-a456-426614174000" # resourceGroup: "asdfadfsdfas" - name: "OperatorZookeeper" - privateNetworkCluster: true + name: "bohdan-zookeeper-test" + privateNetwork: true # twoFactorDelete: # - email: "emailTEST" # phone: "phoneTEST" diff --git a/controllers/clusters/helpers.go b/controllers/clusters/helpers.go index 2380137c1..a92068507 100644 --- a/controllers/clusters/helpers.go +++ b/controllers/clusters/helpers.go @@ -30,12 +30,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/instaclustr/operator/apis/clusters/v1beta1" "github.com/instaclustr/operator/pkg/models" "github.com/instaclustr/operator/pkg/utils/dcomparison" ) @@ -60,98 +58,6 @@ func convertAPIv2ConfigToMap(instConfigs []*models.ConfigurationProperties) map[ return newConfigs } -func areStatusesEqual(a, b *v1beta1.ClusterStatus) bool { - if a == nil && b == nil { - return true - } - - if a == nil || b == nil || - a.ID != b.ID || - a.State != b.State || - a.CDCID != b.CDCID || - a.TwoFactorDeleteEnabled != 
b.TwoFactorDeleteEnabled || - a.CurrentClusterOperationStatus != b.CurrentClusterOperationStatus || - !areDataCentresEqual(a.DataCentres, b.DataCentres) || - !areDataCentreOptionsEqual(a.Options, b.Options) || - !b.PrivateLinkStatusesEqual(a) { - return false - } - - return true -} - -func areDataCentreOptionsEqual(a, b *v1beta1.Options) bool { - if a == nil && b == nil { - return true - } - - if a == nil || b == nil { - return false - } - - return *a == *b -} - -func areDataCentresEqual(a, b []*v1beta1.DataCentreStatus) bool { - if a == nil && b == nil { - return true - } - - if len(a) != len(b) { - return false - } - - for i := range a { - if a[i].ID != b[i].ID { - continue - } - - if a[i].Status != b[i].Status || - a[i].NodesNumber != b[i].NodesNumber || - a[i].EncryptionKeyID != b[i].EncryptionKeyID { - return false - } - - if !isDataCentreNodesEqual(a[i].Nodes, b[i].Nodes) { - return false - } - } - - return true -} - -func isDataCentreNodesEqual(a, b []*v1beta1.Node) bool { - if a == nil && b == nil { - return true - } - if len(a) != len(b) { - return false - } - - for i := range a { - var eq bool - for j := range b { - if a[i].ID != b[j].ID { - continue - } - - if a[i].Size != b[j].Size || - a[i].PublicAddress != b[j].PublicAddress || - a[i].PrivateAddress != b[j].PrivateAddress || - a[i].Status != b[j].Status || - !slices.Equal(a[i].Roles, b[j].Roles) || - a[i].Rack != b[j].Rack { - return false - } - eq = true - } - if !eq { - return false - } - } - return true -} - func getSortedAppVersions(versions []*models.AppVersions, appType string) []*version.Version { for _, apps := range versions { if apps.Application == appType { diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go index b82b98a0b..037a38108 100644 --- a/controllers/clusters/kafka_controller.go +++ b/controllers/clusters/kafka_controller.go @@ -156,7 +156,7 @@ func (r *KafkaReconciler) startJobs(k *v1beta1.Kafka) error { r.EventRecorder.Eventf( k, 
models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) if k.Spec.UserRefs != nil && k.Status.AvailableUsers == nil { @@ -376,7 +376,7 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, k *v1beta1.Ka return reconcile.Result{}, err } - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) controllerutil.RemoveFinalizer(k, models.DeletionFinalizer) k.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -429,7 +429,7 @@ func (r *KafkaReconciler) startClusterOnPremisesIPsJob(k *v1beta1.Kafka, b *onPr func (r *KafkaReconciler) startSyncJob(kafka *v1beta1.Kafka) error { job := r.newSyncJob(kafka) - err := r.Scheduler.ScheduleJob(kafka.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(kafka.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -438,7 +438,7 @@ func (r *KafkaReconciler) startSyncJob(kafka *v1beta1.Kafka) error { } func (r *KafkaReconciler) newSyncJob(k *v1beta1.Kafka) scheduler.Job { - l := log.Log.WithValues("syncJob", k.GetJobID(scheduler.StatusChecker), "clusterID", k.Status.ID) + l := log.Log.WithValues("syncJob", k.GetJobID(scheduler.SyncJob), "clusterID", k.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(k) @@ -446,7 +446,7 @@ func (r *KafkaReconciler) newSyncJob(k *v1beta1.Kafka) scheduler.Job { if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. 
Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker)) return nil @@ -655,7 +655,7 @@ func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, k *v1beta1.K r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go index 697a4c721..029b8aea6 100644 --- a/controllers/clusters/kafkaconnect_controller.go +++ b/controllers/clusters/kafkaconnect_controller.go @@ -362,7 +362,7 @@ func (r *KafkaConnectReconciler) handleDeleteCluster(ctx context.Context, kc *v1 return reconcile.Result{}, err } - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(kc, models.DeletionFinalizer) kc.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, kc, patch) @@ -467,7 +467,7 @@ func (r *KafkaConnectReconciler) startClusterOnPremisesIPsJob(k *v1beta1.KafkaCo func (r *KafkaConnectReconciler) startSyncJob(kc *v1beta1.KafkaConnect) error { job := r.newSyncJob(kc) - err := r.Scheduler.ScheduleJob(kc.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(kc.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -484,7 +484,7 @@ func (r *KafkaConnectReconciler) newSyncJob(kc *v1beta1.KafkaConnect) scheduler. if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. 
Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -673,7 +673,7 @@ func (r *KafkaConnectReconciler) handleExternalDelete(ctx context.Context, kc *v r.EventRecorder.Eventf(kc, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) r.Scheduler.RemoveJob(kc.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/opensearch_controller.go b/controllers/clusters/opensearch_controller.go index 696294cb9..c7fa5a41e 100644 --- a/controllers/clusters/opensearch_controller.go +++ b/controllers/clusters/opensearch_controller.go @@ -448,7 +448,7 @@ func (r *OpenSearchReconciler) HandleDeleteCluster( r.Scheduler.RemoveJob(o.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(o.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(o.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(o.GetJobID(scheduler.SyncJob)) logger.Info("Deleting cluster backup resources", "cluster ID", o.Status.ID, @@ -518,7 +518,7 @@ func (r *OpenSearchReconciler) HandleDeleteCluster( func (r *OpenSearchReconciler) startClusterSyncJob(cluster *v1beta1.OpenSearch) error { job := r.newSyncJob(cluster) - err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -549,7 +549,7 @@ func (r *OpenSearchReconciler) startUsersCreationJob(cluster *v1beta1.OpenSearch } func (r *OpenSearchReconciler) newSyncJob(o *v1beta1.OpenSearch) scheduler.Job { - l := log.Log.WithValues("syncJob", o.GetJobID(scheduler.StatusChecker), "clusterID", o.Status.ID) + l := 
log.Log.WithValues("syncJob", o.GetJobID(scheduler.SyncJob), "clusterID", o.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(o) @@ -559,7 +559,7 @@ func (r *OpenSearchReconciler) newSyncJob(o *v1beta1.OpenSearch) scheduler.Job { "namespaced name", namespacedName) r.Scheduler.RemoveJob(o.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(o.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(o.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(o.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -993,7 +993,7 @@ func (r *OpenSearchReconciler) handleExternalDelete(ctx context.Context, o *v1be r.Scheduler.RemoveJob(o.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(o.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(o.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(o.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go index c972ba737..43125caeb 100644 --- a/controllers/clusters/postgresql_controller.go +++ b/controllers/clusters/postgresql_controller.go @@ -266,7 +266,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( r.EventRecorder.Eventf( pg, models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) if pg.Spec.DataCentres[0].CloudProvider == models.ONPREMISES { @@ -531,7 +531,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( ) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(pg, models.DeletionFinalizer) pg.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -698,7 +698,7 @@ func (r *PostgreSQLReconciler) startClusterOnPremisesIPsJob(pg *v1beta1.PostgreS func (r *PostgreSQLReconciler) startClusterStatusJob(pg *v1beta1.PostgreSQL) error { job := 
r.newWatchStatusJob(pg) - err := r.Scheduler.ScheduleJob(pg.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(pg.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -718,7 +718,7 @@ func (r *PostgreSQLReconciler) startClusterBackupsJob(pg *v1beta1.PostgreSQL) er } func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) scheduler.Job { - l := log.Log.WithValues("syncJob", pg.GetJobID(scheduler.StatusChecker), "clusterID", pg.Status.ID) + l := log.Log.WithValues("syncJob", pg.GetJobID(scheduler.SyncJob), "clusterID", pg.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(pg) @@ -727,7 +727,7 @@ func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) schedul l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.UserCreator)) return nil } @@ -1348,7 +1348,7 @@ func (r *PostgreSQLReconciler) handleExternalDelete(ctx context.Context, pg *v1b r.Scheduler.RemoveJob(pg.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go index 89eda25db..68b451d8c 100644 --- a/controllers/clusters/redis_controller.go +++ b/controllers/clusters/redis_controller.go @@ -473,7 +473,7 @@ func (r *RedisReconciler) handleDeleteCluster( } } - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.SyncJob)) 
r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) l.Info("Deleting cluster backup resources", @@ -567,7 +567,7 @@ func (r *RedisReconciler) startClusterOnPremisesIPsJob(redis *v1beta1.Redis, b * func (r *RedisReconciler) startSyncJob(cluster *v1beta1.Redis) error { job := r.newSyncJob(cluster) - err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -632,7 +632,7 @@ func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Jo } func (r *RedisReconciler) newSyncJob(redis *v1beta1.Redis) scheduler.Job { - l := log.Log.WithValues("syncJob", redis.GetJobID(scheduler.StatusChecker), "clusterID", redis.Status.ID) + l := log.Log.WithValues("syncJob", redis.GetJobID(scheduler.SyncJob), "clusterID", redis.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(redis) @@ -642,7 +642,7 @@ func (r *RedisReconciler) newSyncJob(redis *v1beta1.Redis) scheduler.Job { "namespaced name", namespacedName) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.SyncJob)) return nil } @@ -1022,7 +1022,7 @@ func (r *RedisReconciler) handleExternalDelete(ctx context.Context, redis *v1bet r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/zookeeper_controller.go b/controllers/clusters/zookeeper_controller.go index 369b5f142..c64d0bb14 100644 --- a/controllers/clusters/zookeeper_controller.go +++ 
b/controllers/clusters/zookeeper_controller.go @@ -18,7 +18,9 @@ package clusters import ( "context" + "encoding/json" "errors" + "fmt" "github.com/go-logr/logr" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -101,6 +103,52 @@ func (r *ZookeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } } +func (r *ZookeeperReconciler) createCluster(ctx context.Context, zook *v1beta1.Zookeeper, l logr.Logger) error { + l.Info("Creating zookeeper cluster", + "cluster name", zook.Spec.Name, + "data centres", zook.Spec.DataCentres) + + b, err := r.API.CreateClusterRaw(instaclustr.ZookeeperEndpoint, zook.Spec.ToInstAPI()) + if err != nil { + return fmt.Errorf("failed to create zookeeper cluster, err: %w", err) + } + + var instaModel models.ZookeeperCluster + err = json.Unmarshal(b, &instaModel) + if err != nil { + return fmt.Errorf("failed to unmarshal body to models.ZookeeperCluster, err: %w", err) + } + + patch := zook.NewPatch() + + zook.Spec.FromInstAPI(&instaModel) + zook.Annotations[models.ResourceStateAnnotation] = models.SyncingEvent + err = r.Patch(ctx, zook, patch) + if err != nil { + return fmt.Errorf("failed to patch cluster spec, err: %w", err) + } + + zook.Status.FromInstAPI(&instaModel) + err = r.Status().Patch(ctx, zook, patch) + if err != nil { + return fmt.Errorf("failed to patch cluster status, err: %w", err) + } + + l.Info("Zookeeper cluster has been created", "cluster ID", zook.Status.ID) + r.EventRecorder.Eventf( + zook, models.Normal, models.Created, + "Cluster creation request is sent. 
Cluster ID: %s", + zook.Status.ID, + ) + + err = r.createDefaultSecret(ctx, zook, l) + if err != nil { + return err + } + + return nil +} + func (r *ZookeeperReconciler) handleCreateCluster( ctx context.Context, zook *v1beta1.Zookeeper, @@ -110,41 +158,17 @@ func (r *ZookeeperReconciler) handleCreateCluster( l = l.WithName("Creation Event") if zook.Status.ID == "" { - l.Info("Creating zookeeper cluster", - "cluster name", zook.Spec.Name, - "data centres", zook.Spec.DataCentres) - - patch := zook.NewPatch() - - zook.Status.ID, err = r.API.CreateCluster(instaclustr.ZookeeperEndpoint, zook.Spec.ToInstAPI()) - if err != nil { - l.Error(err, "Cannot create zookeeper cluster", "spec", zook.Spec) - r.EventRecorder.Eventf( - zook, models.Warning, models.CreationFailed, - "Cluster creation on the Instaclustr is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - r.EventRecorder.Eventf( - zook, models.Normal, models.Created, - "Cluster creation request is sent. Cluster ID: %s", - zook.Status.ID, - ) - - err = r.Status().Patch(ctx, zook, patch) + err := r.createCluster(ctx, zook, l) if err != nil { - l.Error(err, "Cannot patch zookeeper cluster status from the Instaclustr API", - "spec", zook.Spec) - r.EventRecorder.Eventf( - zook, models.Warning, models.PatchFailed, - "Cluster resource status patch is failed. Reason: %v", - err, + r.EventRecorder.Eventf(zook, models.Warning, models.CreationFailed, + "Failed to create Zookeeper cluster. 
Reason: %v", err, ) return reconcile.Result{}, err } + } + if zook.Status.State != models.DeletedStatus { + patch := zook.NewPatch() zook.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent controllerutil.AddFinalizer(zook, models.DeletionFinalizer) err = r.Patch(ctx, zook, patch) @@ -158,26 +182,7 @@ func (r *ZookeeperReconciler) handleCreateCluster( return reconcile.Result{}, err } - l.Info("Zookeeper cluster has been created", "cluster ID", zook.Status.ID) - - err = r.createDefaultSecret(ctx, zook, l) - if err != nil { - l.Error(err, "Cannot create default secret for Zookeeper cluster", - "cluster name", zook.Spec.Name, - "clusterID", zook.Status.ID, - ) - r.EventRecorder.Eventf( - zook, models.Warning, models.CreationFailed, - "Default user secret creation on the Instaclustr is failed. Reason: %v", - err, - ) - - return reconcile.Result{}, err - } - } - - if zook.Status.State != models.DeletedStatus { - err = r.startClusterStatusJob(zook) + err = r.startClusterSyncJob(zook) if err != nil { l.Error(err, "Cannot start cluster status job", "zookeeper cluster ID", zook.Status.ID) @@ -191,7 +196,7 @@ func (r *ZookeeperReconciler) handleCreateCluster( r.EventRecorder.Eventf( zook, models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) } @@ -253,24 +258,21 @@ func (r *ZookeeperReconciler) handleUpdateCluster( ) (reconcile.Result, error) { l = l.WithName("Update Event") - iData, err := r.API.GetZookeeper(zook.Status.ID) + instaModel, err := r.API.GetZookeeper(zook.Status.ID) if err != nil { l.Error(err, "Cannot get cluster from the Instaclustr", "cluster ID", zook.Status.ID) return reconcile.Result{}, err } - iZook, err := zook.FromInstAPI(iData) - if err != nil { - l.Error(err, "Cannot convert cluster from the Instaclustr API", "cluster ID", zook.Status.ID) - return reconcile.Result{}, err - } + iZook := &v1beta1.Zookeeper{} + iZook.FromInstAPI(instaModel) if 
zook.Annotations[models.ExternalChangesAnnotation] == models.True || r.RateLimiter.NumRequeues(req) == rlimiter.DefaultMaxTries { return handleExternalChanges[v1beta1.ZookeeperSpec](r.EventRecorder, r.Client, zook, iZook, l) } - if zook.Spec.ClusterSettingsNeedUpdate(iZook.Spec.Cluster) { + if zook.Spec.ClusterSettingsNeedUpdate(&iZook.Spec.GenericClusterSpec) { l.Info("Updating cluster settings", "instaclustr description", iZook.Spec.Description, "instaclustr two factor delete", iZook.Spec.TwoFactorDelete) @@ -300,7 +302,7 @@ func (r *ZookeeperReconciler) handleDeleteCluster( if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error(err, "Cannot get zookeeper cluster", "cluster name", zook.Spec.Name, - "status", zook.Status.ClusterStatus.State) + "status", zook.Status.GenericStatus.State) r.EventRecorder.Eventf( zook, models.Warning, models.FetchFailed, "Cluster resource fetch from the Instaclustr API is failed. Reason: %v", @@ -366,7 +368,7 @@ func (r *ZookeeperReconciler) handleDeleteCluster( return reconcile.Result{}, err } - r.Scheduler.RemoveJob(zook.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(zook.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(zook, models.DeletionFinalizer) zook.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, zook, patch) @@ -403,10 +405,10 @@ func (r *ZookeeperReconciler) handleDeleteCluster( return models.ExitReconcile, nil } -func (r *ZookeeperReconciler) startClusterStatusJob(Zookeeper *v1beta1.Zookeeper) error { - job := r.newWatchStatusJob(Zookeeper) +func (r *ZookeeperReconciler) startClusterSyncJob(Zookeeper *v1beta1.Zookeeper) error { + job := r.newSyncJob(Zookeeper) - err := r.Scheduler.ScheduleJob(Zookeeper.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(Zookeeper.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -414,15 +416,16 @@ func (r 
*ZookeeperReconciler) startClusterStatusJob(Zookeeper *v1beta1.Zookeeper return nil } -func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) scheduler.Job { - l := log.Log.WithValues("component", "ZookeeperStatusClusterJob") +func (r *ZookeeperReconciler) newSyncJob(zook *v1beta1.Zookeeper) scheduler.Job { + l := log.Log.WithValues("syncJob", zook.GetJobID(scheduler.SyncJob), "clusterID", zook.Status.ID) + return func() error { namespacedName := client.ObjectKeyFromObject(zook) err := r.Get(context.Background(), namespacedName, zook) if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(zook.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(zook.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -431,7 +434,7 @@ func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) schedul return err } - iData, err := r.API.GetZookeeper(zook.Status.ID) + instaModel, err := r.API.GetZookeeper(zook.Status.ID) if err != nil { if errors.Is(err, instaclustr.NotFound) { if zook.DeletionTimestamp != nil { @@ -447,21 +450,16 @@ func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) schedul return err } - iZook, err := zook.FromInstAPI(iData) - if err != nil { - l.Error(err, "Cannot convert cluster from the Instaclustr API", "cluster ID", zook.Status.ID) - return err - } + iZook := &v1beta1.Zookeeper{} + iZook.FromInstAPI(instaModel) - if !areStatusesEqual(&zook.Status.ClusterStatus, &iZook.Status.ClusterStatus) { - l.Info("Updating Zookeeper status", - "instaclustr status", iZook.Status, - "status", zook.Status) + if !zook.Status.Equals(&iZook.Status) { + l.Info("Updating Zookeeper status") - areDCsEqual := areDataCentresEqual(iZook.Status.ClusterStatus.DataCentres, zook.Status.ClusterStatus.DataCentres) + areDCsEqual := zook.Status.DCsEquals(iZook.Status.DataCentres) patch := zook.NewPatch() - 
zook.Status.ClusterStatus = iZook.Status.ClusterStatus + zook.Status.FromInstAPI(instaModel) err = r.Status().Patch(context.Background(), zook, patch) if err != nil { l.Error(err, "Cannot patch Zookeeper cluster", @@ -473,14 +471,14 @@ func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) schedul if !areDCsEqual { var nodes []*v1beta1.Node - for _, dc := range iZook.Status.ClusterStatus.DataCentres { + for _, dc := range iZook.Status.DataCentres { nodes = append(nodes, dc.Nodes...) } err = exposeservice.Create(r.Client, zook.Name, zook.Namespace, - zook.Spec.PrivateNetworkCluster, + zook.Spec.PrivateNetwork, nodes, models.ZookeeperConnectionPort) if err != nil { @@ -552,7 +550,7 @@ func (r *ZookeeperReconciler) handleExternalDelete(ctx context.Context, zook *v1 r.EventRecorder.Eventf(zook, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) r.Scheduler.RemoveJob(zook.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(zook.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(zook.GetJobID(scheduler.SyncJob)) return nil } @@ -577,6 +575,10 @@ func (r *ZookeeperReconciler) SetupWithManager(mgr ctrl.Manager) error { return true } + if newObj.Status.ID == "" && newObj.Annotations[models.ResourceStateAnnotation] == models.SyncingEvent { + return false + } + if newObj.Status.ID == "" { newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent return true @@ -607,7 +609,7 @@ func (r *ZookeeperReconciler) reconcileMaintenanceEvents(ctx context.Context, z return err } - if !z.Status.AreMaintenanceEventStatusesEqual(iMEStatuses) { + if !z.Status.MaintenanceEventsEqual(iMEStatuses) { patch := z.NewPatch() z.Status.MaintenanceEvents = iMEStatuses err = r.Status().Patch(ctx, z, patch) diff --git a/controllers/kafkamanagement/mirror_controller.go b/controllers/kafkamanagement/mirror_controller.go index 4b5677de0..752d8124b 100644 --- a/controllers/kafkamanagement/mirror_controller.go +++ 
b/controllers/kafkamanagement/mirror_controller.go @@ -231,7 +231,7 @@ func (r *MirrorReconciler) handleDeleteMirror( } patch := mirror.NewPatch() - r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.SyncJob)) mirror.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent controllerutil.RemoveFinalizer(mirror, models.DeletionFinalizer) err = r.Patch(ctx, mirror, patch) @@ -256,7 +256,7 @@ func (r *MirrorReconciler) handleDeleteMirror( func (r *MirrorReconciler) startClusterStatusJob(mirror *v1beta1.Mirror) error { job := r.newWatchStatusJob(mirror) - err := r.Scheduler.ScheduleJob(mirror.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(mirror.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -275,7 +275,7 @@ func (r *MirrorReconciler) newWatchStatusJob(mirror *v1beta1.Mirror) scheduler.J r.EventRecorder.Eventf(mirror, models.Normal, models.Deleted, "Mirror is not found in the k8s cluster. 
Closing Instaclustr status sync.") - r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.SyncJob)) return nil } if err != nil { diff --git a/pkg/instaclustr/client.go b/pkg/instaclustr/client.go index da6611529..8ad362ee0 100644 --- a/pkg/instaclustr/client.go +++ b/pkg/instaclustr/client.go @@ -504,7 +504,7 @@ func (c *Client) UpdateKafkaConnect(id string, kc models.KafkaConnectAPIUpdate) return nil } -func (c *Client) GetZookeeper(id string) ([]byte, error) { +func (c *Client) GetZookeeper(id string) (*models.ZookeeperCluster, error) { url := c.serverHostname + ZookeeperEndpoint + id resp, err := c.DoRequest(url, http.MethodGet, nil) @@ -526,7 +526,13 @@ func (c *Client) GetZookeeper(id string) ([]byte, error) { return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) } - return body, nil + var cluster models.ZookeeperCluster + err = json.Unmarshal(body, &cluster) + if err != nil { + return nil, err + } + + return &cluster, nil } func (c *Client) UpdateDescriptionAndTwoFactorDelete(clusterEndpoint, clusterID, description string, twoFactorDelete *v1beta1.TwoFactorDelete) error { diff --git a/pkg/instaclustr/interfaces.go b/pkg/instaclustr/interfaces.go index 146462081..72b679645 100644 --- a/pkg/instaclustr/interfaces.go +++ b/pkg/instaclustr/interfaces.go @@ -84,7 +84,7 @@ type API interface { GetKafka(id string) (*models.KafkaCluster, error) GetKafkaConnect(id string) (*models.KafkaConnectCluster, error) UpdateKafkaConnect(id string, kc models.KafkaConnectAPIUpdate) error - GetZookeeper(id string) ([]byte, error) + GetZookeeper(id string) (*models.ZookeeperCluster, error) RestoreCluster(restoreData any, clusterKind string) (string, error) GetPostgreSQL(id string) (*models.PGCluster, error) UpdatePostgreSQL(id string, r *models.PGClusterUpdate) error diff --git a/pkg/instaclustr/mock/client.go b/pkg/instaclustr/mock/client.go index 99fdebea2..fb6da34f1 100644 --- 
a/pkg/instaclustr/mock/client.go +++ b/pkg/instaclustr/mock/client.go @@ -314,7 +314,7 @@ func (c *mockClient) GetKafkaConnect(id string) (*models.KafkaConnectCluster, er panic("GetKafkaConnect: is not implemented") } -func (c *mockClient) GetZookeeper(id string) ([]byte, error) { +func (c *mockClient) GetZookeeper(id string) (*models.ZookeeperCluster, error) { panic("GetZookeeper: is not implemented") } diff --git a/pkg/models/redis_apiv2.go b/pkg/models/redis_apiv2.go index 003d88a40..a91481135 100644 --- a/pkg/models/redis_apiv2.go +++ b/pkg/models/redis_apiv2.go @@ -32,8 +32,8 @@ type RedisDataCentre struct { NodeSize string `json:"nodeSize"` MasterNodes int `json:"masterNodes"` - ReplicaNodes int `json:"replicaNodes,omitempty"` - ReplicationFactor int `json:"replicationFactor,omitempty"` + ReplicaNodes int `json:"replicaNodes"` + ReplicationFactor int `json:"replicationFactor"` Nodes []*Node `json:"nodes,omitempty"` PrivateLink []*PrivateLink `json:"privateLink,omitempty"` diff --git a/pkg/models/zookeeper_apiv2.go b/pkg/models/zookeeper_apiv2.go index 1645b6184..17d86293d 100644 --- a/pkg/models/zookeeper_apiv2.go +++ b/pkg/models/zookeeper_apiv2.go @@ -17,21 +17,19 @@ limitations under the License. 
package models type ZookeeperCluster struct { - ID string `json:"id,omitempty"` - Name string `json:"name"` - ZookeeperVersion string `json:"zookeeperVersion,omitempty"` - CurrentClusterOperationStatus string `json:"currentClusterOperationStatus,omitempty"` - Status string `json:"status,omitempty"` - PrivateNetworkCluster bool `json:"privateNetworkCluster"` - SLATier string `json:"slaTier"` - TwoFactorDelete []*TwoFactorDelete `json:"twoFactorDelete,omitempty"` - DataCentres []*ZookeeperDataCentre `json:"dataCentres"` - Description string `json:"description,omitempty"` + GenericClusterFields `json:",inline"` + + ZookeeperVersion string `json:"zookeeperVersion,omitempty"` + DataCentres []*ZookeeperDataCentre `json:"dataCentres"` } type ZookeeperDataCentre struct { - DataCentre `json:",inline"` + GenericDataCentreFields `json:",inline"` + + NumberOfNodes int `json:"numberOfNodes"` + NodeSize string `json:"nodeSize"` ClientToServerEncryption bool `json:"clientToServerEncryption"` - EnforceAuthSchemes []string `json:"enforceAuthSchemes,omitempty"` EnforceAuthEnabled bool `json:"enforceAuthEnabled"` + EnforceAuthSchemes []string `json:"enforceAuthSchemes,omitempty"` + Nodes []*Node `json:"nodes,omitempty"` } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 2c0d5cbd2..0f1523667 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -29,7 +29,7 @@ var ClusterBackupsInterval time.Duration var UserCreationInterval time.Duration const ( - StatusChecker = "statusChecker" + SyncJob = "sync" BackupsChecker = "backupsChecker" UserCreator = "userCreator" OnPremisesIPsChecker = "onPremisesIPsChecker"