diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 83e6d2647..681c985b3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,6 +18,11 @@ env: AWS_REGION: us-west-2 AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} + AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} + NAMESPACE: hmc-system jobs: e2etest: diff --git a/Makefile b/Makefile index de7e2a440..bccee5864 100644 --- a/Makefile +++ b/Makefile @@ -111,7 +111,7 @@ test: generate-all fmt vet envtest tidy external-crd ## Run tests. # compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests using a Kind k8s instance as the management cluster. test-e2e: cli-install - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=2h + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=3h .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint @@ -269,7 +269,11 @@ helm-push: helm-package chart_version=$$(echo $$base | grep -o "v\{0,1\}[0-9]\+\.[0-9]\+\.[0-9].*"); \ chart_name="$${base%-"$$chart_version"}"; \ echo "Verifying if chart $$chart_name, version $$chart_version already exists in $(REGISTRY_REPO)"; \ - chart_exists=$$($(HELM) pull $$repo_flag $(REGISTRY_REPO) $$chart_name --version $$chart_version --destination /tmp 2>&1 | grep "not found" || true); \ + if $(REGISTRY_IS_OCI); then \ + chart_exists=$$($(HELM) pull $$repo_flag $(REGISTRY_REPO)/$$chart_name --version $$chart_version --destination /tmp 2>&1 | grep "not found" || true); \ + else \ + chart_exists=$$($(HELM) pull $$repo_flag $(REGISTRY_REPO) $$chart_name --version $$chart_version --destination /tmp 2>&1 | grep "not found" || true); \ + fi; \ 
if [ -z "$$chart_exists" ]; then \ echo "Chart $$chart_name version $$chart_version already exists in the repository."; \ else \ diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c0e06347a..3faf6420d 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -34,6 +34,7 @@ import ( "github.com/Mirantis/hmc/test/kubeclient" "github.com/Mirantis/hmc/test/managedcluster" "github.com/Mirantis/hmc/test/managedcluster/aws" + "github.com/Mirantis/hmc/test/managedcluster/azure" "github.com/Mirantis/hmc/test/managedcluster/vsphere" "github.com/Mirantis/hmc/test/utils" ) @@ -179,7 +180,7 @@ var _ = Describe("controller", Ordered, func() { // Populate the environment variables required for the hosted // cluster. - aws.PopulateHostedTemplateVars(context.Background(), kc) + aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) @@ -211,7 +212,7 @@ var _ = Describe("controller", Ordered, func() { ) Eventually(func() error { return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) // Delete the hosted ManagedCluster and verify it is removed. 
templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster") @@ -309,7 +310,7 @@ var _ = Describe("controller", Ordered, func() { ) Eventually(func() error { return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) deletionValidator := managedcluster.NewProviderValidator( managedcluster.TemplateVSphereStandaloneCP, @@ -325,8 +326,165 @@ var _ = Describe("controller", Ordered, func() { }) }) + Describe("Azure Templates", Label("provider"), func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + sdName string + ) + + BeforeAll(func() { + By("ensuring Azure credentials are set") + kc = kubeclient.NewFromLocal(namespace) + azure.CreateCredentialSecret(context.Background(), kc) + }) + + AfterEach(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. 
+ if CurrentSpecReport().Failed() && !noCleanup() { + By("collecting failure logs from controllers") + if kc != nil { + collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + } + if standaloneClient != nil { + collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + } + + By("deleting resources after failure") + for _, deleteFunc := range []func() error{ + kubecfgDeleteFunc, + hostedDeleteFunc, + standaloneDeleteFunc, + } { + if deleteFunc != nil { + err := deleteFunc() + Expect(err).NotTo(HaveOccurred()) + } + } + } + }) + + It("should work with an Azure provider", func() { + templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") + sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) + sdName = sd.GetName() + + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + + // verify the standalone cluster is deployed correctly + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + sdName, + managedcluster.ValidationActionDeploy, + ) + + templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // setup environment variables for deploying the hosted template (subnet name, etc) + azure.SetAzureEnvironmentVariables(sdName, kc) + + hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP) + hdName := hd.GetName() + + var kubeCfgPath string + kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName) + + By("Deploy onto standalone cluster") + deployOnAzureCluster(kubeCfgPath) + + templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster") + standaloneClient = 
kc.NewFromCluster(context.Background(), namespace, sdName) + // verify the cluster is ready prior to creating credentials + Eventually(func() error { + err := verifyControllersUp(standaloneClient, managedcluster.ProviderAzure) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("Create azure credential secret") + azure.CreateCredentialSecret(context.Background(), standaloneClient) + + templateBy(managedcluster.TemplateAzureHostedCP, + fmt.Sprintf("creating a Deployment using template %s", managedcluster.TemplateAzureHostedCP)) + hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) + + templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully") + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + hdName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("verify the deployment deletes successfully") + err := hostedDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureHostedCP, + hdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + sdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return 
deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + }) + }) }) +func deployOnAzureCluster(kubeCfgPath string) { + GinkgoT().Helper() + GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) + cmd := exec.Command("kubectl", "create", "-f", + "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/"+ + "storageclass-azuredisk-csi.yaml") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("kubectl", "patch", "storageclass", "managed-csi", "-p", + "{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("make", "dev-deploy") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("make", "dev-templates") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) +} + // templateBy wraps a Ginkgo By with a block describing the template being // tested. func templateBy(t managedcluster.Template, description string) { diff --git a/test/managedcluster/aws/aws.go b/test/managedcluster/aws/aws.go index 11cd276ba..6c6094628 100644 --- a/test/managedcluster/aws/aws.go +++ b/test/managedcluster/aws/aws.go @@ -19,7 +19,6 @@ package aws import ( "context" "encoding/json" - "os" "os/exec" corev1 "k8s.io/api/core/v1" @@ -70,11 +69,11 @@ func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { // PopulateHostedTemplateVars populates the environment variables required for // the AWS hosted CP template by querying the standalone CP cluster with the // given kubeclient. 
-func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient) { +func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) { GinkgoHelper() c := getAWSClusterClient(kc) - awsCluster, err := c.Get(ctx, os.Getenv(managedcluster.EnvVarManagedClusterName), metav1.GetOptions{}) + awsCluster, err := c.Get(ctx, clusterName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster") vpcID, found, err := unstructured.NestedString(awsCluster.Object, "spec", "network", "vpc", "id") diff --git a/test/managedcluster/azure/azure.go b/test/managedcluster/azure/azure.go new file mode 100644 index 000000000..80dff3299 --- /dev/null +++ b/test/managedcluster/azure/azure.go @@ -0,0 +1,152 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "bufio" + "bytes" + "context" + "errors" + "io" + "os" + + "github.com/a8m/envsubst" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/discovery" + "k8s.io/client-go/restmapper" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/test/kubeclient" +) + +func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) map[string]interface{} { + GinkgoHelper() + resourceId := schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: "v1beta1", + Resource: "azureclusters", + } + + dc := kc.GetDynamicClient(resourceId) + list, err := dc.List(ctx, metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}).String(), + }) + + Expect(err).NotTo(HaveOccurred()) + Expect(len(list.Items)).NotTo(BeEquivalentTo(0)) + + spec, found, err := unstructured.NestedMap(list.Items[0].Object, "spec") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + return spec +} + +func SetAzureEnvironmentVariables(clusterName string, kc *kubeclient.KubeClient) { + GinkgoHelper() + spec := getAzureInfo(context.Background(), clusterName, kc) + + networkSpec, found, err := unstructured.NestedMap(spec, "networkSpec") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + vnet, found, err := unstructured.NestedMap(networkSpec, "vnet") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + vnetName, ok := vnet["name"].(string) + Expect(ok).To(BeTrue()) + GinkgoT().Setenv("AZURE_VM_NET_NAME", vnetName) + + subnets, found, err := unstructured.NestedSlice(networkSpec, "subnets") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + resourceGroup := spec["resourceGroup"] + 
GinkgoT().Setenv("AZURE_RESOURCE_GROUP", resourceGroup.(string)) + subnetMap, ok := subnets[0].(map[string]interface{}) + Expect(ok).To(BeTrue()) + subnetName := subnetMap["name"] + GinkgoT().Setenv("AZURE_NODE_SUBNET", subnetName.(string)) + + securityGroup, found, err := unstructured.NestedMap(subnetMap, "securityGroup") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + securityGroupName := securityGroup["name"] + GinkgoT().Setenv("AZURE_SECURITY_GROUP", securityGroupName.(string)) + + routeTable, found, err := unstructured.NestedMap(subnetMap, "routeTable") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + routeTableName := routeTable["name"] + GinkgoT().Setenv("AZURE_ROUTE_TABLE", routeTableName.(string)) +} + +func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { + GinkgoHelper() + serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + yamlFile, err := os.ReadFile("config/dev/azure-credentials.yaml") + Expect(err).NotTo(HaveOccurred()) + + yamlFile, err = envsubst.Bytes(yamlFile) + Expect(err).NotTo(HaveOccurred()) + + c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) + groupResources, err := restmapper.GetAPIGroupResources(c) + Expect(err).NotTo(HaveOccurred()) + + yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) + for { + yamlDoc, err := yamlReader.Read() + + if err != nil { + if errors.Is(err, io.EOF) { + break + } + Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") + } + + credentialResource := &unstructured.Unstructured{} + _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) + Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) + Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") + + dc := 
kc.GetDynamicClient(schema.GroupVersionResource{ + Group: credentialResource.GroupVersionKind().Group, + Version: credentialResource.GroupVersionKind().Version, + Resource: mapping.Resource.Resource, + }) + + exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred(), "failed to get azure credential secret") + } + + if exists == nil { + if _, err = dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { + Expect(err).NotTo(HaveOccurred(), "failed to create azure credential secret") + } + } + } +} diff --git a/test/managedcluster/managedcluster.go b/test/managedcluster/managedcluster.go index 2a08b53e7..4259f63f2 100644 --- a/test/managedcluster/managedcluster.go +++ b/test/managedcluster/managedcluster.go @@ -26,6 +26,8 @@ import ( . "github.com/onsi/gomega" "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/Mirantis/hmc/internal/utils" ) type ProviderType string @@ -44,6 +46,8 @@ type Template string const ( TemplateAWSStandaloneCP Template = "aws-standalone-cp" TemplateAWSHostedCP Template = "aws-hosted-cp" + TemplateAzureHostedCP Template = "azure-hosted-cp" + TemplateAzureStandaloneCP Template = "azure-standalone-cp" TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp" TemplateVSphereHostedCP Template = "vsphere-hosted-cp" ) @@ -54,6 +58,12 @@ var awsStandaloneCPManagedClusterTemplateBytes []byte //go:embed resources/aws-hosted-cp.yaml.tpl var awsHostedCPManagedClusterTemplateBytes []byte +//go:embed resources/azure-standalone-cp.yaml.tpl +var azureStandaloneCPManagedClusterTemplateBytes []byte + +//go:embed resources/azure-hosted-cp.yaml.tpl +var azureHostedCPManagedClusterTemplateBytes []byte + //go:embed resources/vsphere-standalone-cp.yaml.tpl var vsphereStandaloneCPManagedClusterTemplateBytes []byte @@ -71,7 +81,7 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { generatedName := 
os.Getenv(EnvVarManagedClusterName) if generatedName == "" { - generatedName = uuid.New().String()[:8] + "-e2e-test" + generatedName = "e2e-test-" + uuid.New().String()[:8] _, _ = fmt.Fprintf(GinkgoWriter, "Generated cluster name: %q\n", generatedName) GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) } else { @@ -104,10 +114,15 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes case TemplateVSphereHostedCP: managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes + case TemplateAzureHostedCP: + managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes + case TemplateAzureStandaloneCP: + managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("unsupported AWS template: %s", templateName)) + Fail(fmt.Sprintf("unsupported template: %s", templateName)) } + Expect(os.Setenv("NAMESPACE", utils.DefaultSystemNamespace)).NotTo(HaveOccurred()) managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables") diff --git a/test/managedcluster/providervalidator.go b/test/managedcluster/providervalidator.go index fd474253c..2deae8ff7 100644 --- a/test/managedcluster/providervalidator.go +++ b/test/managedcluster/providervalidator.go @@ -64,6 +64,8 @@ func NewProviderValidator(template Template, clusterName string, action Validati case TemplateAWSStandaloneCP, TemplateAWSHostedCP: resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") + case TemplateAzureStandaloneCP, TemplateVSphereHostedCP: + delete(resourcesToValidate, "csi-driver") } } else { resourcesToValidate = map[string]resourceValidationFunc{ diff --git a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl index 0d107ca43..8b8943c2b 100644 --- 
a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-aws spec: template: aws-standalone-cp config: diff --git a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/managedcluster/resources/azure-hosted-cp.yaml.tpl new file mode 100644 index 000000000..6b8f7ad97 --- /dev/null +++ b/test/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -0,0 +1,23 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-azure + namespace: ${NAMESPACE} +spec: + template: azure-hosted-cp + config: + location: "westus" + subscriptionID: "${AZURE_SUBSCRIPTION_ID}" + vmSize: Standard_A4_v2 + clusterIdentity: + name: azure-cluster-identity + namespace: hmc-system + resourceGroup: "${AZURE_RESOURCE_GROUP}" + network: + vnetName: "${AZURE_VM_NET_NAME}" + nodeSubnetName: "${AZURE_NODE_SUBNET}" + routeTableName: "${AZURE_ROUTE_TABLE}" + securityGroupName: "${AZURE_SECURITY_GROUP}" + tenantID: "${AZURE_TENANT_ID}" + clientID: "${AZURE_CLIENT_ID}" + clientSecret: "${AZURE_CLIENT_SECRET}" diff --git a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/managedcluster/resources/azure-standalone-cp.yaml.tpl new file mode 100644 index 000000000..44d5abf60 --- /dev/null +++ b/test/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -0,0 +1,22 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-azure + namespace: ${NAMESPACE} +spec: + template: azure-standalone-cp + config: + controlPlaneNumber: 1 + workersNumber: 1 + location: "westus" + subscriptionID: "${AZURE_SUBSCRIPTION_ID}" + controlPlane: + vmSize: Standard_A4_v2 + worker: + vmSize: Standard_A4_v2 + clusterIdentity: + name: azure-cluster-identity + namespace: ${NAMESPACE} + tenantID: 
"${AZURE_TENANT_ID}" + clientID: "${AZURE_CLIENT_ID}" + clientSecret: "${AZURE_CLIENT_SECRET}" diff --git a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index 2c556d9cc..a4c328b77 100644 --- a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-vsphere spec: template: vsphere-hosted-cp config: diff --git a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index 98d193257..81eb8edf3 100644 --- a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-vsphere spec: template: vsphere-standalone-cp config: diff --git a/test/managedcluster/validate_deployed.go b/test/managedcluster/validate_deployed.go index b80d84204..5a065338b 100644 --- a/test/managedcluster/validate_deployed.go +++ b/test/managedcluster/validate_deployed.go @@ -247,7 +247,7 @@ func validateCCM(ctx context.Context, kc *kubeclient.KubeClient, clusterName str } for _, i := range service.Status.LoadBalancer.Ingress { - if i.Hostname != "" { + if i.Hostname != "" || i.IP != "" { return nil } }