Add a simple e2e test for the presence of consumed capacity #1079

Open
wants to merge 12 commits into base: master
Changes from 6 commits
2 changes: 1 addition & 1 deletion go.mod
@@ -1,7 +1,7 @@
module github.com/codeready-toolchain/toolchain-e2e

require (
github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf
github.com/codeready-toolchain/api v0.0.0-20241119094246-f6581d52dc80
github.com/codeready-toolchain/toolchain-common v0.0.0-20241114215157-a6a85252b2f5
github.com/davecgh/go-spew v1.1.1
github.com/fatih/color v1.15.0
4 changes: 2 additions & 2 deletions go.sum
@@ -119,8 +119,8 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf h1:tOHKd4PT6gnV8lLh3kmqqK9YONvL6oFKHpi0kGzfsvw=
github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf/go.mod h1:DUq1ffy9Mbersdgji48i/cm9Y+6NMwAdAQJNlfOrPRo=
github.com/codeready-toolchain/api v0.0.0-20241119094246-f6581d52dc80 h1:OpZkP3OGAdrDHOb1TtHVnLSVuevEiQhOH//plnpVL/c=
github.com/codeready-toolchain/api v0.0.0-20241119094246-f6581d52dc80/go.mod h1:DUq1ffy9Mbersdgji48i/cm9Y+6NMwAdAQJNlfOrPRo=
github.com/codeready-toolchain/toolchain-common v0.0.0-20241114215157-a6a85252b2f5 h1:vW0C32c6sI9ZUGcUw3e9ftE9hqJ/bMo+TtRHp84Hung=
github.com/codeready-toolchain/toolchain-common v0.0.0-20241114215157-a6a85252b2f5/go.mod h1:wx/d4HVbDPOadwpbxn28ZGClC5OmzelIK8p4wupDJVI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
127 changes: 17 additions & 110 deletions test/e2e/parallel/spaceprovisionerconfig_test.go
@@ -1,19 +1,14 @@
package parallel

import (
"context"
"testing"

toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
. "github.com/codeready-toolchain/toolchain-common/pkg/test/assertions"
. "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig"
. "github.com/codeready-toolchain/toolchain-e2e/testsupport"
. "github.com/codeready-toolchain/toolchain-e2e/testsupport/spaceprovisionerconfig"
"github.com/codeready-toolchain/toolchain-e2e/testsupport/util"
"github.com/codeready-toolchain/toolchain-e2e/testsupport/wait"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -28,17 +23,14 @@ func TestSpaceProvisionerConfig(t *testing.T) {

t.Run("ready with existing ready cluster", func(t *testing.T) {
// given
// any ToolchainCluster in the host namespace will do. We don't really care...
// any ToolchainCluster in the host namespace will do. We don't really care, because both member1 and member2 should be ready...
cluster, err := host.WaitForToolchainCluster(t)
require.NoError(t, err)

// when
spc := CreateSpaceProvisionerConfig(t, host.Awaitility, ReferencingToolchainCluster(cluster.Name))
_, err = wait.For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(Has(ReferenceToToolchainCluster(cluster.Name)), Is(Ready()))

// then
_, err = wait.
For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).
WithNameThat(spc.Name, Is(Ready()))
require.NoError(t, err)
})

@@ -52,109 +44,24 @@ func TestSpaceProvisionerConfig(t *testing.T) {
WithNameThat(spc.Name, Is(NotReady()))
require.NoError(t, err)
})
t.Run("becomes ready when cluster becomes ready", func(t *testing.T) {
// given
existingCluster, err := host.WaitForToolchainCluster(t, wait.UntilToolchainClusterHasName(awaitilities.Member1().ClusterName))
require.NoError(t, err)
tc := copyClusterWithoutSecret(t, host.Awaitility, existingCluster)
spc := CreateSpaceProvisionerConfig(t, host.Awaitility, ReferencingToolchainCluster(tc.Name))

_, err = wait.
For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).
WithNameThat(spc.Name, Is(NotReady()))
require.NoError(t, err)

newSecretName := util.NewObjectNamePrefix(t) + string(uuid.NewUUID()[0:20])
wait.CopyWithCleanup(t, host.Awaitility,
client.ObjectKey{Name: existingCluster.Spec.SecretRef.Name, Namespace: existingCluster.Namespace},
client.ObjectKey{Name: newSecretName, Namespace: existingCluster.Namespace},
&corev1.Secret{})

// when
_, err = wait.For(t, host.Awaitility, &toolchainv1alpha1.ToolchainCluster{}).
Update(tc.Name, host.Namespace, func(updatedTc *toolchainv1alpha1.ToolchainCluster) {
updatedTc.Spec.SecretRef.Name = newSecretName
})
require.NoError(t, err)

// then
_, err = wait.
For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).
WithNameThat(spc.Name, Is(Ready()))
require.NoError(t, err)
})
t.Run("becomes not ready when cluster disappears", func(t *testing.T) {
// given
t.Run("contains the consumed capacity", func(t *testing.T) {
test := func(t *testing.T, memberName string) {
_, err := host.WaitForToolchainCluster(t, wait.UntilToolchainClusterHasName(memberName))
require.NoError(t, err)

// we need to create a copy of the cluster and the token secret
existingCluster, err := host.WaitForToolchainCluster(t)
require.NoError(t, err)
cluster := copyClusterWithSecret(t, host.Awaitility, existingCluster)
spc, err := wait.For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(Has(ReferenceToToolchainCluster(memberName)))
require.NoError(t, err)

// when
spc := CreateSpaceProvisionerConfig(t, host.Awaitility, ReferencingToolchainCluster(cluster.Name))
assert.NotNil(t, spc.Status.ConsumedCapacity)
// we can't really test much about the actual values in the consumed capacity because this is a parallel test and the surrounding tests
// may mess with the number of spaces and memory usage.
}

// then
_, err = wait.
For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).
WithNameThat(spc.Name, Is(Ready()))
require.NoError(t, err)

// when
assert.NoError(t, host.Client.Delete(context.TODO(), cluster))

// then
_, err = wait.
For(t, host.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).
WithNameThat(spc.Name, Is(NotReady()))
require.NoError(t, err)
})
}

func copyClusterWithSecret(t *testing.T, a *wait.Awaitility, cluster *toolchainv1alpha1.ToolchainCluster) *toolchainv1alpha1.ToolchainCluster {
t.Helper()
clusterName := util.NewObjectNamePrefix(t) + string(uuid.NewUUID()[0:20])

// copy the secret
secret := &corev1.Secret{}
wait.CopyWithCleanup(t, a,
client.ObjectKey{
Name: cluster.Spec.SecretRef.Name,
Namespace: cluster.Namespace,
},
client.ObjectKey{
Name: clusterName,
Namespace: cluster.Namespace,
},
secret,
)

// and copy the cluster referencing the new secret
newCluster := &toolchainv1alpha1.ToolchainCluster{}
wait.CopyWithCleanup(t, a,
client.ObjectKeyFromObject(cluster),
client.ObjectKey{Name: clusterName, Namespace: cluster.Namespace},
newCluster,
func(tc *toolchainv1alpha1.ToolchainCluster) {
tc.Spec.SecretRef.Name = secret.Name
tc.Status = toolchainv1alpha1.ToolchainClusterStatus{}
t.Run("for member1", func(t *testing.T) {
test(t, awaitilities.Member1().ClusterName)
})

return newCluster
}

func copyClusterWithoutSecret(t *testing.T, a *wait.Awaitility, cluster *toolchainv1alpha1.ToolchainCluster) *toolchainv1alpha1.ToolchainCluster {
t.Helper()
newName := util.NewObjectNamePrefix(t) + string(uuid.NewUUID()[0:20])
newCluster := &toolchainv1alpha1.ToolchainCluster{}
wait.CopyWithCleanup(t, a,
client.ObjectKeyFromObject(cluster),
client.ObjectKey{Name: newName, Namespace: cluster.Namespace},
newCluster,
func(tc *toolchainv1alpha1.ToolchainCluster) {
tc.Spec.SecretRef.Name = ""
tc.Status = toolchainv1alpha1.ToolchainClusterStatus{}
t.Run("for member2", func(t *testing.T) {
test(t, awaitilities.Member2().ClusterName)
})

return newCluster
})
}
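The new subtest stops at asserting that Status.ConsumedCapacity is non-nil because the surrounding parallel tests can change the space count and memory usage at any time. For illustration only, below is a minimal sketch of the kind of stricter check a serial test could run; it is not part of this PR, and the SpaceCount and MemoryUsagePercentPerNodeRole field names are assumptions about the ConsumedCapacity status that this diff does not confirm.

package parallel

import (
	"testing"

	toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// assertConsumedCapacityPopulated is a hypothetical helper sketching what a
// non-parallel test could verify about the consumed capacity. The SpaceCount
// and MemoryUsagePercentPerNodeRole field names are assumptions.
func assertConsumedCapacityPopulated(t *testing.T, spc *toolchainv1alpha1.SpaceProvisionerConfig) {
	t.Helper()
	require.NotNil(t, spc.Status.ConsumedCapacity)

	// the number of provisioned spaces can never be negative
	assert.GreaterOrEqual(t, spc.Status.ConsumedCapacity.SpaceCount, 0)

	// memory usage is reported per node role as a percentage
	for role, usage := range spc.Status.ConsumedCapacity.MemoryUsagePercentPerNodeRole {
		assert.GreaterOrEqualf(t, usage, 0, "negative memory usage reported for node role %q", role)
	}
}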
52 changes: 52 additions & 0 deletions test/e2e/usersignup_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"time"

"github.com/codeready-toolchain/toolchain-common/pkg/test/assertions"
commonauth "github.com/codeready-toolchain/toolchain-common/pkg/test/auth"
testSpc "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig"
authsupport "github.com/codeready-toolchain/toolchain-e2e/testsupport/auth"
@@ -280,6 +281,47 @@ func (s *userSignupIntegrationTest) TestProvisionToOtherClusterWhenOneIsFull() {
})
}

func (s *userSignupIntegrationTest) TestFillingUpClusterCapacityFlipsSPCsToNotReady() {
t := s.T()
hostAwait := s.Host()
memberAwait1 := s.Member1()
memberAwait2 := s.Member2()

spaceprovisionerconfig.UpdateForCluster(t, hostAwait.Awaitility, memberAwait1.ClusterName, testSpc.MaxNumberOfSpaces(1))
spaceprovisionerconfig.UpdateForCluster(t, hostAwait.Awaitility, memberAwait2.ClusterName, testSpc.Enabled(false))
hostAwait.UpdateToolchainConfig(t, testconfig.AutomaticApproval().Enabled(true))

t.Run("SPC with free capacity is ready", func(t *testing.T) {
// then
_, err := wait.For(t, hostAwait.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(
assertions.Has(spaceprovisionerconfig.ReferenceToToolchainCluster(memberAwait1.ClusterName)),
assertions.Is(testSpc.Ready()))
require.NoError(t, err)

_, err = wait.For(t, hostAwait.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(
assertions.Has(spaceprovisionerconfig.ReferenceToToolchainCluster(memberAwait2.ClusterName)),
assertions.Is(testSpc.NotReady()))
require.NoError(t, err)
})

t.Run("deploying a space fills up the member, flipping its SPC to not ready", func(t *testing.T) {
// when
user1 := NewSignupRequest(s.Awaitilities).
Username("fill-up-user-1").
Email("[email protected]").
EnsureMUR().
RequireConditions(wait.ConditionSet(wait.Default(), wait.ApprovedAutomatically())...).
Execute(s.T())

// then
VerifyResourcesProvisionedForSignup(t, s.Awaitilities, user1.UserSignup)
_, err := wait.For(t, hostAwait.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(
assertions.Has(spaceprovisionerconfig.ReferenceToToolchainCluster(memberAwait1.ClusterName)),
assertions.Is(testSpc.NotReady()))
require.NoError(t, err)
})
}

func (s *userSignupIntegrationTest) TestUserIDAndAccountIDClaimsPropagated() {
hostAwait := s.Host()

@@ -533,7 +575,12 @@ func (s *userSignupIntegrationTest) TestCapacityManagementWithManualApproval() {
Execute(s.T())
userSignup := user.UserSignup

_, spcNotReadyError := wait.For(t, hostAwait.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(
assertions.Has(spaceprovisionerconfig.ReferenceToToolchainCluster(memberAwait1.ClusterName)),
assertions.Is(testSpc.NotReady()))

// then
require.NoError(t, spcNotReadyError)
s.userIsNotProvisioned(t, userSignup)

t.Run("reset the max number and expect the user will be provisioned", func(t *testing.T) {
Expand Down Expand Up @@ -566,7 +613,12 @@ func (s *userSignupIntegrationTest) TestCapacityManagementWithManualApproval() {
Execute(s.T())
userSignup := user.UserSignup

_, spcNotReadyError := wait.For(t, hostAwait.Awaitility, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(
assertions.Has(spaceprovisionerconfig.ReferenceToToolchainCluster(memberAwait1.ClusterName)),
assertions.Is(testSpc.NotReady()))

// then
require.NoError(t, spcNotReadyError)
s.userIsNotProvisioned(t, userSignup)

t.Run("reset the threshold and expect the user will be provisioned", func(t *testing.T) {
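The wait for a NotReady SpaceProvisionerConfig that references a particular cluster now appears three times in this file: once in the new capacity test and once in each of the two manual-approval capacity tests. A small helper, shown below as a sketch rather than as part of this PR, could wrap that wait; it only uses calls already visible in the diff, while the package name and the *wait.Awaitility parameter type are assumptions.

package e2e // package name is an assumption; the helper would live next to usersignup_test.go

import (
	"testing"

	toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
	"github.com/codeready-toolchain/toolchain-common/pkg/test/assertions"
	testSpc "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig"
	"github.com/codeready-toolchain/toolchain-e2e/testsupport/spaceprovisionerconfig"
	"github.com/codeready-toolchain/toolchain-e2e/testsupport/wait"
	"github.com/stretchr/testify/require"
)

// waitForSpcNotReady blocks until the SpaceProvisionerConfig referencing the
// given cluster reports NotReady and fails the test if the wait times out.
func waitForSpcNotReady(t *testing.T, a *wait.Awaitility, clusterName string) *toolchainv1alpha1.SpaceProvisionerConfig {
	t.Helper()
	spc, err := wait.For(t, a, &toolchainv1alpha1.SpaceProvisionerConfig{}).FirstThat(
		assertions.Has(spaceprovisionerconfig.ReferenceToToolchainCluster(clusterName)),
		assertions.Is(testSpc.NotReady()))
	require.NoError(t, err)
	return spc
}

With such a helper, each of the three call sites above would reduce to a single call like waitForSpcNotReady(t, hostAwait.Awaitility, memberAwait1.ClusterName).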
23 changes: 23 additions & 0 deletions testsupport/spaceprovisionerconfig/spaceprovisionerconfig.go
@@ -5,6 +5,7 @@ import (
"testing"

toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
"github.com/codeready-toolchain/toolchain-common/pkg/test/assertions"
testSpc "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig"
"github.com/codeready-toolchain/toolchain-e2e/testsupport/util"
"github.com/codeready-toolchain/toolchain-e2e/testsupport/wait"
@@ -13,6 +14,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

func ReferenceToToolchainCluster(clusterName string) assertions.Predicate[*toolchainv1alpha1.SpaceProvisionerConfig] {
return &referenceToToolchainCluster{clusterName: clusterName}
}

func CreateSpaceProvisionerConfig(t *testing.T, await *wait.Awaitility, opts ...testSpc.CreateOption) *toolchainv1alpha1.SpaceProvisionerConfig {
namePrefix := util.NewObjectNamePrefix(t)

@@ -85,3 +90,21 @@ func findSpcForCluster(spcs []toolchainv1alpha1.SpaceProvisionerConfig, clusterN
}
return nil
}

var (
_ assertions.Predicate[*toolchainv1alpha1.SpaceProvisionerConfig] = (*referenceToToolchainCluster)(nil)
_ assertions.PredicateMatchFixer[*toolchainv1alpha1.SpaceProvisionerConfig] = (*referenceToToolchainCluster)(nil)
)

type referenceToToolchainCluster struct {
clusterName string
}

func (r *referenceToToolchainCluster) FixToMatch(obj *toolchainv1alpha1.SpaceProvisionerConfig) *toolchainv1alpha1.SpaceProvisionerConfig {
obj.Spec.ToolchainCluster = r.clusterName
return obj
}

func (r *referenceToToolchainCluster) Matches(obj *toolchainv1alpha1.SpaceProvisionerConfig) bool {
return obj.Spec.ToolchainCluster == r.clusterName
}
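The new ReferenceToToolchainCluster predicate implements both assertions.Predicate, whose Matches method decides whether an object satisfies the predicate, and assertions.PredicateMatchFixer, whose FixToMatch method returns the object mutated so that it would match, presumably so the assertions package can report how a non-matching object differs. As a hedged illustration of the same pattern, and not part of this PR, the sketch below defines a second predicate; the Spec.Enabled field it reads is an assumption suggested by the testSpc.Enabled(false) option used in the tests, not something this diff confirms.

package spaceprovisionerconfig

import (
	toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
	"github.com/codeready-toolchain/toolchain-common/pkg/test/assertions"
)

// Enabled is a hypothetical predicate matching SpaceProvisionerConfigs whose
// spec marks them as enabled; the Spec.Enabled field is an assumption.
func Enabled() assertions.Predicate[*toolchainv1alpha1.SpaceProvisionerConfig] {
	return &enabledPredicate{}
}

// compile-time checks that the predicate satisfies both interfaces,
// mirroring the checks for referenceToToolchainCluster above
var (
	_ assertions.Predicate[*toolchainv1alpha1.SpaceProvisionerConfig]           = (*enabledPredicate)(nil)
	_ assertions.PredicateMatchFixer[*toolchainv1alpha1.SpaceProvisionerConfig] = (*enabledPredicate)(nil)
)

type enabledPredicate struct{}

// Matches reports whether the SPC is enabled in its spec.
func (p *enabledPredicate) Matches(obj *toolchainv1alpha1.SpaceProvisionerConfig) bool {
	return obj.Spec.Enabled
}

// FixToMatch mutates the object so that it would satisfy the predicate,
// letting the assertions package show what a matching object would look like.
func (p *enabledPredicate) FixToMatch(obj *toolchainv1alpha1.SpaceProvisionerConfig) *toolchainv1alpha1.SpaceProvisionerConfig {
	obj.Spec.Enabled = true
	return obj
}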