Integration Tests
Testing the controllers with envtest.
otaviof committed Nov 28, 2022
1 parent a22fde4 commit 7661973
Showing 4 changed files with 363 additions and 0 deletions.
60 changes: 60 additions & 0 deletions test/integration/helper_test.go
@@ -0,0 +1,60 @@
package integration

import (
"context"
"time"

"github.com/onsi/gomega/types"
"github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
tknv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"

. "github.com/onsi/gomega"
)

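// shared test tuning: default timeout for Eventually() assertions and delete options that
// skip the termination grace period.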
var (
timeoutDefault = 30 * time.Second
zero = int64(0)
deleteNowOpts = &client.DeleteOptions{GracePeriodSeconds: &zero}
)

// eventuallyWithTimeoutFn wraps the given function in Eventually() with the default timeout.
func eventuallyWithTimeoutFn(fn func() int) types.AsyncAssertion {
return Eventually(fn).
WithPolling(time.Second).
WithTimeout(timeoutDefault)
}

// amountOfBuildRunsFn counts the number of BuildRuns in the "default" namespace.
func amountOfBuildRunsFn() int {
var brs v1alpha1.BuildRunList
err := kubeClient.List(ctx, &brs)
if err != nil {
return -1
}
return len(brs.Items)
}

// createAndUpdatePipelineRun creates and then updates the PipelineRun in order to preserve the
// status attribute, which gets dropped by envtest[0] during marshaling. It implements the
// workaround described in issue #1835[1].
//
// [0] https://github.com/kubernetes-sigs/controller-runtime/pull/1640
// [1] https://github.com/kubernetes-sigs/controller-runtime/issues/1835
func createAndUpdatePipelineRun(ctx context.Context, pipelineRun tknv1beta1.PipelineRun) error {
status := pipelineRun.Status.DeepCopy()

var err error
if err = kubeClient.Create(ctx, &pipelineRun); err != nil {
return err
}

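// re-read the freshly created object so the status update below carries its resourceVersion.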
var created tknv1beta1.PipelineRun
key := pipelineRun.GetNamespacedName()
if err = kubeClient.Get(ctx, key, &created); err != nil {
return err
}

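// restore the status captured before Create() and persist it through the status subresource.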
created.Status = *status
return kubeClient.Status().Update(ctx, &created)
}
75 changes: 75 additions & 0 deletions test/integration/inventory_controller_test.go
@@ -0,0 +1,75 @@
package integration

import (
"context"

"github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
"github.com/shipwright-io/triggers/test/stubs"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var _ = Describe("Build Inventory Controller", Ordered, func() {
// asserts that the Inventory instance is fed by the controller: Build objects created in the
// cluster are stored accordingly, and updates and removals are likewise reflected in the
// Inventory
Context("Inventory reflect Build instances in the cluster", func() {
ctx := context.Background()

buildWithGitHubTrigger := stubs.ShipwrightBuildWithTriggers(
"shipwright.io/triggers",
"build-with-github-trigger",
stubs.TriggerWhenPushToMain,
)
// searchForBuildWithGitHubTriggerFn searches for the Build with the GitHub trigger and returns
// the number of instances stored in the Inventory.
searchForBuildWithGitHubTriggerFn := func() int {
return len(buildInventory.SearchForGit(
v1alpha1.GitHubWebHookTrigger,
*buildWithGitHubTrigger.Spec.Source.URL,
stubs.Branch,
))
}

buildWithPipelineTrigger := stubs.ShipwrightBuildWithTriggers(
"shipwright.io/triggers",
"build-with-pipeline-trigger",
stubs.TriggerWhenPipelineSucceeded,
)
// searchForBuildWithPipelineTriggerFn searches for the Build with the Pipeline trigger and
// returns the number of instances found in the Inventory.
searchForBuildWithPipelineTriggerFn := func() int {
return len(buildInventory.SearchForObjectRef(
v1alpha1.PipelineTrigger,
buildWithPipelineTrigger.Spec.Trigger.When[0].ObjectRef,
))
}

It("Should add a Build instances (with triggers)", func() {
Expect(kubeClient.Create(ctx, buildWithGitHubTrigger)).Should(Succeed())
Expect(kubeClient.Create(ctx, buildWithPipelineTrigger)).Should(Succeed())
})

It("Should find the Build (GitHub) in the Inventory", func() {
eventuallyWithTimeoutFn(searchForBuildWithGitHubTriggerFn).Should(Equal(1))
})

It("Should find the Build (GitHub) in the Inventory", func() {
eventuallyWithTimeoutFn(searchForBuildWithPipelineTriggerFn).Should(Equal(1))
})

It("Should remove the Build instances", func() {
Expect(kubeClient.Delete(ctx, buildWithGitHubTrigger, deleteNowOpts)).Should(Succeed())
Expect(kubeClient.Delete(ctx, buildWithPipelineTrigger, deleteNowOpts)).Should(Succeed())
})

It("Should not find the Build (GitHub) in the Inventory", func() {
eventuallyWithTimeoutFn(searchForBuildWithGitHubTriggerFn).Should(Equal(0))
})

It("Should not find the Build (GitHub) in the Inventory", func() {
eventuallyWithTimeoutFn(searchForBuildWithPipelineTriggerFn).Should(Equal(0))
})
})
})
123 changes: 123 additions & 0 deletions test/integration/pipelinerun_controller_test.go
@@ -0,0 +1,123 @@
package integration

import (
"context"
"encoding/json"
"time"

"github.com/shipwright-io/triggers/pkg/filter"
"github.com/shipwright-io/triggers/test/stubs"

tknv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var _ = Describe("PipelineRun Controller", Ordered, func() {
// asserts the PipelineRun controller, which intercepts PipelineRun instances to trigger
// BuildRuns when a configured trigger matches the incoming object. The test scenarios also
// assert that the controller skips Custom-Tasks and incomplete PipelineRun instances
Context("PipelineRun instances will trigger BuildRuns", func() {
ctx := context.Background()

buildWithPipelineTrigger := stubs.ShipwrightBuildWithTriggers(
"shipwright.io/triggers",
"build-with-pipeline-trigger",
stubs.TriggerWhenPipelineSucceeded,
)

// amount of time to wait for the apiserver to register a new object, and also for the
// controller actions, before asserting the existence of BuildRuns
gracefulWait := 3 * time.Second

BeforeAll(func() {
Expect(kubeClient.Create(ctx, buildWithPipelineTrigger)).Should(Succeed())
})

AfterAll(func() {
Expect(kubeClient.Delete(ctx, buildWithPipelineTrigger, deleteNowOpts)).
Should(Succeed())
})

It("PipelineRun without status recorded won't trigger a BuildRun", func() {
pipelineRun := stubs.TektonPipelineRun(stubs.PipelineNameInTrigger)
Expect(createAndUpdatePipelineRun(ctx, pipelineRun)).Should(Succeed())

time.Sleep(gracefulWait)
eventuallyWithTimeoutFn(amountOfBuildRunsFn).Should(Equal(0))

Expect(kubeClient.Delete(ctx, &pipelineRun, deleteNowOpts)).Should(Succeed())
})

It("Custom-Task PipelineRun won't trigger a BuildRun", func() {
pipelineRun := stubs.TektonPipelineRunSucceeded(stubs.PipelineNameInTrigger)
pipelineRun.Status.PipelineSpec = stubs.TektonPipelineRunStatusCustomTaskShipwright
Expect(createAndUpdatePipelineRun(ctx, pipelineRun)).Should(Succeed())

time.Sleep(gracefulWait)
eventuallyWithTimeoutFn(amountOfBuildRunsFn).Should(Equal(0))

Expect(kubeClient.Delete(ctx, &pipelineRun, deleteNowOpts)).Should(Succeed())
})

It("PipelineRun already processed won't trigger a BuildRun", func() {
pipelineRun := stubs.TektonPipelineRunSucceeded(stubs.PipelineNameInTrigger)

objectRef, err := filter.PipelineRunToObjectRef(ctx, time.Now(), &pipelineRun)
Expect(err).To(Succeed())

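// simulate a PipelineRun that was already handled by recording the triggered build in the
// annotations the controller inspects.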
triggeredBuilds := []filter.TriggeredBuild{{
BuildName: buildWithPipelineTrigger.GetName(),
ObjectRef: objectRef,
}}

annotationBytes, err := json.Marshal(triggeredBuilds)
Expect(err).To(Succeed())

pipelineRun.SetAnnotations(map[string]string{
filter.TektonPipelineRunName: pipelineRun.GetName(),
filter.TektonPipelineRunTriggeredBuilds: string(annotationBytes),
})
Expect(createAndUpdatePipelineRun(ctx, pipelineRun)).Should(Succeed())

time.Sleep(gracefulWait)
eventuallyWithTimeoutFn(amountOfBuildRunsFn).Should(Equal(0))

Expect(kubeClient.Delete(ctx, &pipelineRun, deleteNowOpts)).Should(Succeed())
})

It("PipelineRun triggers a BuildRun", func() {
pipelineRun := stubs.TektonPipelineRunSucceeded(stubs.PipelineNameInTrigger)
Expect(createAndUpdatePipelineRun(ctx, pipelineRun)).Should(Succeed())

eventuallyWithTimeoutFn(amountOfBuildRunsFn).Should(Equal(1))

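// besides creating the BuildRun, the controller is expected to annotate the PipelineRun with
// the triggered builds; assert that the annotation matches the object reference.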
Eventually(func() bool {
var pr tknv1beta1.PipelineRun
if err := kubeClient.Get(ctx, pipelineRun.GetNamespacedName(), &pr); err != nil {
return false
}

objectRef, err := filter.PipelineRunToObjectRef(ctx, time.Now(), &pr)
if err != nil {
return false
}
triggeredBuilds, err := filter.PipelineRunExtractTriggeredBuildsSlice(&pr)
if err != nil {
return false
}
return filter.TriggereBuildsContainsObjectRef(
triggeredBuilds,
[]string{buildWithPipelineTrigger.GetName()},
objectRef,
)
}).
WithPolling(time.Second).
WithTimeout(30 * time.Second).
Should(BeTrue())

Expect(kubeClient.Delete(ctx, &pipelineRun, deleteNowOpts)).Should(Succeed())
})
})
})
105 changes: 105 additions & 0 deletions test/integration/suite_test.go
@@ -0,0 +1,105 @@
package integration

import (
"context"
"path/filepath"
"testing"
"time"

"github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
"github.com/shipwright-io/triggers/controllers"
"github.com/shipwright-io/triggers/pkg/inventory"

tknv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
//+kubebuilder:scaffold:imports
)

var (
cfg *rest.Config
testEnv *envtest.Environment
kubeClient client.Client

ctx context.Context
cancel context.CancelFunc

buildInventory *inventory.Inventory
)

func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())

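// envtest starts a local kube-apiserver and etcd, loading the CRDs expected under bin/crds.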
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "bin", "crds")},
ErrorIfCRDPathMissing: true,
}

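// start the test environment in a goroutine so a hanging control plane fails the suite via
// the Eventually timeout below instead of blocking forever.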
var err error
done := make(chan struct{})
go func() {
cfg, err = testEnv.Start()
close(done)
}()
Eventually(done).WithTimeout(time.Minute).Should(BeClosed())
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())

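// register the client-go, Shipwright Build, and Tekton Pipeline types in the shared scheme.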
err = clientgoscheme.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

err = v1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

err = tknv1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

kubeClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(kubeClient).NotTo(BeNil())

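// the manager hosts both reconcilers under test, sharing a single Build inventory instance.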
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())

buildInventory = inventory.NewInventory()

inventoryReconciler := controllers.NewInventoryReconciler(
mgr.GetClient(), mgr.GetScheme(), buildInventory)

err = inventoryReconciler.SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())

pipelineRunReconciler := controllers.NewPipelineRunReconciler(
mgr.GetClient(), mgr.GetScheme(), buildInventory)

err = pipelineRunReconciler.SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())

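// run the manager until the suite context is cancelled in AfterSuite().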
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).ToNot(HaveOccurred(), "failed to run manager")
}()
})

var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
