Skip to content
This repository has been archived by the owner on Oct 3, 2019. It is now read-only.

Add controller to deploy topology service #204

Open
wants to merge 15 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions pkg/controller/add_topology.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
package controller

import "github.com/redhat-developer/devconsole-operator/pkg/controller/service/topology"

// init registers the topology controller with the operator by appending its
// Add function to the shared AddToManagerFuncs slice consumed by the manager.
func init() {
	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
	AddToManagerFuncs = append(AddToManagerFuncs, topology.Add)
}
1 change: 1 addition & 0 deletions pkg/controller/component/component_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
e "errors"
"fmt"

v1 "github.com/openshift/api/apps/v1"
buildv1 "github.com/openshift/api/build/v1"
imagev1 "github.com/openshift/api/image/v1"
Expand Down
1 change: 1 addition & 0 deletions pkg/controller/service/topology/doc.go
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
package topology
246 changes: 246 additions & 0 deletions pkg/controller/service/topology/topology_controller.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,246 @@
package topology

import (
"fmt"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"

"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)

var log = logf.Log

// ReconcileService reconciles the Deployment and Service objects that back
// the app topology REST service.
type ReconcileService struct {
	// This client, initialized using mgr.GetClient(), is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	scheme *runtime.Scheme

	// coreClient is a kubernetes go client which gets initialized using mgr.GetConfig().
	coreClient kubernetes.Interface

	// NOTE(review): these two exported fields are never read or written in
	// this file — confirm they are used elsewhere or remove them.
	DeploymentName      string
	DeploymentNamespace string
}

// Add creates a new topology Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}

const (
	// ServicesNamespace is the name of the namespace where this operator would install the Rest Service
	ServicesNamespace = "openshift-operators" // TODO: move this out to an env var?

	// ServiceName is the name that would be assigned to all objects associated with the Rest Service
	ServiceName = "devconsole-app" // TODO: move this out to an env var?
)

// newReconciler returns a new reconcile.Reconciler and, as a side effect,
// eagerly creates the topology Deployment and Service in ServicesNamespace
// when they do not exist yet. Creation failures are logged rather than
// returned because the signature only allows returning a Reconciler.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	r := &ReconcileService{client: mgr.GetClient(), scheme: mgr.GetScheme()}

	// Initialize the kubernetes client from the manager's rest config.
	cl, err := kubernetes.NewForConfig(mgr.GetConfig())
	if err != nil {
		log.Error(err, "Failed to create rest client")
		// NOTE(review): r.coreClient stays nil here, so a later Reconcile
		// call would panic — consider making this error fatal instead.
		return r
	}
	r.coreClient = cl

	// Create the Deployment if it does not already exist.
	if _, err := cl.AppsV1().Deployments(ServicesNamespace).Get(ServiceName, metav1.GetOptions{}); err != nil {
		if _, err := cl.AppsV1().Deployments(ServicesNamespace).Create(newDeploymentConfigForAppService(nil, ServiceName, ServicesNamespace)); err != nil {
			log.Error(err, "Failed to create deployment")
		}
	}

	// Moving ahead to create the Service assuming the deployment above was
	// created successfully.
	if _, err := cl.CoreV1().Services(ServicesNamespace).Get(ServiceName, metav1.GetOptions{}); err != nil {
		svc, svcErr := newService(ServicesNamespace, ServiceName, 8080)
		if svcErr != nil {
			// Previously this error was silently discarded, which could
			// have passed a nil Service to Create.
			log.Error(svcErr, "Failed to build service definition")
		} else if _, err := cl.CoreV1().Services(ServicesNamespace).Create(svc); err != nil {
			log.Error(err, "Failed to create service")
		}
	}

	return r
}

// add wires r into mgr behind a new "topology-controller" controller and
// registers watches for the Deployment and Service objects it owns.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("topology-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// ownedObject reports whether an event refers to the single object this
	// operator manages; events for anything else are filtered out.
	ownedObject := func(name, namespace string) bool {
		return name == ServiceName && namespace == ServicesNamespace
	}

	// Do not send events which are related to the specific DC.
	filter := predicate.Funcs{
		// TODO: When the deployment is being created, DC gets updated
		// and that would trigger this. How do we filter out such events?
		UpdateFunc: func(e event.UpdateEvent) bool {
			return ownedObject(e.MetaOld.GetName(), e.MetaOld.GetNamespace())
		},

		// TODO: In all probability, any delete event is interesting to us.
		DeleteFunc: func(e event.DeleteEvent) bool {
			return ownedObject(e.Meta.GetName(), e.Meta.GetNamespace())
		},

		// TODO: When a new one is created because an operator is being deployed..
		CreateFunc: func(e event.CreateEvent) bool {
			return false
		},
	}

	// Watch for Deployment Update and Delete events.
	if err := c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}, filter); err != nil {
		return err
	}

	// Watch for Service Update and Delete events.
	return c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForObject{}, filter)
}

// Reconcile handles events related to changes to the App Topology Service deployment.
// This includes events from service/deployment named "ServiceName" in the namespace
// "ServicesNamespace". It recreates whichever of the Deployment/Service pair
// is missing and returns an error when recreation fails.
func (r *ReconcileService) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	namespace := request.Namespace
	name := request.Name

	// Ensure the Deployment exists; recreate it if it is absent.
	existing, err := r.coreClient.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		if _, err := r.coreClient.AppsV1().Deployments(namespace).Create(newDeploymentConfigForAppService(nil, name, namespace)); err != nil {
			log.Error(err, "Failed to redeploy deployment")
			return reconcile.Result{}, err
		}
	} else if existing.Name == name {
		// logr takes key/value pairs, not printf-style format strings.
		log.Info("Deployment already exists", "name", existing.Name)
	}

	// Ensure the Service exists; recreate it if it is absent.
	if _, err := r.coreClient.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}); err != nil {
		newSvc, buildErr := newService(namespace, name, 8080)
		if buildErr != nil {
			return reconcile.Result{}, buildErr
		}
		if _, err := r.coreClient.CoreV1().Services(namespace).Create(newSvc); err != nil {
			// Previously this failure was only printed to stdout and swallowed.
			log.Error(err, "Failed to redeploy service")
			return reconcile.Result{}, err
		}
	}

	return reconcile.Result{}, nil
}

func newDeploymentConfigForAppService(containerPorts []corev1.ContainerPort, serviceName string, serviceNameSpace string) *appsv1.Deployment {
labels := getLabelsForServiceDeployments(ServiceName)
//annotations := resource.GetAnnotationsForCR(cp)
if containerPorts == nil {
containerPorts = []corev1.ContainerPort{{
ContainerPort: 8080,
Protocol: corev1.ProtocolTCP,
}}
}
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: ServiceName,
Namespace: ServicesNamespace,
Labels: labels,
},
Spec: appsv1.DeploymentSpec{
Strategy: appsv1.DeploymentStrategy{
Type: "Recreate",
},
Replicas: int32Ptr(1),
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: ServiceName,
Namespace: ServicesNamespace,
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: ServiceName,
Image: "quay.io/redhat-developer/app-service:latest", // TODO(Akash): parameterize this
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[Note]: This path will be parameterized in another PR

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a big technical debt!

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How about creating a DevConsoleConfig CRD and keep the docker image as an attribute there?
See this Console CRD for example https://github.com/openshift/console-operator/blob/master/manifests/00-crd-operator-config.yaml

Ports: containerPorts,
},
},
},
},
},
}
}

// newService builds the Service object fronting the topology deployment on
// the given TCP port. It returns an error when port is outside the
// unprivileged range [1024, 65535].
func newService(namespace, name string, port int32) (*corev1.Service, error) {
	// Was `port > 65536`, which wrongly accepted 65536 even though the
	// message (and TCP itself) caps ports at 65535.
	if port > 65535 || port < 1024 {
		return nil, fmt.Errorf("port %d is out of range [1024-65535]", port)
	}
	// Labels were previously derived from the ServiceName constant; use the
	// name argument so the Service is self-consistent for any caller.
	labels := getLabelsForServiceDeployments(name)
	var svcPorts []corev1.ServicePort
	svcPort := corev1.ServicePort{
		Name:       name + "-tcp",
		Port:       port,
		Protocol:   corev1.ProtocolTCP,
		TargetPort: intstr.FromInt(int(port)),
	}
	svcPorts = append(svcPorts, svcPort)
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: corev1.ServiceSpec{
			Ports: svcPorts,
			// The previous selector `deploymentconfig: name` matched no pods,
			// because the deployment's pod template carries only the
			// app/app.kubernetes.io/name labels. Select on those instead.
			Selector: labels,
		},
	}
	return svc, nil
}

// getLabelsForServiceDeployments returns the common label set applied to
// every object associated with the topology service deployment.
func getLabelsForServiceDeployments(serviceName string) map[string]string {
	return map[string]string{
		"app.kubernetes.io/name": serviceName,
		"app":                    serviceName,
	}
}

func int32Ptr(i int32) *int32 { return &i }
49 changes: 49 additions & 0 deletions pkg/controller/service/topology/topology_controller_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
package topology

import (
"testing"

"github.com/stretchr/testify/assert"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
coreFakeClient "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"

"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// TestReconcile verifies that a single Reconcile call creates both the
// topology Deployment and the Service when neither exists yet.
func TestReconcile(t *testing.T) {
	t.Run("Check if Deployment and Services are getting created", func(t *testing.T) {
		objs := []runtime.Object{}
		cl := fake.NewFakeClient(objs...)
		s := scheme.Scheme
		coreFakeC := coreFakeClient.NewSimpleClientset(objs...)
		r := ReconcileService{
			client:     cl,
			coreClient: coreFakeC,
			scheme:     s,
		}
		_, err := r.Reconcile(reconcile.Request{
			NamespacedName: types.NamespacedName{
				Name:      ServiceName,
				Namespace: ServicesNamespace,
			},
		})
		assert.NoError(t, err, "Reconcile failed with error")

		deployment, err := r.coreClient.AppsV1().Deployments(ServicesNamespace).Get(ServiceName, metav1.GetOptions{})
		assert.NoError(t, err, "Deployment should have been created")
		assert.Equal(t, ServiceName, deployment.Name, "Deployment is not created with name "+ServiceName)
		assert.Equal(t, ServicesNamespace, deployment.Namespace, "Deployment is not created in expected namespace "+ServicesNamespace)

		// The original assertions checked deployment.Name/Namespace here
		// (copy-paste bug); assert against the Service object instead.
		service, err := r.coreClient.CoreV1().Services(ServicesNamespace).Get(ServiceName, metav1.GetOptions{})
		assert.NoError(t, err, "Service should have been created")
		assert.Equal(t, ServiceName, service.Name, "Service is not created with name "+ServiceName)
		assert.Equal(t, ServicesNamespace, service.Namespace, "Service is not created in namespace "+ServicesNamespace)
	})
}