Skip to content
This repository has been archived by the owner on Nov 9, 2022. It is now read-only.
This repository is currently being migrated. It's locked while the migration is in progress.

Commit

Permalink
Merge pull request #208 from darkowlzz/add-status-to-all-crds
Browse files Browse the repository at this point in the history
Add status to all crds
  • Loading branch information
darkowlzz authored Nov 27, 2019
2 parents 39912ff + eff4e3d commit 1be080b
Show file tree
Hide file tree
Showing 3 changed files with 48 additions and 20 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ Parameter | Description | Default
`secretRefName` | Reference name of storageos secret |
`secretRefNamespace` | Namespace of storageos secret |
`namespace` | Namespace where storageos cluster resources are created | `storageos`
`images.nodeContainer` | StorageOS node container image | `storageos/node:1.5.0`
`images.nodeContainer` | StorageOS node container image | `storageos/node:1.5.1`
`images.initContainer` | StorageOS init container image | `storageos/init:1.0.0`
`images.csiNodeDriverRegistrarContainer` | CSI Node Driver Registrar Container image | `quay.io/k8scsi/csi-node-driver-registrar:v1.0.1`
`images.csiClusterDriverRegistrarContainer` | CSI Cluster Driver Registrar Container image | `quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1`
Expand Down
38 changes: 26 additions & 12 deletions pkg/controller/job/job_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ import (
"strings"
"time"

storageosv1 "github.com/storageos/cluster-operator/pkg/apis/storageos/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
Expand All @@ -27,6 +26,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"

storageosv1 "github.com/storageos/cluster-operator/pkg/apis/storageos/v1"
)

var log = logf.Log.WithName("storageos.job")
Expand Down Expand Up @@ -96,6 +97,10 @@ func (r *ReconcileJob) Reconcile(request reconcile.Request) (reconcile.Result, e
reconcilePeriod := 10 * time.Second
reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod}

// Return this for an immediate retry of the reconciliation loop with the
// same request object.
immediateRetryResult := reconcile.Result{Requeue: true}

// Fetch the Job instance
instance := &storageosv1.Job{}
err := r.client.Get(context.TODO(), request.NamespacedName, instance)
Expand All @@ -107,26 +112,31 @@ func (r *ReconcileJob) Reconcile(request reconcile.Request) (reconcile.Result, e
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcileResult, err
return immediateRetryResult, err
}

// Set Spec attribute values.
instance.Spec.LabelSelector = instance.Spec.GetLabelSelector()

// Update the object.
if err := r.client.Update(context.Background(), instance); err != nil {
return reconcileResult, err
return immediateRetryResult, err
}

// Define a new DaemonSet object
daemonset, err := newDaemonSetForCR(instance)
if err != nil {
return reconcileResult, err
log.Info("Failed to create DaemonSet for Job", "error", err)
instance.Status.Completed = false
if updateErr := r.client.Status().Update(context.Background(), instance); updateErr != nil {
err = fmt.Errorf("%v, %v", updateErr, err)
}
return immediateRetryResult, err
}

// Set Job instance as the owner and controller
if err := controllerutil.SetControllerReference(instance, daemonset, r.scheme); err != nil {
return reconcileResult, err
return immediateRetryResult, err
}

// Check if this DaemonSet already exists
Expand All @@ -136,30 +146,34 @@ func (r *ReconcileJob) Reconcile(request reconcile.Request) (reconcile.Result, e
log.Info("Creating a new DaemonSet")
err = r.client.Create(context.TODO(), daemonset)
if err != nil {
return reconcileResult, err
log.Info("Failed to create DaemonSet", "error", err)
// Update status.
instance.Status.Completed = false
if updateErr := r.client.Status().Update(context.Background(), instance); updateErr != nil {
err = fmt.Errorf("%v, %v", updateErr, err)
}
return immediateRetryResult, err
}

// DaemonSet created successfully - don't requeue
return reconcile.Result{}, nil
} else if err != nil {
return reconcileResult, err
return immediateRetryResult, err
}

if instance.Status.Completed {
// Job completed. Do nothing - don't requeue
return reconcile.Result{}, nil
}

// Check DaemonSet Pod status.
// Check DaemonSet Pod status and update it.
completed, err := checkPods(r.clientset, instance, r.recorder)
if err != nil {
return reconcileResult, err
log.Info("Failed to check pod status", "error", err)
}

// Update the Completed status of the Job (status subresource).
instance.Status.Completed = completed
if err := r.client.Status().Update(context.Background(), instance); err != nil {
return reconcileResult, err
return immediateRetryResult, err
}

return reconcileResult, nil
Expand Down
28 changes: 21 additions & 7 deletions pkg/controller/storageosupgrade/storageosupgrade_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (
"strings"
"time"

storageosv1 "github.com/storageos/cluster-operator/pkg/apis/storageos/v1"
storageosapi "github.com/storageos/go-api"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
Expand All @@ -24,7 +24,7 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"

storageosapi "github.com/storageos/go-api"
storageosv1 "github.com/storageos/cluster-operator/pkg/apis/storageos/v1"
)

var log = logf.Log.WithName("storageos.upgrade")
Expand Down Expand Up @@ -220,6 +220,10 @@ func (r *ReconcileStorageOSUpgrade) Reconcile(request reconcile.Request) (reconc
reconcilePeriod := 10 * time.Second
reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod}

// Return this for an immediate retry of the reconciliation loop with the
// same request object.
immediateRetryResult := reconcile.Result{Requeue: true}

// Fetch the StorageOSUpgrade instance
instance := &storageosv1.StorageOSUpgrade{}
err := r.client.Get(context.TODO(), request.NamespacedName, instance)
Expand All @@ -237,7 +241,7 @@ func (r *ReconcileStorageOSUpgrade) Reconcile(request reconcile.Request) (reconc
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcileResult, err
return immediateRetryResult, err
}

r.SetCurrentUpgradeIfNone(instance)
Expand All @@ -256,16 +260,21 @@ func (r *ReconcileStorageOSUpgrade) Reconcile(request reconcile.Request) (reconc
// puller job, based on the cluster's node selector.
currentCluster, err := r.findCurrentCluster()
if err != nil {
log.Info("Failed to find current cluster", "error", err)
instance.Status.Completed = false
if updateErr := r.client.Status().Update(context.Background(), instance); updateErr != nil {
err = fmt.Errorf("%v,%v", updateErr, err)
}
// Re-queue if it fails to get the current cluster.
return reconcileResult, err
return immediateRetryResult, err
}
// Create image puller.
r.imagePuller = newImagePullJob(instance, currentCluster)
if err := controllerutil.SetControllerReference(instance, r.imagePuller, r.scheme); err != nil {
return reconcileResult, err
return immediateRetryResult, err
}
if err := r.client.Create(context.Background(), r.imagePuller); err != nil && !errors.IsAlreadyExists(err) {
return reconcileResult, fmt.Errorf("failed to create image puller job: %v", err)
return immediateRetryResult, fmt.Errorf("failed to create image puller job: %v", err)
}

r.recorder.Event(instance, corev1.EventTypeNormal, "PullImage", "Pulling the new container image")
Expand All @@ -285,7 +294,12 @@ func (r *ReconcileStorageOSUpgrade) Reconcile(request reconcile.Request) (reconc
}
// Re-queue if the image pull didn't complete.
if !r.imagePuller.Status.Completed {
return reconcileResult, fmt.Errorf("image pull didn't complete")
err := fmt.Errorf("image pull didn't complete")
instance.Status.Completed = false
if updateErr := r.client.Update(context.Background(), instance); updateErr != nil {
err = fmt.Errorf("%v, %v", updateErr, err)
}
return reconcileResult, err
}

// Find and pause the running cluster.
Expand Down

0 comments on commit 1be080b

Please sign in to comment.