cdp: added resources and verbs for the cluster role #614
base: main
@@ -68,6 +68,10 @@ spec:
| node.backend | (optional) Configure the backend mode for Falcon Sensor (allowed values: kernel, bpf) |
| node.disableCleanup | (optional) Cleans up `/opt/CrowdStrike` on the nodes by deleting the files and directory. |
| node.version | (optional) Enforce particular Falcon Sensor version to be installed (example: "6.35", "6.35.0-13207") |
| node.cdpRolesEnabled | (optional) Enable cluster roles for Cloud Data Protection module |

> [!IMPORTANT]
> node.tolerations will be appended to the existing tolerations for the daemonset due to GKE Autopilot allowing users to manage Tolerations directly in the console. See documentation here: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-separation. Removing Tolerations from an existing daemonset requires a redeploy of the FalconNodeSensor manifest.

Review comment: `make docs` generated this.

#### Falcon Sensor Settings
| Spec | Description |
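For reference (not part of this diff), a minimal FalconNodeSensor manifest enabling the new option might look like the sketch below; the apiVersion is assumed from the v1alpha1 API the controller uses, and the metadata name and backend value are only illustrative.

```yaml
# Sketch, assuming the falcon.crowdstrike.com/v1alpha1 API group/version.
apiVersion: falcon.crowdstrike.com/v1alpha1
kind: FalconNodeSensor
metadata:
  name: falcon-node-sensor   # illustrative name
spec:
  node:
    backend: bpf             # optional; allowed values per the table: kernel, bpf
    cdpRolesEnabled: true    # the new optional flag documented above
```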
@@ -2,6 +2,7 @@ package falcon

import (
	"context"
	goerr "errors"
	"reflect"

	falconv1alpha1 "github.com/crowdstrike/falcon-operator/api/falcon/v1alpha1"

@@ -15,6 +16,7 @@ import (
	"github.com/crowdstrike/gofalcon/falcon"
	"github.com/go-logr/logr"
	"github.com/operator-framework/operator-lib/proxy"
	"golang.org/x/exp/slices"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"

@@ -33,6 +35,16 @@ import (
	clog "sigs.k8s.io/controller-runtime/pkg/log"
)

var (
	cdpRoleEnabledNil = goerr.New("CdpRolesEnabled must be defined")

	cdpRoles = rbacv1.PolicyRule{
		APIGroups: []string{""},
		Verbs:     []string{"get", "watch", "list"},
		Resources: []string{"pods", "services", "nodes", "daemonsets", "replicasets", "deployments", "jobs", "ingresses", "cronjobs", "persistentvolumes"},
	}
)
Review comment: These permissions need to be added to https://github.com/CrowdStrike/falcon-operator/blob/main/config/rbac/falconnodesensor_role.yaml and https://github.com/CrowdStrike/falcon-operator/blob/main/config/non-olm/patches/falconnodesensor_role.yaml and are autogenerated through kustomize. Use …
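For illustration only, the rule the review asks to declare statically in config/rbac/falconnodesensor_role.yaml would look roughly like the sketch below, mirroring the cdpRoles PolicyRule from the diff; per the review these files are autogenerated (through kustomize), so in practice the rule would come from the //+kubebuilder:rbac marker added further down rather than a hand edit.

```yaml
# Sketch of the statically declared rule, copied from the cdpRoles PolicyRule above.
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  - nodes
  - daemonsets
  - replicasets
  - deployments
  - jobs
  - ingresses
  - cronjobs
  - persistentvolumes
  verbs:
  - get
  - watch
  - list
```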
// FalconNodeSensorReconciler reconciles a FalconNodeSensor object
type FalconNodeSensorReconciler struct {
	client.Client
@@ -76,6 +88,7 @@ func (r *FalconNodeSensorReconciler) SetupWithManager(mgr ctrl.Manager, tracker
//+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=clusterrolebindings,verbs=get;list;watch;create
//+kubebuilder:rbac:groups="security.openshift.io",resources=securitycontextconstraints,resourceNames=privileged,verbs=use
//+kubebuilder:rbac:groups="scheduling.k8s.io",resources=priorityclasses,verbs=get;list;watch;create;delete;update
//+kubebuilder:rbac:groups="",resources=pods;services;nodes;daemonsets;replicasets;deployments;jobs;ingresses;cronjobs;persistentvolumes,verbs=get;watch;list

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -790,6 +803,11 @@ func (r *FalconNodeSensorReconciler) handlePermissions(ctx context.Context, node
		return created, err
	}

	created, err = r.handleClusterRole(ctx, nodesensor, logger)
	if created || err != nil {
		return created, err
	}

	return r.handleClusterRoleBinding(ctx, nodesensor, logger)
}

Review comment (on the handleClusterRole call): Clusterroles for operators should not reconcile permissions. The cluster roles are required to be auditable BEFORE install by cluster admins, security architects, and auditors. For operator security design and best practices, these permissions need to be added to https://github.com/CrowdStrike/falcon-operator/blob/main/config/rbac/falconnodesensor_role.yaml and https://github.com/CrowdStrike/falcon-operator/blob/main/config/non-olm/patches/falconnodesensor_role.yaml and are autogenerated through kustomize. Use …
@@ -810,7 +828,7 @@ func (r *FalconNodeSensorReconciler) handleClusterRoleBinding(ctx context.Contex
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
-			Name:     "falcon-operator-node-sensor-role",
+			Name:     common.NodeClusterRoleName,
		},
		Subjects: []rbacv1.Subject{
			{
@@ -829,7 +847,7 @@ func (r *FalconNodeSensorReconciler) handleClusterRoleBinding(ctx context.Contex
	logger.Info("Creating FalconNodeSensor ClusterRoleBinding")
	err = r.Create(ctx, &binding)
	if err != nil && !errors.IsAlreadyExists(err) {
-		logger.Error(err, "Failed to create new ClusterRoleBinding", "ClusteRoleBinding.Name", common.NodeClusterRoleBindingName)
+		logger.Error(err, "Failed to create new ClusterRoleBinding", "ClusterRoleBinding.Name", common.NodeClusterRoleBindingName)
		return false, err
	}
@@ -880,6 +898,43 @@ func (r *FalconNodeSensorReconciler) handleServiceAccount(ctx context.Context, n
	return false, nil
}

// handleClusterRole updates the cluster role and grants necessary permissions to it
func (r *FalconNodeSensorReconciler) handleClusterRole(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) (bool, error) {
	if nodesensor.Spec.Node.CdpRolesEnabled == nil {
		return false, cdpRoleEnabledNil
	}

	if !*nodesensor.Spec.Node.CdpRolesEnabled {
		return false, nil
	}
	clusterRole := rbacv1.ClusterRole{}
	err := r.Get(ctx, types.NamespacedName{Name: common.NodeClusterRoleName}, &clusterRole)
	if err != nil {
		logger.Error(err, "Failed to get FalconNodeSensor ClusterRole")
		return false, err
	}

	// check if CDP cluster role was already set
	for _, rule := range clusterRole.Rules {
		if slices.Equal(rule.Resources, cdpRoles.Resources) &&
			slices.Equal(rule.Verbs, cdpRoles.Verbs) &&
			slices.Equal(rule.APIGroups, cdpRoles.APIGroups) {
			return false, nil
		}
	}

	clusterRole.Rules = append(clusterRole.Rules, cdpRoles)

	err = r.Update(ctx, &clusterRole)
	if err != nil {
		logger.Error(err, "Failed to update ClusterRole", "Namespace.Name", nodesensor.Spec.InstallNamespace, "ClusterRole.Name", common.NodeClusterRoleName)
		return false, err
	}
	logger.Info("Updated FalconNodeSensor ClusterRole")
	return true, nil
}

// handleServiceAccount creates and updates the service account and grants necessary permissions to it
func (r *FalconNodeSensorReconciler) handleSAAnnotations(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) error {
	sa := corev1.ServiceAccount{}
@@ -1030,6 +1085,11 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
			return err
		}

		if err := r.cleanupClusterRole(ctx, nodesensor, logger); err != nil {
			logger.Error(err, "Failed to cleanup Falcon sensor cluster role")
			return err
		}

		// If we have gotten here, the cleanup should be successful
		logger.Info("Successfully deleted node directory", "Path", common.FalconDataDir)
	} else if err != nil {
@@ -1041,6 +1101,50 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
	return nil
}

// cleanupClusterRole cleanup the cluster role from permissions granted in runtime
func (r *FalconNodeSensorReconciler) cleanupClusterRole(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) error {
	if nodesensor.Spec.Node.CdpRolesEnabled == nil {
		return cdpRoleEnabledNil
	}

	if !*nodesensor.Spec.Node.CdpRolesEnabled {
		return nil
	}
	clusterRole := rbacv1.ClusterRole{}
	err := r.Get(ctx, types.NamespacedName{Name: common.NodeClusterRoleName}, &clusterRole)
	if err != nil {
		logger.Error(err, "Failed to get FalconNodeSensor ClusterRole")
		return err
	}

	indexToRemove := 0
	roleToRemoveFound := false
	var rule rbacv1.PolicyRule
	// check if CDP cluster role was set
	for indexToRemove, rule = range clusterRole.Rules {
		if slices.Equal(rule.Resources, cdpRoles.Resources) &&
			slices.Equal(rule.Verbs, cdpRoles.Verbs) &&
			slices.Equal(rule.APIGroups, cdpRoles.APIGroups) {
			roleToRemoveFound = true
			break
		}
	}
	// continue as role to remove wasn't found
	if !roleToRemoveFound {
		return nil
	}

	clusterRole.Rules = append(clusterRole.Rules[:indexToRemove], clusterRole.Rules[indexToRemove+1:]...)
	err = r.Update(ctx, &clusterRole)
	if err != nil {
		logger.Error(err, "Failed to update ClusterRole", "Namespace.Name", nodesensor.Spec.InstallNamespace, "ClusterRole.Name", common.NodeClusterRoleName)
		return err
	}
	logger.Info("Removed FalconNodeSensor ClusterRole runtime granted permissions")
	return nil
}

func (r *FalconNodeSensorReconciler) reconcileObjectWithName(ctx context.Context, name types.NamespacedName) error {
	obj := &falconv1alpha1.FalconNodeSensor{}
	err := r.Get(ctx, name, obj)
Review comment: This shouldn't be added, because cluster roles should NOT be reconciled given the auditability and permission-visibility requirements and the best practices for operator permissions, which many review before install.