Skip to content

Commit

Permalink
cdp: added resources and verbs for the cluster role
Browse files Browse the repository at this point in the history
  • Loading branch information
Yehonathan Bruchim committed Jan 8, 2025
1 parent b55ccb3 commit 0233db4
Show file tree
Hide file tree
Showing 9 changed files with 24 additions and 112 deletions.
5 changes: 0 additions & 5 deletions api/falcon/v1alpha1/falconnodesensor_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,11 +110,6 @@ type FalconNodeSensorConfig struct {
// For more information, please see https://github.com/CrowdStrike/falcon-operator/blob/main/docs/ADVANCED.md.
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="DaemonSet Advanced Settings"
Advanced FalconAdvanced `json:"advanced,omitempty"`

// Enable cluster roles for Cloud Data Protection module
// +kubebuilder:default=true
// +operator-sdk:csv:customresourcedefinitions:type=spec,order=13
CdpRolesEnabled *bool `json:"cdpRolesEnabled,omitempty"`
}

type PriorityClassConfig struct {
Expand Down
5 changes: 0 additions & 5 deletions api/falcon/v1alpha1/zz_generated.deepcopy.go
Original file line number Diff line number Diff line change
Expand Up @@ -1088,11 +1088,6 @@ func (in *FalconNodeSensorConfig) DeepCopyInto(out *FalconNodeSensorConfig) {
**out = **in
}
in.Advanced.DeepCopyInto(&out.Advanced)
if in.CdpRolesEnabled != nil {
in, out := &in.CdpRolesEnabled, &out.CdpRolesEnabled
*out = new(bool)
**out = **in
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FalconNodeSensorConfig.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -170,10 +170,6 @@ spec:
- kernel
- bpf
type: string
cdpRolesEnabled:
default: true
description: Enable cluster roles for Cloud Data Protection module
type: boolean
disableCleanup:
default: false
description: Disables the cleanup of the sensor through DaemonSet
Expand Down
12 changes: 12 additions & 0 deletions config/rbac/falconcontainer_role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,17 @@ rules:
- ""
resources:
- secrets
- cronjobs
- daemonsets
- deployments
- ingresses
- jobs
- nodes
- persistentvolumes
- pods
- replicasets
- services
verbs:
- get
- list
- watch
16 changes: 12 additions & 4 deletions deploy/falcon-operator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3259,10 +3259,6 @@ spec:
- kernel
- bpf
type: string
cdpRolesEnabled:
default: true
description: Enable cluster roles for Cloud Data Protection module
type: boolean
disableCleanup:
default: false
description: Disables the cleanup of the sensor through DaemonSet
Expand Down Expand Up @@ -3922,8 +3918,20 @@ rules:
- ""
resources:
- secrets
- cronjobs
- daemonsets
- deployments
- ingresses
- jobs
- nodes
- persistentvolumes
- pods
- replicasets
- services
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
Expand Down
1 change: 0 additions & 1 deletion docs/deployment/openshift/resources/node/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ spec:
| node.backend | (optional) Configure the backend mode for Falcon Sensor (allowed values: kernel, bpf) |
| node.disableCleanup | (optional) Cleans up `/opt/CrowdStrike` on the nodes by deleting the files and directory. |
| node.version | (optional) Enforce particular Falcon Sensor version to be installed (example: "6.35", "6.35.0-13207") |
| node.cdpRolesEnabled | (optional) Enable cluster roles for Cloud Data Protection module |

> [!IMPORTANT]
> node.tolerations will be appended to the existing tolerations for the daemonset due to GKE Autopilot allowing users to manage Tolerations directly in the console. See documentation here: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-separation. Removing Tolerations from an existing daemonset requires a redeploy of the FalconNodeSensor manifest.
Expand Down
1 change: 0 additions & 1 deletion docs/resources/node/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ spec:
| node.backend | (optional) Configure the backend mode for Falcon Sensor (allowed values: kernel, bpf) |
| node.disableCleanup | (optional) Cleans up `/opt/CrowdStrike` on the nodes by deleting the files and directory. |
| node.version | (optional) Enforce particular Falcon Sensor version to be installed (example: "6.35", "6.35.0-13207") |
| node.cdpRolesEnabled | (optional) Enable cluster roles for Cloud Data Protection module |

> [!IMPORTANT]
> node.tolerations will be appended to the existing tolerations for the daemonset due to GKE Autopilot allowing users to manage Tolerations directly in the console. See documentation here: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-separation. Removing Tolerations from an existing daemonset requires a redeploy of the FalconNodeSensor manifest.
Expand Down
1 change: 0 additions & 1 deletion docs/src/resources/node.md.tmpl
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ spec:
| node.backend | (optional) Configure the backend mode for Falcon Sensor (allowed values: kernel, bpf) |
| node.disableCleanup | (optional) Cleans up `/opt/CrowdStrike` on the nodes by deleting the files and directory. |
| node.version | (optional) Enforce particular Falcon Sensor version to be installed (example: "6.35", "6.35.0-13207") |
| node.cdpRolesEnabled | (optional) Enable cluster roles for Cloud Data Protection module |

> [!IMPORTANT]
> node.tolerations will be appended to the existing tolerations for the daemonset due to GKE Autopilot allowing users to manage Tolerations directly in the console. See documentation here: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-separation. Removing Tolerations from an existing daemonset requires a redeploy of the FalconNodeSensor manifest.
Expand Down
91 changes: 0 additions & 91 deletions internal/controller/falcon_node/falconnodesensor_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,16 +35,6 @@ import (
clog "sigs.k8s.io/controller-runtime/pkg/log"
)

var (
	// cdpRoleEnabledNil is returned when the CdpRolesEnabled field on the
	// FalconNodeSensor spec is nil (i.e. neither set by the user nor
	// defaulted by the API server).
	cdpRoleEnabledNil = goerr.New("CdpRolesEnabled must be defined")

	// cdpRoles is the read-only RBAC rule appended to the node sensor
	// ClusterRole when the Cloud Data Protection module is enabled.
	// APIGroups [""] is the core API group; note some listed resources
	// (daemonsets, deployments, ingresses, cronjobs, ...) actually live in
	// other groups — TODO confirm the intended APIGroups coverage.
	cdpRoles = rbacv1.PolicyRule{
		APIGroups: []string{""},
		Verbs:     []string{"get", "watch", "list"},
		Resources: []string{"pods", "services", "nodes", "daemonsets", "replicasets", "deployments", "jobs", "ingresses", "cronjobs", "persistentvolumes"},
	}
)

// FalconNodeSensorReconciler reconciles a FalconNodeSensor object
type FalconNodeSensorReconciler struct {
client.Client
Expand Down Expand Up @@ -898,43 +888,6 @@ func (r *FalconNodeSensorReconciler) handleServiceAccount(ctx context.Context, n
return false, nil
}

// handleClusterRole ensures the node sensor ClusterRole carries the CDP
// read permissions when the Cloud Data Protection module is enabled.
// It returns true when the ClusterRole was modified, false when it was
// already up to date or the feature is disabled. A nil CdpRolesEnabled
// field is treated as a configuration error.
func (r *FalconNodeSensorReconciler) handleClusterRole(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) (bool, error) {
	enabled := nodesensor.Spec.Node.CdpRolesEnabled
	if enabled == nil {
		return false, cdpRoleEnabledNil
	}
	if !*enabled {
		return false, nil
	}

	var clusterRole rbacv1.ClusterRole
	if err := r.Get(ctx, types.NamespacedName{Name: common.NodeClusterRoleName}, &clusterRole); err != nil {
		logger.Error(err, "Failed to get FalconNodeSensor ClusterRole")
		return false, err
	}

	// Idempotence: skip the update when an identical CDP rule is already present.
	for i := range clusterRole.Rules {
		rule := &clusterRole.Rules[i]
		if slices.Equal(rule.APIGroups, cdpRoles.APIGroups) &&
			slices.Equal(rule.Verbs, cdpRoles.Verbs) &&
			slices.Equal(rule.Resources, cdpRoles.Resources) {
			return false, nil
		}
	}

	clusterRole.Rules = append(clusterRole.Rules, cdpRoles)
	if err := r.Update(ctx, &clusterRole); err != nil {
		logger.Error(err, "Failed to update ClusterRole", "Namespace.Name", nodesensor.Spec.InstallNamespace, "ClusterRole.Name", common.NodeClusterRoleName)
		return false, err
	}

	logger.Info("Updated FalconNodeSensor ClusterRole")
	return true, nil
}

// handleSAAnnotations applies the configured annotations to the sensor's service account
func (r *FalconNodeSensorReconciler) handleSAAnnotations(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) error {
sa := corev1.ServiceAccount{}
Expand Down Expand Up @@ -1101,50 +1054,6 @@ func (r *FalconNodeSensorReconciler) finalizeDaemonset(ctx context.Context, imag
return nil
}

// cleanupClusterRole removes the CDP permissions that handleClusterRole
// granted to the node sensor ClusterRole at runtime. It is a no-op when
// the CDP feature is disabled or when the rule is not present.
// NOTE(review): if CdpRolesEnabled was flipped from true to false before
// teardown, this early return leaves the previously appended rule in
// place — confirm whether cleanup should run unconditionally.
func (r *FalconNodeSensorReconciler) cleanupClusterRole(ctx context.Context, nodesensor *falconv1alpha1.FalconNodeSensor, logger logr.Logger) error {
	if nodesensor.Spec.Node.CdpRolesEnabled == nil {
		return cdpRoleEnabledNil
	}

	if !*nodesensor.Spec.Node.CdpRolesEnabled {
		return nil
	}
	clusterRole := rbacv1.ClusterRole{}
	err := r.Get(ctx, types.NamespacedName{Name: common.NodeClusterRoleName}, &clusterRole)
	if err != nil {
		// NOTE(review): a NotFound here (role already deleted) is reported
		// as a failure — verify callers tolerate that during finalization.
		logger.Error(err, "Failed to get FalconNodeSensor ClusterRole")
		return err
	}

	indexToRemove := 0
	roleToRemoveFound := false
	var rule rbacv1.PolicyRule
	// Locate the exact CDP rule (same API groups, verbs, and resources)
	// that handleClusterRole appends; remember its index for removal.
	for indexToRemove, rule = range clusterRole.Rules {
		if slices.Equal(rule.Resources, cdpRoles.Resources) &&
			slices.Equal(rule.Verbs, cdpRoles.Verbs) &&
			slices.Equal(rule.APIGroups, cdpRoles.APIGroups) {
			roleToRemoveFound = true
			break
		}
	}
	// Nothing to remove: the rule was never granted (or already cleaned up).
	if !roleToRemoveFound {
		return nil
	}

	// Splice the matched rule out of the slice, then persist the change.
	clusterRole.Rules = append(clusterRole.Rules[:indexToRemove], clusterRole.Rules[indexToRemove+1:]...)
	err = r.Update(ctx, &clusterRole)
	if err != nil {
		logger.Error(err, "Failed to update ClusterRole", "Namespace.Name", nodesensor.Spec.InstallNamespace, "ClusterRole.Name", common.NodeClusterRoleName)
		return err
	}
	logger.Info("Removed FalconNodeSensor ClusterRole runtime granted permissions")
	return nil

}

func (r *FalconNodeSensorReconciler) reconcileObjectWithName(ctx context.Context, name types.NamespacedName) error {
obj := &falconv1alpha1.FalconNodeSensor{}
err := r.Get(ctx, name, obj)
Expand Down

0 comments on commit 0233db4

Please sign in to comment.