From 39172376d0e9867c2b42b16d8f41b8347872728b Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Mon, 12 Feb 2024 11:52:57 +0200 Subject: [PATCH 1/9] task1 add new api func to policy-engine; so pre-process runs only for exposure-analysis --- pkg/netpol/connlist/connlist.go | 29 +++++++++++++++++-------- pkg/netpol/eval/check.go | 26 +++++++++++----------- pkg/netpol/eval/internal/k8s/netpol.go | 4 ++-- pkg/netpol/eval/resources.go | 30 +++++++++++++++++++------- 4 files changed, 58 insertions(+), 31 deletions(-) diff --git a/pkg/netpol/connlist/connlist.go b/pkg/netpol/connlist/connlist.go index f179fca5..376b2acb 100644 --- a/pkg/netpol/connlist/connlist.go +++ b/pkg/netpol/connlist/connlist.go @@ -190,19 +190,30 @@ func (ca *ConnlistAnalyzer) hasFatalError() error { return nil } -func (ca *ConnlistAnalyzer) connslistFromParsedResources(objectsList []parser.K8sObject) ([]Peer2PeerConnection, []Peer, error) { +// getPolicyEngine returns a new policy engine considering the exposure analysis option +func (ca *ConnlistAnalyzer) getPolicyEngine(objectsList []parser.K8sObject) (*eval.PolicyEngine, error) { // TODO: do we need logger in policyEngine? 
- pe, err := eval.NewPolicyEngineWithObjects(objectsList) + if !ca.exposureAnalysis { + return eval.NewPolicyEngineWithObjects(objectsList) + } + // else build new policy engine with exposure analysis + pe := eval.NewPolicyEngineWithOptions(ca.exposureAnalysis) + // add objects from real resources + err := pe.AddObjects(objectsList) + if err != nil { + return nil, err + } + // add representative resources + err = pe.SetExposureAnalysisResources() + return pe, err +} + +func (ca *ConnlistAnalyzer) connslistFromParsedResources(objectsList []parser.K8sObject) ([]Peer2PeerConnection, []Peer, error) { + pe, err := ca.getPolicyEngine(objectsList) if err != nil { ca.errors = append(ca.errors, newResourceEvaluationError(err)) return nil, nil, err } - if ca.exposureAnalysis { - err = pe.SetExposureAnalysisResources() - if err != nil { - return nil, nil, err - } - } ia, err := ingressanalyzer.NewIngressAnalyzerWithObjects(objectsList, pe, ca.logger, ca.muteErrsAndWarns) if err != nil { ca.errors = append(ca.errors, newResourceEvaluationError(err)) @@ -491,7 +502,7 @@ func (ca *ConnlistAnalyzer) existsFocusWorkload(peers []Peer, excludeIngressAnal func (ca *ConnlistAnalyzer) getConnectionsBetweenPeers(pe *eval.PolicyEngine, peers []Peer) ([]Peer2PeerConnection, exposureMap, error) { connsRes := make([]Peer2PeerConnection, 0) exposuresMap := exposureMap{} - // sets for marking peer checked for ingress/egress exposure to entire cluster data once + // for exposure-analysis use: sets for marking peer checked for ingress/egress exposure to entire cluster data once ingressSet := make(map[Peer]bool, 0) egressSet := make(map[Peer]bool, 0) diff --git a/pkg/netpol/eval/check.go b/pkg/netpol/eval/check.go index da7db79f..228b3382 100644 --- a/pkg/netpol/eval/check.go +++ b/pkg/netpol/eval/check.go @@ -131,7 +131,7 @@ func (pe *PolicyEngine) AllAllowedConnectionsBetweenWorkloadPeers(srcPeer, dstPe func (pe *PolicyEngine) allAllowedConnectionsBetweenPeers(srcPeer, dstPeer Peer) 
(*common.ConnectionSet, error) { srcK8sPeer := srcPeer.(k8s.Peer) dstK8sPeer := dstPeer.(k8s.Peer) - // sets of peers which their exposure to entire cluster is already checked + // for exposure-analysis use: sets of peers which their exposure to entire cluster is already checked ingressSet := make(map[k8s.Peer]bool, 0) egressSet := make(map[k8s.Peer]bool, 0) var res *common.ConnectionSet @@ -179,7 +179,7 @@ func (pe *PolicyEngine) getPoliciesSelectingPod(p *k8s.Pod, direction netv1.Poli res = append(res, policy) } } - if len(res) > 0 { + if pe.exposureAnalysisFlag && len(res) > 0 { p.UpdatePodXgressProtectedFlag(direction == netv1.PolicyTypeIngress) } return res, nil @@ -276,7 +276,7 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress if isIngress { // policy selecting dst policyAllowedConnectionsPerDirection, err = policy.GetIngressAllowedConns(src, dst) - if !ingressSet[dst] { + if pe.exposureAnalysisFlag && !ingressSet[dst] { // if this is first time scanning policies selecting this dst peer, // update its ingress entire cluster connection relying on policy data dst.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.IngressGeneralConns.EntireClusterConns, isIngress) @@ -284,7 +284,7 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress } else { // policy selecting src policyAllowedConnectionsPerDirection, err = policy.GetEgressAllowedConns(dst) - if !egressSet[src] { + if pe.exposureAnalysisFlag && !egressSet[src] { // if this is first time scanning policies selecting this src peer, // update its egress entire cluster connection relying on policy data src.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.EgressGeneralConns.EntireClusterConns, isIngress) @@ -295,14 +295,16 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress } allowedConns.Union(policyAllowedConnectionsPerDirection) } - if isIngress { - // after looping the policies selecting this 
dst for first time, the exposure to entire cluster on ingress is computed; - // no need to compute again - ingressSet[dst] = true - } else { - // after looping the policies selecting this src for first time, the exposure to entire cluster on egress is computed; - // no need to compute again - egressSet[src] = true + if pe.exposureAnalysisFlag { + if isIngress { + // after looping the policies selecting this dst for first time, the exposure to entire cluster on ingress is computed; + // no need to compute again + ingressSet[dst] = true + } else { + // after looping the policies selecting this src for first time, the exposure to entire cluster on egress is computed; + // no need to compute again + egressSet[src] = true + } } return allowedConns, nil } diff --git a/pkg/netpol/eval/internal/k8s/netpol.go b/pkg/netpol/eval/internal/k8s/netpol.go index e53a09ab..22f90200 100644 --- a/pkg/netpol/eval/internal/k8s/netpol.go +++ b/pkg/netpol/eval/internal/k8s/netpol.go @@ -37,7 +37,7 @@ import ( // -> might help to preprocess and store peers that match policy selectors + selectors in rules + set of allowed connections per rule type NetworkPolicy struct { *netv1.NetworkPolicy // embedding k8s network policy object - // following data stored in preprocessing; + // following data stored in preprocessing when exposure-analysis is on; // IngressGeneralConns contains: // - the maximal connection-set which the policy's rules allow to all destinations on ingress direction // - the maximal connection-set which the policy's rules allow to all namespaces in the cluster on ingress direction @@ -438,7 +438,7 @@ func (np *NetworkPolicy) fullName() string { } // ///////////////////////////////////////////////////////////////////////////////////////////// -// pre-processing computations - currently performed for exposure-analysis goals; +// pre-processing computations - currently performed for exposure-analysis goals only; // DetermineGeneralConnectionsOfPolicy scans policy rules and 
updates if it allows conns with all destinations or/ and with entire cluster // for ingress and/ or egress directions diff --git a/pkg/netpol/eval/resources.go b/pkg/netpol/eval/resources.go index 4b6c6933..f1c8807b 100644 --- a/pkg/netpol/eval/resources.go +++ b/pkg/netpol/eval/resources.go @@ -56,6 +56,20 @@ func NewPolicyEngine() *PolicyEngine { func NewPolicyEngineWithObjects(objects []parser.K8sObject) (*PolicyEngine, error) { pe := NewPolicyEngine() + err := pe.AddObjects(objects) + return pe, err +} + +// NewPolicyEngineWithOptions returns a new policy engine with an empty state but updating the exposure analysis flag, +// TBD: currently exposure-analysis is the only option supported by policy-engine, so no need for options param, +// should have the exposureFlag or just assign to true in the func? +func NewPolicyEngineWithOptions(exposureFlag bool) *PolicyEngine { + pe := NewPolicyEngine() + pe.exposureAnalysisFlag = exposureFlag + return pe +} + +func (pe *PolicyEngine) AddObjects(objects []parser.K8sObject) error { var err error for _, obj := range objects { switch obj.Kind { @@ -85,17 +99,15 @@ func NewPolicyEngineWithObjects(objects []parser.K8sObject) (*PolicyEngine, erro fmt.Printf("ignoring resource kind %s", obj.Kind) } if err != nil { - return nil, err + return err } } - err = pe.resolveMissingNamespaces() - return pe, err + return pe.resolveMissingNamespaces() } // SetExposureAnalysisResources sets the new representative peers needed for exposure analysis -// TODO: changes may be done on optimizing PR (will the flag be relevant after optimize? , should have these both funcs?..) +// TODO: changes may be done on optimizing PR (should have these both funcs?) 
func (pe *PolicyEngine) SetExposureAnalysisResources() error { - pe.exposureAnalysisFlag = true // scan policies' rules for new pods in (unmatched) namespaces (TODO : and unmatched pods in un/matched namespaces) return pe.addRepresentativePods() } @@ -335,10 +347,12 @@ func (pe *PolicyEngine) upsertNetworkPolicy(np *netv1.NetworkPolicy) error { } pe.netpolsMap[netpolNamespace][np.Name] = newNetpol - // scan policy ingress and egress rules to store allowed connections + // for exposure analysis only: scan policy ingress and egress rules to store allowed connections // to entire cluster and to all destinations (if such connections are allowed by the policy) - err := newNetpol.DetermineGeneralConnectionsOfPolicy() - + var err error + if pe.exposureAnalysisFlag { + err = newNetpol.DetermineGeneralConnectionsOfPolicy() + } // clear the cache on netpols changes pe.cache.clear() return err From a8474e2eeea1cb86c494be3c9892c9e7a183e352 Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Mon, 12 Feb 2024 20:55:26 +0200 Subject: [PATCH 2/9] task2 on exposure analysis benefit from the stored data --- pkg/netpol/eval/check.go | 6 +-- pkg/netpol/eval/internal/k8s/netpol.go | 73 ++++++++++++++++++++++++-- pkg/netpol/eval/internal/k8s/pod.go | 32 +++++++---- 3 files changed, 94 insertions(+), 17 deletions(-) diff --git a/pkg/netpol/eval/check.go b/pkg/netpol/eval/check.go index 228b3382..60b5b51b 100644 --- a/pkg/netpol/eval/check.go +++ b/pkg/netpol/eval/check.go @@ -274,21 +274,21 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress var policyAllowedConnectionsPerDirection *common.ConnectionSet var err error if isIngress { - // policy selecting dst - policyAllowedConnectionsPerDirection, err = policy.GetIngressAllowedConns(src, dst) + // policy selecting dst (dst pod is real) if pe.exposureAnalysisFlag && !ingressSet[dst] { // if this is first time scanning policies selecting this dst peer, // update its ingress entire cluster connection relying on 
policy data dst.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.IngressGeneralConns.EntireClusterConns, isIngress) } + policyAllowedConnectionsPerDirection, err = policy.GetIngressAllowedConns(src, dst, pe.exposureAnalysisFlag) } else { // policy selecting src - policyAllowedConnectionsPerDirection, err = policy.GetEgressAllowedConns(dst) if pe.exposureAnalysisFlag && !egressSet[src] { // if this is first time scanning policies selecting this src peer, // update its egress entire cluster connection relying on policy data src.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.EgressGeneralConns.EntireClusterConns, isIngress) } + policyAllowedConnectionsPerDirection, err = policy.GetEgressAllowedConns(dst, pe.exposureAnalysisFlag) } if err != nil { return allowedConns, err diff --git a/pkg/netpol/eval/internal/k8s/netpol.go b/pkg/netpol/eval/internal/k8s/netpol.go index 22f90200..07ac8192 100644 --- a/pkg/netpol/eval/internal/k8s/netpol.go +++ b/pkg/netpol/eval/internal/k8s/netpol.go @@ -289,11 +289,21 @@ func (np *NetworkPolicy) EgressAllowedConn(dst Peer, protocol, port string) (boo } // GetEgressAllowedConns returns the set of allowed connetions from any captured pod to the destination peer -func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer) (*common.ConnectionSet, error) { +// if exposureFlag is true; the result is initiated with the matching policy's general connections +// and computations of rule conns are skipped for general rules; +func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer, exposureFlag bool) (*common.ConnectionSet, error) { res := common.MakeConnectionSet(false) + if exposureFlag { + // if exposure-analysis is on; we already have the policy's entire world and entire cluster connections; + // so we can initiate the result with it + res = np.initEgressAllowedConnsFromPolicyGeneralConns(dst) + } for _, rule := range np.Spec.Egress { rulePeers := rule.To rulePorts := rule.Ports + if exposureFlag && 
np.isGeneralRule(rulePeers) { + continue + } peerSselected, err := np.ruleSelectsPeer(rulePeers, dst) if err != nil { return res, err @@ -314,11 +324,21 @@ func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer) (*common.ConnectionSet, } // GetIngressAllowedConns returns the set of allowed connections to a captured dst pod from the src peer -func (np *NetworkPolicy) GetIngressAllowedConns(src, dst Peer) (*common.ConnectionSet, error) { +// if exposureFlag is true; the result is initiated with the matching policy's general connections +// and computations of rule conns are skipped for general rules; +func (np *NetworkPolicy) GetIngressAllowedConns(src, dst Peer, exposureFlag bool) (*common.ConnectionSet, error) { res := common.MakeConnectionSet(false) + if exposureFlag { + // if exposure-analysis is on; we already have the policy's entire world and entire cluster connections; + // so we can initiate the result + res = np.initIngressAllowedConnsFromGeneralConns(src, dst) + } for _, rule := range np.Spec.Ingress { rulePeers := rule.From rulePorts := rule.Ports + if exposureFlag && np.isGeneralRule(rulePeers) { + continue + } peerSselected, err := np.ruleSelectsPeer(rulePeers, src) if err != nil { return res, err @@ -499,7 +519,7 @@ func (np *NetworkPolicy) scanRulesForGeneralEgressConns() error { return nil } -// doRulesExposeToAllDestOrEntireCluster checks if the given rules list is exposed to entire cluster; +// doRulesExposeToAllDestOrEntireCluster checks if the given rules list is exposed to entire world or entire cluster; // i.e. 
if the rules list is empty or if there is a rule with empty namespaceSelector // this func assumes rules are legal (rules correctness check occurs later) func (np *NetworkPolicy) doRulesExposeToAllDestOrEntireCluster(rules []netv1.NetworkPolicyPeer) (alldests, entireCluster bool) { @@ -521,3 +541,50 @@ func (np *NetworkPolicy) doRulesExposeToAllDestOrEntireCluster(rules []netv1.Net } return false, false } + +///////////////////////////////////////////////// +// exposure analysis benefits from pre-processing stored data + +// initEgressAllowedConnsFromPolicyGeneralConns returns an init egress allowed conns between two peers +// based on the policy's general connections as following: +// - if the dst peer is ip-block, then return the copy of policy's egress AllDestinationsConns +// - if the dst peer is a real pod, return the copy of policy's egress EntireClusterConns with converting the named ports +// if found in the connection (conversion based on dst pod's ports) +// - if the dst peer is a representative pod, return the copy policy's egress EntireClusterConns without converting the named +// ports if found (for a representative pod, a named port is potential connection port) +// returns copy of a general connection, since it might be changed for the specific connection between two peers +func (np *NetworkPolicy) initEgressAllowedConnsFromPolicyGeneralConns(dst Peer) *common.ConnectionSet { + if dst.GetPeerIPBlock() != nil { // dst is ip + return np.EgressGeneralConns.AllDestinationsConns.Copy() + } + res := np.EgressGeneralConns.EntireClusterConns + if dst.GetPeerPod().FakePod { // dst is representative + return res.Copy() + } + // dst is real pod + convertedConn := dst.GetPeerPod().checkAndConvertNamedPortsInConnection(res) + if convertedConn != nil { + return convertedConn + } + return res.Copy() +} + +// initIngressAllowedConnsFromGeneralConns returns an init ingress allowed conns between two peers; +// based on policy and dst pod general connections as 
following: +// 1. src peer is an ip-block : returns copy of policy's ingress AllDestinationsConns +// 2. src peer is a pod (real or representative) : returns copy of dst pod's ingress' entire-cluster connection +// as it already contains the policy's ingress entire-cluster connection and converted named ports (if found) +// returns copy of a general connection, since it might be changed for the specific connection between two peers +func (np *NetworkPolicy) initIngressAllowedConnsFromGeneralConns(src, dst Peer) *common.ConnectionSet { + if src.GetPeerIPBlock() != nil { // src is ip + return np.IngressGeneralConns.AllDestinationsConns.Copy() + } + // src is pod peer + return dst.GetPeerPod().IngressExposureData.EntireClusterConnection.Copy() +} + +// isGeneralRule returns if the given rule is general; i.e. e +func (np *NetworkPolicy) isGeneralRule(rulePeer []netv1.NetworkPolicyPeer) bool { + allDests, entireCluster := np.doRulesExposeToAllDestOrEntireCluster(rulePeer) + return allDests || entireCluster +} diff --git a/pkg/netpol/eval/internal/k8s/pod.go b/pkg/netpol/eval/internal/k8s/pod.go index 80aa61df..d1b34991 100644 --- a/pkg/netpol/eval/internal/k8s/pod.go +++ b/pkg/netpol/eval/internal/k8s/pod.go @@ -280,17 +280,9 @@ func (pod *Pod) UpdatePodXgressExposureToEntireClusterData(ruleConns *common.Con if isIngress { // for a dst pod check if the given ruleConns contains namedPorts; if yes replace them with pod's // matching port number - connNamedPorts := ruleConns.GetNamedPorts() - if len(connNamedPorts) > 0 { - ruleConnsCopy := ruleConns.Copy() // copying the connectionSet; in order to replace - // the named ports with pod's port numbers - for protocol, namedPorts := range connNamedPorts { - for _, namedPort := range namedPorts { - portNum := pod.ConvertPodNamedPort(namedPort) - ruleConnsCopy.ReplaceNamedPortWithMatchingPortNum(protocol, namedPort, portNum) - } - } - pod.IngressExposureData.EntireClusterConnection.Union(ruleConnsCopy) + convertedConns := 
pod.checkAndConvertNamedPortsInConnection(ruleConns) + if convertedConns != nil { + pod.IngressExposureData.EntireClusterConnection.Union(convertedConns) } else { pod.IngressExposureData.EntireClusterConnection.Union(ruleConns) } @@ -299,6 +291,24 @@ func (pod *Pod) UpdatePodXgressExposureToEntireClusterData(ruleConns *common.Con } } +// checkAndConvertNamedPortsInConnection returns the copy of the given connectionSet where named ports are converted; +// returns nil if the given connectionSet has no named ports +func (pod *Pod) checkAndConvertNamedPortsInConnection(conns *common.ConnectionSet) *common.ConnectionSet { + connNamedPorts := conns.GetNamedPorts() + if len(connNamedPorts) == 0 { + return nil + } // else - found named ports + connsCopy := conns.Copy() // copying the connectionSet; in order to replace + // the named ports with pod's port numbers + for protocol, namedPorts := range connNamedPorts { + for _, namedPort := range namedPorts { + portNum := pod.ConvertPodNamedPort(namedPort) + connsCopy.ReplaceNamedPortWithMatchingPortNum(protocol, namedPort, portNum) + } + } + return connsCopy +} + // updatePodXgressProtectedFlag updates to true the relevant ingress/egress protected flag of the pod func (pod *Pod) UpdatePodXgressProtectedFlag(isIngress bool) { if isIngress { From e5a5270070c7bf3ad554df5be18bf8710744e253 Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Tue, 13 Feb 2024 13:32:29 +0200 Subject: [PATCH 3/9] eliminate isRuleGeneral; skip general rules in ruleSelectsPeer --- pkg/netpol/eval/internal/k8s/netpol.go | 34 ++++++++++++-------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/pkg/netpol/eval/internal/k8s/netpol.go b/pkg/netpol/eval/internal/k8s/netpol.go index 07ac8192..e56dbb0a 100644 --- a/pkg/netpol/eval/internal/k8s/netpol.go +++ b/pkg/netpol/eval/internal/k8s/netpol.go @@ -173,10 +173,12 @@ func (np *NetworkPolicy) ruleConnsContain(rulePorts []netv1.NetworkPolicyPort, p } // ruleSelectsPeer returns true if the 
given peer is in the set of peers selected by rulePeers +// if exposureFlag is true, skips general rules; i.e. returns false also if the rule selects all destinations +// or entire cluster // //gocyclo:ignore -func (np *NetworkPolicy) ruleSelectsPeer(rulePeers []netv1.NetworkPolicyPeer, peer Peer) (bool, error) { - if len(rulePeers) == 0 { +func (np *NetworkPolicy) ruleSelectsPeer(rulePeers []netv1.NetworkPolicyPeer, peer Peer, exposureFlag bool) (bool, error) { + if len(rulePeers) == 0 && !exposureFlag { return true, nil // If this field is empty or missing, this rule matches all destinations } for i := range rulePeers { @@ -200,6 +202,10 @@ func (np *NetworkPolicy) ruleSelectsPeer(rulePeers []netv1.NetworkPolicyPeer, pe if err != nil { return false, err } + if exposureFlag && selector.Empty() && rulePeers[i].PodSelector == nil { + // general rule matching entire cluster (namespaceSelector : {}); skip in case of exposure analysis + continue + } peerNamespace := peer.GetPeerNamespace() peerMatchesNamespaceSelector = selector.Matches(labels.Set(peerNamespace.Labels)) } @@ -246,7 +252,7 @@ func (np *NetworkPolicy) IngressAllowedConn(src Peer, protocol, port string, dst rulePeers := np.Spec.Ingress[i].From rulePorts := np.Spec.Ingress[i].Ports - peerSselected, err := np.ruleSelectsPeer(rulePeers, src) + peerSselected, err := np.ruleSelectsPeer(rulePeers, src, false) if err != nil { return false, err } @@ -270,7 +276,7 @@ func (np *NetworkPolicy) EgressAllowedConn(dst Peer, protocol, port string) (boo rulePeers := np.Spec.Egress[i].To rulePorts := np.Spec.Egress[i].Ports - peerSselected, err := np.ruleSelectsPeer(rulePeers, dst) + peerSselected, err := np.ruleSelectsPeer(rulePeers, dst, false) if err != nil { return false, err } @@ -301,10 +307,8 @@ func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer, exposureFlag bool) (*co for _, rule := range np.Spec.Egress { rulePeers := rule.To rulePorts := rule.Ports - if exposureFlag && np.isGeneralRule(rulePeers) { - 
continue - } - peerSselected, err := np.ruleSelectsPeer(rulePeers, dst) + // in case of exposure analysis, returns false for general rule; as we already has its conns in res + peerSselected, err := np.ruleSelectsPeer(rulePeers, dst, exposureFlag) if err != nil { return res, err } @@ -329,17 +333,15 @@ func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer, exposureFlag bool) (*co func (np *NetworkPolicy) GetIngressAllowedConns(src, dst Peer, exposureFlag bool) (*common.ConnectionSet, error) { res := common.MakeConnectionSet(false) if exposureFlag { - // if exposure-analysis is on; we already have the policy's entire world and entire cluster connections; + // if exposure-analysis is on; we already have the policy's and dst's entire world and entire cluster connections; // so we can initiate the result res = np.initIngressAllowedConnsFromGeneralConns(src, dst) } for _, rule := range np.Spec.Ingress { rulePeers := rule.From rulePorts := rule.Ports - if exposureFlag && np.isGeneralRule(rulePeers) { - continue - } - peerSselected, err := np.ruleSelectsPeer(rulePeers, src) + // in case of exposure analysis, returns false for general rule; as we already has its conns in res + peerSselected, err := np.ruleSelectsPeer(rulePeers, src, exposureFlag) if err != nil { return res, err } @@ -582,9 +584,3 @@ func (np *NetworkPolicy) initIngressAllowedConnsFromGeneralConns(src, dst Peer) // src is pod peer return dst.GetPeerPod().IngressExposureData.EntireClusterConnection.Copy() } - -// isGeneralRule returns if the given rule is general; i.e. 
e -func (np *NetworkPolicy) isGeneralRule(rulePeer []netv1.NetworkPolicyPeer) bool { - allDests, entireCluster := np.doRulesExposeToAllDestOrEntireCluster(rulePeer) - return allDests || entireCluster -} From f000196e1c658c1c9f7515846c5f0c27caf7d15a Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Tue, 13 Feb 2024 13:47:01 +0200 Subject: [PATCH 4/9] missing func doc --- pkg/netpol/eval/resources.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/netpol/eval/resources.go b/pkg/netpol/eval/resources.go index f1c8807b..e990784d 100644 --- a/pkg/netpol/eval/resources.go +++ b/pkg/netpol/eval/resources.go @@ -69,6 +69,7 @@ func NewPolicyEngineWithOptions(exposureFlag bool) *PolicyEngine { return pe } +// AddObjects adds k8s objects from parsed resources to the policy engine func (pe *PolicyEngine) AddObjects(objects []parser.K8sObject) error { var err error for _, obj := range objects { From 447e916e3d3f7c59ce6d3e6bf04fedfbd7901483 Mon Sep 17 00:00:00 2001 From: shireenf-ibm <82180114+shireenf-ibm@users.noreply.github.com> Date: Tue, 13 Feb 2024 20:06:13 +0200 Subject: [PATCH 5/9] Update pkg/netpol/eval/internal/k8s/netpol.go Co-authored-by: Adi Sosnovich <82078442+adisos@users.noreply.github.com> --- pkg/netpol/eval/internal/k8s/netpol.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/netpol/eval/internal/k8s/netpol.go b/pkg/netpol/eval/internal/k8s/netpol.go index e56dbb0a..1eaebccc 100644 --- a/pkg/netpol/eval/internal/k8s/netpol.go +++ b/pkg/netpol/eval/internal/k8s/netpol.go @@ -294,7 +294,7 @@ func (np *NetworkPolicy) EgressAllowedConn(dst Peer, protocol, port string) (boo return false, nil } -// GetEgressAllowedConns returns the set of allowed connetions from any captured pod to the destination peer +// GetEgressAllowedConns returns the set of allowed connections from any captured pod to the destination peer // if exposureFlag is true; the result is initiated with the matching policy's general connections // and computations of rule 
conns are skipped for general rules; func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer, exposureFlag bool) (*common.ConnectionSet, error) { From 4abd6019ddc004dfa3b3635f1fa6e73857a75af2 Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Tue, 13 Feb 2024 20:11:59 +0200 Subject: [PATCH 6/9] todo comment --- pkg/netpol/connlist/connlist.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/netpol/connlist/connlist.go b/pkg/netpol/connlist/connlist.go index 376b2acb..98dbdf6c 100644 --- a/pkg/netpol/connlist/connlist.go +++ b/pkg/netpol/connlist/connlist.go @@ -203,6 +203,7 @@ func (ca *ConnlistAnalyzer) getPolicyEngine(objectsList []parser.K8sObject) (*ev if err != nil { return nil, err } + // TODO: this will be eliminated when adding representative peers while policies upsert // add representative resources err = pe.SetExposureAnalysisResources() return pe, err From d0d090a7c8050c946093230172da147d85e14fa2 Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Wed, 14 Feb 2024 07:41:52 +0200 Subject: [PATCH 7/9] revert initiating conns between two peers --- pkg/netpol/eval/check.go | 4 +- pkg/netpol/eval/internal/k8s/netpol.go | 79 +++----------------------- 2 files changed, 10 insertions(+), 73 deletions(-) diff --git a/pkg/netpol/eval/check.go b/pkg/netpol/eval/check.go index 60b5b51b..f819f58b 100644 --- a/pkg/netpol/eval/check.go +++ b/pkg/netpol/eval/check.go @@ -280,7 +280,7 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress // update its ingress entire cluster connection relying on policy data dst.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.IngressGeneralConns.EntireClusterConns, isIngress) } - policyAllowedConnectionsPerDirection, err = policy.GetIngressAllowedConns(src, dst, pe.exposureAnalysisFlag) + policyAllowedConnectionsPerDirection, err = policy.GetIngressAllowedConns(src, dst) } else { // policy selecting src if pe.exposureAnalysisFlag && !egressSet[src] { @@ -288,7 +288,7 @@ func (pe 
*PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress // update its egress entire cluster connection relying on policy data src.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.EgressGeneralConns.EntireClusterConns, isIngress) } - policyAllowedConnectionsPerDirection, err = policy.GetEgressAllowedConns(dst, pe.exposureAnalysisFlag) + policyAllowedConnectionsPerDirection, err = policy.GetEgressAllowedConns(dst) } if err != nil { return allowedConns, err diff --git a/pkg/netpol/eval/internal/k8s/netpol.go b/pkg/netpol/eval/internal/k8s/netpol.go index 1eaebccc..577621b3 100644 --- a/pkg/netpol/eval/internal/k8s/netpol.go +++ b/pkg/netpol/eval/internal/k8s/netpol.go @@ -173,12 +173,10 @@ func (np *NetworkPolicy) ruleConnsContain(rulePorts []netv1.NetworkPolicyPort, p } // ruleSelectsPeer returns true if the given peer is in the set of peers selected by rulePeers -// if exposureFlag is true, skips general rules; i.e. returns false also if the rule selects all destinations -// or entire cluster // //gocyclo:ignore -func (np *NetworkPolicy) ruleSelectsPeer(rulePeers []netv1.NetworkPolicyPeer, peer Peer, exposureFlag bool) (bool, error) { - if len(rulePeers) == 0 && !exposureFlag { +func (np *NetworkPolicy) ruleSelectsPeer(rulePeers []netv1.NetworkPolicyPeer, peer Peer) (bool, error) { + if len(rulePeers) == 0 { return true, nil // If this field is empty or missing, this rule matches all destinations } for i := range rulePeers { @@ -202,10 +200,6 @@ func (np *NetworkPolicy) ruleSelectsPeer(rulePeers []netv1.NetworkPolicyPeer, pe if err != nil { return false, err } - if exposureFlag && selector.Empty() && rulePeers[i].PodSelector == nil { - // general rule matching entire cluster (namespaceSelector : {}); skip in case of exposure analysis - continue - } peerNamespace := peer.GetPeerNamespace() peerMatchesNamespaceSelector = selector.Matches(labels.Set(peerNamespace.Labels)) } @@ -252,7 +246,7 @@ func (np *NetworkPolicy) 
IngressAllowedConn(src Peer, protocol, port string, dst rulePeers := np.Spec.Ingress[i].From rulePorts := np.Spec.Ingress[i].Ports - peerSselected, err := np.ruleSelectsPeer(rulePeers, src, false) + peerSselected, err := np.ruleSelectsPeer(rulePeers, src) if err != nil { return false, err } @@ -276,7 +270,7 @@ func (np *NetworkPolicy) EgressAllowedConn(dst Peer, protocol, port string) (boo rulePeers := np.Spec.Egress[i].To rulePorts := np.Spec.Egress[i].Ports - peerSselected, err := np.ruleSelectsPeer(rulePeers, dst, false) + peerSselected, err := np.ruleSelectsPeer(rulePeers, dst) if err != nil { return false, err } @@ -295,20 +289,12 @@ func (np *NetworkPolicy) EgressAllowedConn(dst Peer, protocol, port string) (boo } // GetEgressAllowedConns returns the set of allowed connections from any captured pod to the destination peer -// if exposureFlag is true; the result is initiated with the matching policy's general connections -// and computations of rule conns are skipped for general rules; -func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer, exposureFlag bool) (*common.ConnectionSet, error) { +func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer) (*common.ConnectionSet, error) { res := common.MakeConnectionSet(false) - if exposureFlag { - // if exposure-analysis is on; we already have the policy's entire world and entire cluster connections; - // so we can initiate the result with it - res = np.initEgressAllowedConnsFromPolicyGeneralConns(dst) - } for _, rule := range np.Spec.Egress { rulePeers := rule.To rulePorts := rule.Ports - // in case of exposure analysis, returns false for general rule; as we already has its conns in res - peerSselected, err := np.ruleSelectsPeer(rulePeers, dst, exposureFlag) + peerSselected, err := np.ruleSelectsPeer(rulePeers, dst) if err != nil { return res, err } @@ -328,20 +314,12 @@ func (np *NetworkPolicy) GetEgressAllowedConns(dst Peer, exposureFlag bool) (*co } // GetIngressAllowedConns returns the set of allowed 
connections to a captured dst pod from the src peer -// if exposureFlag is true; the result is initiated with the matching policy's general connections -// and computations of rule conns are skipped for general rules; -func (np *NetworkPolicy) GetIngressAllowedConns(src, dst Peer, exposureFlag bool) (*common.ConnectionSet, error) { +func (np *NetworkPolicy) GetIngressAllowedConns(src, dst Peer) (*common.ConnectionSet, error) { res := common.MakeConnectionSet(false) - if exposureFlag { - // if exposure-analysis is on; we already have the policy's and dst's entire world and entire cluster connections; - // so we can initiate the result - res = np.initIngressAllowedConnsFromGeneralConns(src, dst) - } for _, rule := range np.Spec.Ingress { rulePeers := rule.From rulePorts := rule.Ports - // in case of exposure analysis, returns false for general rule; as we already has its conns in res - peerSselected, err := np.ruleSelectsPeer(rulePeers, src, exposureFlag) + peerSselected, err := np.ruleSelectsPeer(rulePeers, src) if err != nil { return res, err } @@ -543,44 +521,3 @@ func (np *NetworkPolicy) doRulesExposeToAllDestOrEntireCluster(rules []netv1.Net } return false, false } - -///////////////////////////////////////////////// -// exposure analysis benefits from pre-processing stored data - -// initEgressAllowedConnsFromPolicyGeneralConns returns an init egress allowed conns between two peers -// based on the policy's general connections as following: -// - if the dst peer is ip-block, then return the copy of policy's egress AllDestinationsConns -// - if the dst peer is a real pod, return the copy of policy's egress EntireClusterConns with converting the named ports -// if found in the connection (conversion based on dst pod's ports) -// - if the dst peer is a representative pod, return the copy policy's egress EntireClusterConns without converting the named -// ports if found (for a representative pod, a named port is potential connection port) -// returns copy of a 
general connection, since it might be changed for the specific connection between two peers -func (np *NetworkPolicy) initEgressAllowedConnsFromPolicyGeneralConns(dst Peer) *common.ConnectionSet { - if dst.GetPeerIPBlock() != nil { // dst is ip - return np.EgressGeneralConns.AllDestinationsConns.Copy() - } - res := np.EgressGeneralConns.EntireClusterConns - if dst.GetPeerPod().FakePod { // dst is representative - return res.Copy() - } - // dst is real pod - convertedConn := dst.GetPeerPod().checkAndConvertNamedPortsInConnection(res) - if convertedConn != nil { - return convertedConn - } - return res.Copy() -} - -// initIngressAllowedConnsFromGeneralConns returns an init ingress allowed conns between two peers; -// based on policy and dst pod general connections as following: -// 1. src peer is an ip-block : returns copy of policy's ingress AllDestinationsConns -// 2. src peer is a pod (real or representative) : returns copy of dst pod's ingress' entire-cluster connection -// as it already contains the policy's ingress entire-cluster connection and converted named ports (if found) -// returns copy of a general connection, since it might be changed for the specific connection between two peers -func (np *NetworkPolicy) initIngressAllowedConnsFromGeneralConns(src, dst Peer) *common.ConnectionSet { - if src.GetPeerIPBlock() != nil { // src is ip - return np.IngressGeneralConns.AllDestinationsConns.Copy() - } - // src is pod peer - return dst.GetPeerPod().IngressExposureData.EntireClusterConnection.Copy() -} From cc27f02356cce88966a1b3460870507e06ced60a Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Wed, 14 Feb 2024 13:42:15 +0200 Subject: [PATCH 8/9] avoid iterating policy if its general conns are all conns --- pkg/netpol/eval/check.go | 47 +++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/pkg/netpol/eval/check.go b/pkg/netpol/eval/check.go index f819f58b..6d70c293 100644 --- a/pkg/netpol/eval/check.go +++ 
b/pkg/netpol/eval/check.go @@ -268,28 +268,26 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress // iterate relevant network policies (that capture the required pod) for _, policy := range netpols { - // if isIngress: check for ingress rules that capture src within 'from' - // if not isIngress: check for egress rules that capture dst within 'to' - // collect the allowed connectivity from the relevant rules into allowedConns - var policyAllowedConnectionsPerDirection *common.ConnectionSet - var err error - if isIngress { - // policy selecting dst (dst pod is real) - if pe.exposureAnalysisFlag && !ingressSet[dst] { + // in case of exposure-analysis: update exposure data for relevant pod + if pe.exposureAnalysisFlag { + if isIngress && !ingressSet[dst] { + // policy selecting dst (dst pod is real) // if this is first time scanning policies selecting this dst peer, // update its ingress entire cluster connection relying on policy data dst.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.IngressGeneralConns.EntireClusterConns, isIngress) } - policyAllowedConnectionsPerDirection, err = policy.GetIngressAllowedConns(src, dst) - } else { - // policy selecting src - if pe.exposureAnalysisFlag && !egressSet[src] { + if !isIngress && !egressSet[src] { + // policy selecting src // if this is first time scanning policies selecting this src peer, // update its egress entire cluster connection relying on policy data src.GetPeerPod().UpdatePodXgressExposureToEntireClusterData(policy.EgressGeneralConns.EntireClusterConns, isIngress) } - policyAllowedConnectionsPerDirection, err = policy.GetEgressAllowedConns(dst) } + // determine policy's allowed connections between the peers for the direction + // if isIngress: check for ingress rules that capture src within 'from' + // if not isIngress: check for egress rules that capture dst within 'to' + // collect the allowed connectivity from the relevant rules into allowedConns + 
policyAllowedConnectionsPerDirection, err := determineAllowedConnsPerDirection(policy, src, dst, isIngress) if err != nil { return allowedConns, err } @@ -309,6 +307,29 @@ func (pe *PolicyEngine) allallowedXgressConnections(src, dst k8s.Peer, isIngress return allowedConns, nil } +// determineAllowedConnsPerDirection - helping func, determine policy's allowed connections between the peers for the direction +func determineAllowedConnsPerDirection(policy *k8s.NetworkPolicy, src, dst k8s.Peer, isIngress bool) (*common.ConnectionSet, error) { + if isIngress { + switch { + case policy.IngressGeneralConns.AllDestinationsConns.AllowAll: + return policy.IngressGeneralConns.AllDestinationsConns, nil + case policy.IngressGeneralConns.EntireClusterConns.AllowAll && src.PeerType() == k8s.PodType: + return policy.IngressGeneralConns.EntireClusterConns, nil + default: + return policy.GetIngressAllowedConns(src, dst) + } + } + // else egress + switch { + case policy.EgressGeneralConns.AllDestinationsConns.AllowAll: + return policy.EgressGeneralConns.AllDestinationsConns, nil + case policy.EgressGeneralConns.EntireClusterConns.AllowAll && dst.PeerType() == k8s.PodType: + return policy.EgressGeneralConns.EntireClusterConns, nil + default: + return policy.GetEgressAllowedConns(dst) + } +} + // isPeerNodeIP returns true if peer1 is an IP address of a node and peer2 is a pod on that node func isPeerNodeIP(peer1, peer2 k8s.Peer) bool { return peer2.PeerType() == k8s.PodType && peer1.PeerType() == k8s.IPBlockType && From ae8b7d92266d17677498b96f0b13cbd611610f91 Mon Sep 17 00:00:00 2001 From: shireenf-ibm Date: Wed, 14 Feb 2024 14:53:10 +0200 Subject: [PATCH 9/9] todo comment --- pkg/netpol/eval/internal/k8s/netpol.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/netpol/eval/internal/k8s/netpol.go b/pkg/netpol/eval/internal/k8s/netpol.go index 577621b3..7516e5f3 100644 --- a/pkg/netpol/eval/internal/k8s/netpol.go +++ b/pkg/netpol/eval/internal/k8s/netpol.go @@ -48,6 +48,10 @@ 
type NetworkPolicy struct { EgressGeneralConns PolicyGeneralRulesConns } +// @todo it might help, during pre-processing, to check containment of all rules' connections; if all "specific" rules' +// connections are contained in the "general" rules' connections, then we can avoid iterating policy rules when computing +// connections between two peers + type PolicyGeneralRulesConns struct { // AllDestinationsConns contains the maximal connection-set which the policy's rules allow to all destinations // (all namespaces, pods and IP addresses)