From 7ea4efd217ced2fe7ed2e228cd8689f84bc49c19 Mon Sep 17 00:00:00 2001 From: Xie Zheng Date: Wed, 21 Sep 2022 18:29:07 +0800 Subject: [PATCH 1/2] IPPool CRD init Consider the stale IPPool in NCP, use v1alpha2. --- PROJECT | 7 + build/yaml/crd/nsx.vmware.com_ippools.yaml | 137 +++++++++++++++++++ cmd/main.go | 2 + go.mod | 7 +- go.sum | 13 +- pkg/apis/v1alpha2/groupversion_info.go | 23 ++++ pkg/apis/v1alpha2/ippool_types.go | 79 +++++++++++ pkg/apis/v1alpha2/zz_generated.deepcopy.go | 150 +++++++++++++++++++++ 8 files changed, 412 insertions(+), 6 deletions(-) create mode 100644 build/yaml/crd/nsx.vmware.com_ippools.yaml create mode 100644 pkg/apis/v1alpha2/groupversion_info.go create mode 100644 pkg/apis/v1alpha2/ippool_types.go create mode 100644 pkg/apis/v1alpha2/zz_generated.deepcopy.go diff --git a/PROJECT b/PROJECT index 9d3120800..1a6ca2a64 100644 --- a/PROJECT +++ b/PROJECT @@ -20,4 +20,11 @@ resources: kind: SecurityPolicy path: github.com/vmware-tanzu/nsx-operator/pkg/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: nsx.vmware.com + kind: IPPool + path: github.com/vmware-tanzu/nsx-operator/pkg/api/v1alpha2 + version: v1alpha2 version: "3" diff --git a/build/yaml/crd/nsx.vmware.com_ippools.yaml b/build/yaml/crd/nsx.vmware.com_ippools.yaml new file mode 100644 index 000000000..cddf2593f --- /dev/null +++ b/build/yaml/crd/nsx.vmware.com_ippools.yaml @@ -0,0 +1,137 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: ippools.nsx.vmware.com +spec: + group: nsx.vmware.com + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + description: IPPool is the Schema for the ippools API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec defines the desired state of IPPool. + properties: + subnets: + description: Subnets defines set of subnets need to be allocated. + items: + description: SubnetRequest defines the subnet allocation request. + properties: + ipFamily: + default: IPv4 + description: IPFamily defines the IP family type for this subnet, + could be IPv4 or IPv6. This is optional, the default is IPv4. + enum: + - IPv4 + - IPv6 + type: string + name: + description: Name defines the name of this subnet. + type: string + prefixLength: + description: PrefixLength defines prefix length for this subnet. + type: integer + required: + - name + type: object + type: array + type: + default: public + description: Type defines the type of this IPPool, public or private. + enum: + - public + - private + type: string + required: + - type + type: object + status: + description: IPPoolStatus defines the observed state of IPPool. 
+ properties: + conditions: + description: Conditions defines current state of the IPPool. + items: + description: Condition defines condition of custom resource. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: Message shows a human-readable message about condition. + type: string + reason: + description: Reason shows a brief reason of condition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type defines condition type. + type: string + required: + - status + - type + type: object + type: array + subnets: + description: Subnets defines subnets allocation result. + items: + description: SubnetResult defines the subnet allocation result. + properties: + cidr: + description: CIDR defines the allocated CIDR. + type: string + name: + description: Name defines the name of this subnet. + type: string + required: + - cidr + - name + type: object + type: array + required: + - conditions + - subnets + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/cmd/main.go b/cmd/main.go index 86ad02606..8c2a6f9c8 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -17,6 +17,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" "github.com/vmware-tanzu/nsx-operator/pkg/config" "github.com/vmware-tanzu/nsx-operator/pkg/controllers" "github.com/vmware-tanzu/nsx-operator/pkg/logger" @@ -35,6 +36,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(v1alpha2.AddToScheme(scheme)) flag.StringVar(&probeAddr, "health-probe-bind-address", ":8384", "The address the probe endpoint binds to.") flag.StringVar(&metricsAddr, "metrics-bind-address", ":8093", "The address the metrics endpoint binds to.") config.AddFlags() diff --git a/go.mod b/go.mod index e86a785af..6cd6642bb 100644 --- a/go.mod +++ b/go.mod @@ -6,16 +6,18 @@ require ( github.com/agiledragon/gomonkey v2.0.2+incompatible github.com/agiledragon/gomonkey/v2 v2.4.0 github.com/deckarep/golang-set v1.8.0 + github.com/go-logr/logr v1.2.0 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang/mock v1.6.0 + github.com/prometheus/client_golang v1.11.0 github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.7.0 github.com/vmware/govmomi v0.27.4 github.com/vmware/vsphere-automation-sdk-go/lib v0.4.0 github.com/vmware/vsphere-automation-sdk-go/runtime v0.4.0 github.com/vmware/vsphere-automation-sdk-go/services/nsxt v0.6.0 + go.uber.org/zap v1.19.1 golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 - golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff gopkg.in/ini.v1 v1.66.4 k8s.io/api v0.23.4 k8s.io/apimachinery v0.23.4 @@ -31,7 +33,6 @@ require ( github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gibson042/canonicaljson-go v1.0.3 // indirect - github.com/go-logr/logr v1.2.0 // indirect github.com/go-logr/zapr v1.2.0 // indirect github.com/gogo/protobuf 
v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -47,7 +48,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.11.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.28.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect @@ -55,7 +55,6 @@ require ( github.com/stretchr/objx v0.1.1 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.19.1 // indirect golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect diff --git a/go.sum b/go.sum index 10ef29261..f8980bd24 100644 --- a/go.sum +++ b/go.sum @@ -87,6 +87,7 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -170,6 +171,7 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -360,16 +362,20 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 
h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -505,6 +511,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -601,6 +608,7 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -680,6 +688,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -770,13 +779,13 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -927,7 +936,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -976,6 +984,7 @@ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtud sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/apis/v1alpha2/groupversion_info.go b/pkg/apis/v1alpha2/groupversion_info.go new file mode 100644 index 000000000..7c78a791a --- /dev/null +++ b/pkg/apis/v1alpha2/groupversion_info.go @@ -0,0 +1,23 @@ +/* Copyright © 2022 VMware, Inc. All Rights Reserved. 
+ SPDX-License-Identifier: Apache-2.0 */ + +// Package v1alpha2 contains API Schema definitions for the v1alpha2 API group +//+kubebuilder:object:generate=true +//+groupName=nsx.vmware.com +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "nsx.vmware.com", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/apis/v1alpha2/ippool_types.go b/pkg/apis/v1alpha2/ippool_types.go new file mode 100644 index 000000000..942f71246 --- /dev/null +++ b/pkg/apis/v1alpha2/ippool_types.go @@ -0,0 +1,79 @@ +/* Copyright © 2022 VMware, Inc. All Rights Reserved. + SPDX-License-Identifier: Apache-2.0 */ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" +) + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// IPPool is the Schema for the ippools API +type IPPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec IPPoolSpec `json:"spec"` + Status IPPoolStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// IPPoolList contains a list of IPPool +type IPPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IPPool `json:"items"` +} + +// IPPoolSpec defines the desired state of IPPool. +type IPPoolSpec struct { + // Type defines the type of this IPPool, public or private. + // +kubebuilder:validation:Enum=public;private + // +kubebuilder:default=public + Type string `json:"type"` + // Subnets defines set of subnets need to be allocated. + // +optional + Subnets []SubnetRequest `json:"subnets"` +} + +// IPPoolStatus defines the observed state of IPPool. +type IPPoolStatus struct { + // Subnets defines subnets allocation result. + Subnets []SubnetResult `json:"subnets"` + // Conditions defines current state of the IPPool. + Conditions []v1alpha1.Condition `json:"conditions"` +} + +// SubnetRequest defines the subnet allocation request. +type SubnetRequest struct { + // PrefixLength defines prefix length for this subnet. + // +optional + PrefixLength int `json:"prefixLength,omitempty"` + + // IPFamily defines the IP family type for this subnet, could be IPv4 or IPv6. + // This is optional, the default is IPv4. + // +kubebuilder:validation:Enum=IPv4;IPv6 + // +kubebuilder:default=IPv4 + IPFamily string `json:"ipFamily,omitempty"` + + // Name defines the name of this subnet. + Name string `json:"name"` +} + +// SubnetResult defines the subnet allocation result. +type SubnetResult struct { + // CIDR defines the allocated CIDR. + CIDR string `json:"cidr"` + + // Name defines the name of this subnet. + Name string `json:"name"` +} + +func init() { + SchemeBuilder.Register(&IPPool{}, &IPPoolList{}) +} diff --git a/pkg/apis/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..7b99fda69 --- /dev/null +++ b/pkg/apis/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* Copyright © 2022 VMware, Inc. All Rights Reserved. 
+ SPDX-License-Identifier: Apache-2.0 */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPool) DeepCopyInto(out *IPPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPool. +func (in *IPPool) DeepCopy() *IPPool { + if in == nil { + return nil + } + out := new(IPPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolList) DeepCopyInto(out *IPPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolList. +func (in *IPPoolList) DeepCopy() *IPPoolList { + if in == nil { + return nil + } + out := new(IPPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolSpec) DeepCopyInto(out *IPPoolSpec) { + *out = *in + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]SubnetRequest, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolSpec. +func (in *IPPoolSpec) DeepCopy() *IPPoolSpec { + if in == nil { + return nil + } + out := new(IPPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolStatus) DeepCopyInto(out *IPPoolStatus) { + *out = *in + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]SubnetResult, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1alpha1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolStatus. +func (in *IPPoolStatus) DeepCopy() *IPPoolStatus { + if in == nil { + return nil + } + out := new(IPPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubnetRequest) DeepCopyInto(out *SubnetRequest) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetRequest. +func (in *SubnetRequest) DeepCopy() *SubnetRequest { + if in == nil { + return nil + } + out := new(SubnetRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetResult) DeepCopyInto(out *SubnetResult) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetResult. +func (in *SubnetResult) DeepCopy() *SubnetResult { + if in == nil { + return nil + } + out := new(SubnetResult) + in.DeepCopyInto(out) + return out +} From 723c94f340df6c6eb586a248c0207a4c4caceba0 Mon Sep 17 00:00:00 2001 From: Xie Zheng Date: Thu, 7 Jul 2022 11:00:30 +0800 Subject: [PATCH 2/2] Support named port feature Consider the below scenarios: * When a new added pod whose port name exists in security policy * When a deleted pod whose port name exists in security policy * When a pod's label is changed * When a namespace's label is changed Ensure named port and endPort not coexist, endPort can only be defined if port is also defined, and both ports must be numeric. Fix compare rule and compare group logic. When pod or namespace label change, the IPSet group may change, however, the groups compare logic still returns equal, the group misses Expression field and should also use DataValueToJsonEncoder to compare groups. Fix the bug no pod to be selected if only namespaceSelectors or podSelector. Fix the bug no right pod to be selected when there is *and* relationship in one target and *or* relationship in another target. --- cmd/main.go | 17 +- docs/security-policy.md | 2 +- go.mod | 7 +- go.sum | 1 - .../securitypolicy/namespace_controller.go | 91 +++ .../namespace_controller_test.go | 170 ++++++ .../securitypolicy/pod_controller.go | 120 ++++ .../securitypolicy/pod_controller_test.go | 270 +++++++++ .../securitypolicy_controller.go | 106 +++- .../securitypolicy_controller_test.go | 139 +++-- pkg/logger/logger.go | 22 +- pkg/metrics/metrics.go | 6 +- .../client/mock_workqueue.go | 124 ++++ pkg/nsx/endpoint.go | 2 +- pkg/nsx/services/builder.go | 571 +++++++++++------- pkg/nsx/services/builder_test.go | 30 +- pkg/nsx/services/compare.go | 90 +-- pkg/nsx/services/compare_test.go | 3 +- pkg/nsx/services/expand.go | 324 ++++++++++ pkg/nsx/services/expand_test.go | 223 +++++++ pkg/nsx/services/firewall.go | 70 ++- pkg/nsx/services/firewall_test.go | 11 +- pkg/nsx/services/store.go | 13 + pkg/nsx/util/errors.go | 16 + pkg/nsx/util/utils.go | 76 ++- pkg/nsx/util/utils_test.go | 5 +- pkg/util/utils.go | 46 ++ pkg/util/utils_test.go | 146 +++++ 28 files changed, 2284 insertions(+), 417 deletions(-) create mode 100644 pkg/controllers/securitypolicy/namespace_controller.go create mode 100644 pkg/controllers/securitypolicy/namespace_controller_test.go create mode 100644 pkg/controllers/securitypolicy/pod_controller.go create mode 100644 pkg/controllers/securitypolicy/pod_controller_test.go rename pkg/controllers/{ => securitypolicy}/securitypolicy_controller.go (78%) rename pkg/controllers/{ => securitypolicy}/securitypolicy_controller_test.go (80%) create mode 100644 pkg/mock/controller-runtime/client/mock_workqueue.go create mode 100644 pkg/nsx/services/expand.go create mode 100644 pkg/nsx/services/expand_test.go diff --git a/cmd/main.go b/cmd/main.go index 
86ad02606..b91772267 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -18,7 +18,7 @@ import ( "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" "github.com/vmware-tanzu/nsx-operator/pkg/config" - "github.com/vmware-tanzu/nsx-operator/pkg/controllers" + "github.com/vmware-tanzu/nsx-operator/pkg/controllers/securitypolicy" "github.com/vmware-tanzu/nsx-operator/pkg/logger" "github.com/vmware-tanzu/nsx-operator/pkg/metrics" "github.com/vmware-tanzu/nsx-operator/pkg/nsx" @@ -45,7 +45,7 @@ func init() { if err != nil { os.Exit(1) } - logf.SetLogger(logger.ZapLogger(cf)) + logf.SetLogger(logger.ZapLogger()) log = logf.Log.WithName("main") if metrics.AreMetricsExposed(cf) { metrics.InitializePrometheusMetrics() @@ -65,24 +65,25 @@ func main() { log.Error(err, "failed to init manager") os.Exit(1) } - securityReconcile := &controllers.SecurityPolicyReconciler{ + securityReconcile := &securitypolicy.SecurityPolicyReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), } nsxClient := nsx.GetClient(cf) if nsxClient == nil { - log.Error(err, "unable to get nsx client") + log.Error(err, "failed to get nsx client") os.Exit(1) } if service, err := services.InitializeSecurityPolicy(nsxClient, cf); err != nil { - log.Error(err, "unable to initialize securitypolicy service", "controller", "SecurityPolicy") + log.Error(err, "failed to initialize securitypolicy service", "controller", "SecurityPolicy") os.Exit(1) } else { + service.Client = mgr.GetClient() securityReconcile.Service = service } if err = securityReconcile.Start(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "SecurityPolicy") + log.Error(err, "failed to create controller", "controller", "SecurityPolicy") os.Exit(1) } @@ -91,11 +92,11 @@ func main() { } if err := mgr.AddHealthzCheck("healthz", nsxClient.NSXChecker.CheckNSXHealth); err != nil { - log.Error(err, "unable to set up health check") + log.Error(err, "failed to set up health check") os.Exit(1) } if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - log.Error(err, "unable to set up ready check") + log.Error(err, "failed to set up ready check") os.Exit(1) } diff --git a/docs/security-policy.md b/docs/security-policy.md index 9eb37032e..2afd12604 100644 --- a/docs/security-policy.md +++ b/docs/security-policy.md @@ -306,4 +306,4 @@ Limitations of SecurityPolicy CR: to support 'In' with limited counts. 7. Max IP elements in one security policy: 4000 8. Priority range of SecurityPolicy CR is [0, 1000]. -9. Do not support named port. \ No newline at end of file +9. Support named port for Pod, but not for VM. 
\ No newline at end of file diff --git a/go.mod b/go.mod index e86a785af..6cd6642bb 100644 --- a/go.mod +++ b/go.mod @@ -6,16 +6,18 @@ require ( github.com/agiledragon/gomonkey v2.0.2+incompatible github.com/agiledragon/gomonkey/v2 v2.4.0 github.com/deckarep/golang-set v1.8.0 + github.com/go-logr/logr v1.2.0 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang/mock v1.6.0 + github.com/prometheus/client_golang v1.11.0 github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.7.0 github.com/vmware/govmomi v0.27.4 github.com/vmware/vsphere-automation-sdk-go/lib v0.4.0 github.com/vmware/vsphere-automation-sdk-go/runtime v0.4.0 github.com/vmware/vsphere-automation-sdk-go/services/nsxt v0.6.0 + go.uber.org/zap v1.19.1 golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 - golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff gopkg.in/ini.v1 v1.66.4 k8s.io/api v0.23.4 k8s.io/apimachinery v0.23.4 @@ -31,7 +33,6 @@ require ( github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gibson042/canonicaljson-go v1.0.3 // indirect - github.com/go-logr/logr v1.2.0 // indirect github.com/go-logr/zapr v1.2.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -47,7 +48,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.11.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.28.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect @@ -55,7 +55,6 @@ require ( github.com/stretchr/objx v0.1.1 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.19.1 // indirect golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect diff --git a/go.sum b/go.sum index 10ef29261..d7fbc00e0 100644 --- a/go.sum +++ b/go.sum @@ -776,7 +776,6 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/controllers/securitypolicy/namespace_controller.go b/pkg/controllers/securitypolicy/namespace_controller.go new file mode 100644 index 000000000..2945a8e79 --- /dev/null +++ b/pkg/controllers/securitypolicy/namespace_controller.go @@ -0,0 +1,91 @@ +/* Copyright © 2022 VMware, Inc. All Rights Reserved. 
+ SPDX-License-Identifier: Apache-2.0 */ + +package securitypolicy + +import ( + "context" + "reflect" + + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + util2 "github.com/vmware-tanzu/nsx-operator/pkg/util" +) + +// We should consider the below scenarios: +// When a namespace's label is changed and if there are pods in this namespace, +// we should reconcile the corresponding security policy. + +type EnqueueRequestForNamespace struct { + Client client.Client +} + +func (e *EnqueueRequestForNamespace) Create(_ event.CreateEvent, _ workqueue.RateLimitingInterface) { + log.V(1).Info("namespace create event, do nothing") +} + +func (e *EnqueueRequestForNamespace) Delete(_ event.DeleteEvent, _ workqueue.RateLimitingInterface) { + log.V(1).Info("namespace delete event, do nothing") +} + +func (e *EnqueueRequestForNamespace) Generic(_ event.GenericEvent, _ workqueue.RateLimitingInterface) { + log.V(1).Info("namespace generic event, do nothing") +} + +func (e *EnqueueRequestForNamespace) Update(updateEvent event.UpdateEvent, l workqueue.RateLimitingInterface) { + obj := updateEvent.ObjectNew.(*v1.Namespace) + if isInSysNs, err := util2.IsSystemNamespace(nil, "", obj); err != nil { + log.Error(err, "failed to fetch namespace", "namespace", obj.Name) + return + } else if isInSysNs { + log.V(2).Info("namespace is in system namespace, ignore it", "namespace", obj.Name) + return + } + + podList := &v1.PodList{} + err := e.Client.List(context.Background(), podList, client.InNamespace(obj.Name)) + if err != nil { + log.Error(err, "failed to list pod in namespace", "namespace", obj.Name) + return + } + + shouldReconcile := false + for _, pod := range podList.Items { + if util2.CheckPodHasNamedPort(pod, "update") { + shouldReconcile = true + break + } + } + if !shouldReconcile { + log.Info("no pod in namespace is relevant", "namespace", obj.Name) + return + } + + err = reconcileSecurityPolicy(e.Client, podList.Items, l) + if err != nil { + log.Error(err, "failed to reconcile security policy") + } +} + +var PredicateFuncsNs = predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj := e.ObjectOld.(*v1.Namespace) + newObj := e.ObjectNew.(*v1.Namespace) + log.V(1).Info("receive namespace update event", "name", oldObj.Name) + if reflect.DeepEqual(oldObj.ObjectMeta.Labels, newObj.ObjectMeta.Labels) { + log.Info("label of namespace is not changed, ignore it", "name", oldObj.Name) + return false + } + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, +} diff --git a/pkg/controllers/securitypolicy/namespace_controller_test.go b/pkg/controllers/securitypolicy/namespace_controller_test.go new file mode 100644 index 000000000..69def314c --- /dev/null +++ b/pkg/controllers/securitypolicy/namespace_controller_test.go @@ -0,0 +1,170 @@ +package securitypolicy + +import ( + "context" + "testing" + + "github.com/agiledragon/gomonkey" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + + mock_client "github.com/vmware-tanzu/nsx-operator/pkg/mock/controller-runtime/client" +) + +func TestEnqueueRequestForNamespace_Create(t *testing.T) 
{ + type fields struct { + Client client.Client + } + type args struct { + createEvent event.CreateEvent + l workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{event.CreateEvent{}, nil}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForNamespace{ + Client: tt.fields.Client, + } + e.Create(tt.args.createEvent, tt.args.l) + }) + } +} + +func TestEnqueueRequestForNamespace_Delete(t *testing.T) { + type fields struct { + Client client.Client + } + type args struct { + deleteEvent event.DeleteEvent + l workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{event.DeleteEvent{}, nil}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForNamespace{ + Client: tt.fields.Client, + } + e.Delete(tt.args.deleteEvent, tt.args.l) + }) + } +} + +func TestEnqueueRequestForNamespace_Generic(t *testing.T) { + type fields struct { + Client client.Client + } + type args struct { + genericEvent event.GenericEvent + l workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{event.GenericEvent{}, nil}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForNamespace{ + Client: tt.fields.Client, + } + e.Generic(tt.args.genericEvent, tt.args.l) + }) + } +} + +func TestEnqueueRequestForNamespace_Update(t *testing.T) { + podList := v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Ports: []v1.ContainerPort{ + {Name: "test-port", ContainerPort: 8080}, + {Name: "test-port-2", ContainerPort: 80}, + }, + }, + }, + }, + }, + }, + } + mockCtl := gomock.NewController(t) + k8sClient := mock_client.NewMockClient(mockCtl) + ctx := context.Background() + pList := &v1.PodList{} + ops := client.ListOption(client.InNamespace("test-ns-new")) + k8sClient.EXPECT().List(ctx, pList, ops).Return(nil).Do(func(_ context.Context, list client.ObjectList, + o ...client.ListOption, + ) error { + log.Info("listing pods", "options", o) + a := list.(*v1.PodList) + a.Items = podList.Items + return nil + }) + patches := gomonkey.ApplyFunc(reconcileSecurityPolicy, func(client client.Client, pods []v1.Pod, + q workqueue.RateLimitingInterface, + ) error { + return nil + }) + defer patches.Reset() + + type fields struct { + Client client.Client + } + type args struct { + updateEvent event.UpdateEvent + l workqueue.RateLimitingInterface + } + evt := event.UpdateEvent{ + ObjectOld: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns", + }, + }, + ObjectNew: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns-new", + }, + }, + } + tests := []struct { + name string + fields fields + args args + wantErr assert.ErrorAssertionFunc + }{ + {"1", fields{k8sClient}, args{updateEvent: evt}, assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForNamespace{ + Client: tt.fields.Client, + } + e.Update(tt.args.updateEvent, tt.args.l) + }) + } +} diff --git a/pkg/controllers/securitypolicy/pod_controller.go b/pkg/controllers/securitypolicy/pod_controller.go new file mode 100644 index 000000000..6aa25f3ca --- /dev/null +++ b/pkg/controllers/securitypolicy/pod_controller.go @@ -0,0 +1,120 @@ +/* Copyright © 2022 
VMware, Inc. All Rights Reserved. + SPDX-License-Identifier: Apache-2.0 */ + +package securitypolicy + +import ( + "reflect" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + util2 "github.com/vmware-tanzu/nsx-operator/pkg/util" +) + +// We should consider the below scenarios: +// When a new added pod whose port name exists in security policy. +// When a deleted pod whose port name exists in security policy. +// When a pod's label is changed. +// In summary, we could roughly think if the port name of security policy exists in the +// new pod or old pod, we should reconcile the security policy. + +type EnqueueRequestForPod struct { + Client client.Client +} + +func (e *EnqueueRequestForPod) Create(createEvent event.CreateEvent, q workqueue.RateLimitingInterface) { + e.Raw(createEvent, q) +} + +func (e *EnqueueRequestForPod) Update(updateEvent event.UpdateEvent, q workqueue.RateLimitingInterface) { + e.Raw(updateEvent, q) +} + +func (e *EnqueueRequestForPod) Delete(deleteEvent event.DeleteEvent, q workqueue.RateLimitingInterface) { + e.Raw(deleteEvent, q) +} + +func (e *EnqueueRequestForPod) Generic(genericEvent event.GenericEvent, q workqueue.RateLimitingInterface) { + e.Raw(genericEvent, q) +} + +func (e *EnqueueRequestForPod) Raw(evt interface{}, q workqueue.RateLimitingInterface) { + var pods []v1.Pod + var obj client.Object + + switch et := evt.(type) { + case event.CreateEvent: + obj = et.Object.(*v1.Pod) + case event.UpdateEvent: + obj = et.ObjectNew.(*v1.Pod) + case event.DeleteEvent: + obj = et.Object.(*v1.Pod) + case event.GenericEvent: + obj = et.Object.(*v1.Pod) + default: + log.Error(nil, "unknown event type", "event", evt) + } + + pod := obj.(*v1.Pod) + if isInSysNs, err := util2.IsSystemNamespace(e.Client, pod.Namespace, nil); err != nil { + log.Error(err, "failed to fetch namespace", "namespace", pod.Namespace) + return + } else if isInSysNs { + log.V(2).Info("pod is in system namespace, do nothing") + return + } + pods = append(pods, *pod) + err := reconcileSecurityPolicy(e.Client, pods, q) + if err != nil { + log.Error(err, "failed to reconcile security policy") + } +} + +func getAllPodPortNames(pods []v1.Pod) sets.String { + podPortNames := sets.NewString() + for _, pod := range pods { + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name != "" { + podPortNames.Insert(port.Name) + } + } + } + } + return podPortNames +} + +var PredicateFuncsPod = predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if p, ok := e.Object.(*v1.Pod); ok { + log.V(1).Info("receive pod create event", "namespace", p.Namespace, "name", p.Name) + return util2.CheckPodHasNamedPort(*p, "create") + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj := e.ObjectOld.(*v1.Pod) + newObj := e.ObjectNew.(*v1.Pod) + log.V(1).Info("receive pod update event", "namespace", oldObj.Namespace, "name", oldObj.Name) + if reflect.DeepEqual(oldObj.ObjectMeta.Labels, newObj.ObjectMeta.Labels) { + log.V(1).Info("label of pod is not changed, ignore it", "name", oldObj.Name) + return false + } + if util2.CheckPodHasNamedPort(*newObj, "update") { + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if p, ok := e.Object.(*v1.Pod); ok { + log.V(1).Info("receive pod create event", "namespace", p.Namespace, 
"name", p.Name) + return util2.CheckPodHasNamedPort(*p, "delete") + } + return false + }, +} diff --git a/pkg/controllers/securitypolicy/pod_controller_test.go b/pkg/controllers/securitypolicy/pod_controller_test.go new file mode 100644 index 000000000..9c9ff876f --- /dev/null +++ b/pkg/controllers/securitypolicy/pod_controller_test.go @@ -0,0 +1,270 @@ +package securitypolicy + +import ( + "testing" + + "github.com/agiledragon/gomonkey" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + + util2 "github.com/vmware-tanzu/nsx-operator/pkg/util" +) + +func Test_getAllPodPortNames(t *testing.T) { + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Ports: []v1.ContainerPort{ + {Name: "test-port", ContainerPort: 8080}, + {Name: "test-port-2", ContainerPort: 80}, + }, + }, + }, + }, + } + type args struct { + pods []v1.Pod + } + tests := []struct { + name string + args args + want sets.String + }{ + {"1", args{[]v1.Pod{pod}}, sets.NewString("test-port", "test-port-2")}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, getAllPodPortNames(tt.args.pods), "getAllPodPortNames(%v)", tt.args.pods) + }) + } +} + +func TestEnqueueRequestForPod_Raw(t *testing.T) { + evt := event.CreateEvent{ + Object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + }, + }, + } + type fields struct { + Client client.Client + } + type args struct { + evt interface{} + q workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{evt, nil}}, + } + patches := gomonkey.ApplyFunc(reconcileSecurityPolicy, func(client client.Client, pods []v1.Pod, + q workqueue.RateLimitingInterface, + ) error { + return nil + }) + defer patches.Reset() + + patches2 := gomonkey.ApplyFunc(util2.IsSystemNamespace, func(client client.Client, ns string, obj *v1.Namespace, + ) (bool, error) { + return false, nil + }) + defer patches2.Reset() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForPod{} + e.Raw(tt.args.evt, tt.args.q) + }) + } +} + +func TestEnqueueRequestForPod_Create(t *testing.T) { + evt := event.CreateEvent{ + Object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + }, + }, + } + type fields struct { + Client client.Client + } + type args struct { + evt event.CreateEvent + q workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{evt, nil}}, + } + patches := gomonkey.ApplyFunc(reconcileSecurityPolicy, func(client client.Client, pods []v1.Pod, + q workqueue.RateLimitingInterface, + ) error { + return nil + }) + defer patches.Reset() + + patches2 := gomonkey.ApplyFunc(util2.IsSystemNamespace, func(client client.Client, ns string, obj *v1.Namespace, + ) (bool, error) { + return false, nil + }) + defer patches2.Reset() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForPod{} + e.Create(tt.args.evt, tt.args.q) + }) + } +} + +func TestEnqueueRequestForPod_Update(t *testing.T) { + evt := event.UpdateEvent{ + ObjectOld: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + }, + }, + ObjectNew: &v1.Pod{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-new", + }, + }, + } + type fields struct { + Client client.Client + } + type args struct { + evt event.UpdateEvent + q workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{evt, nil}}, + } + patches := gomonkey.ApplyFunc(reconcileSecurityPolicy, func(client client.Client, pods []v1.Pod, + q workqueue.RateLimitingInterface, + ) error { + return nil + }) + defer patches.Reset() + + patches2 := gomonkey.ApplyFunc(util2.IsSystemNamespace, func(client client.Client, ns string, obj *v1.Namespace, + ) (bool, error) { + return false, nil + }) + defer patches2.Reset() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForPod{} + e.Update(tt.args.evt, tt.args.q) + }) + } +} + +func TestEnqueueRequestForPod_Delete(t *testing.T) { + evt := event.DeleteEvent{ + Object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + }, + }, + } + type fields struct { + Client client.Client + } + type args struct { + evt event.DeleteEvent + q workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{evt, nil}}, + } + patches := gomonkey.ApplyFunc(reconcileSecurityPolicy, func(client client.Client, pods []v1.Pod, + q workqueue.RateLimitingInterface, + ) error { + return nil + }) + defer patches.Reset() + + patches2 := gomonkey.ApplyFunc(util2.IsSystemNamespace, func(client client.Client, ns string, obj *v1.Namespace, + ) (bool, error) { + return false, nil + }) + defer patches2.Reset() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForPod{} + e.Delete(tt.args.evt, tt.args.q) + }) + } +} + +func TestEnqueueRequestForPod_Generic(t *testing.T) { + evt := event.GenericEvent{ + Object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + }, + }, + } + type fields struct { + Client client.Client + } + type args struct { + evt event.GenericEvent + q workqueue.RateLimitingInterface + } + tests := []struct { + name string + fields fields + args args + }{ + {"1", fields{}, args{evt, nil}}, + } + patches := gomonkey.ApplyFunc(reconcileSecurityPolicy, func(client client.Client, pods []v1.Pod, + q workqueue.RateLimitingInterface, + ) error { + return nil + }) + defer patches.Reset() + + patches2 := gomonkey.ApplyFunc(util2.IsSystemNamespace, func(client client.Client, ns string, obj *v1.Namespace, + ) (bool, error) { + return false, nil + }) + defer patches2.Reset() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EnqueueRequestForPod{} + e.Generic(tt.args.evt, tt.args.q) + }) + } +} diff --git a/pkg/controllers/securitypolicy_controller.go b/pkg/controllers/securitypolicy/securitypolicy_controller.go similarity index 78% rename from pkg/controllers/securitypolicy_controller.go rename to pkg/controllers/securitypolicy/securitypolicy_controller.go index 94c6bc4c8..f69fcc47e 100644 --- a/pkg/controllers/securitypolicy_controller.go +++ b/pkg/controllers/securitypolicy/securitypolicy_controller.go @@ -1,7 +1,7 @@ /* Copyright © 2021 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package controllers +package securitypolicy import ( "context" @@ -9,31 +9,35 @@ import ( "fmt" "reflect" "runtime" - "strings" "time" v1 "k8s.io/api/core/v1" apimachineryruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" "github.com/vmware-tanzu/nsx-operator/pkg/metrics" _ "github.com/vmware-tanzu/nsx-operator/pkg/nsx/ratelimiter" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services" + util2 "github.com/vmware-tanzu/nsx-operator/pkg/nsx/util" "github.com/vmware-tanzu/nsx-operator/pkg/util" ) const ( - WCP_SYSTEM_RESOURCE = "vmware-system-shared-t1" - METRIC_RES_TYPE = "securitypolicy" + METRIC_RES_TYPE = "securitypolicy" ) var ( @@ -55,6 +59,14 @@ func updateFail(r *SecurityPolicyReconciler, c *context.Context, o *v1alpha1.Sec metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerUpdateFailTotal, METRIC_RES_TYPE) } +func k8sClient(mgr ctrl.Manager) client.Client { + var c client.Client + if mgr != nil { + c = mgr.GetClient() + } + return c +} + func deleteFail(r *SecurityPolicyReconciler, c *context.Context, o *v1alpha1.SecurityPolicy, e *error) { r.setSecurityPolicyReadyStatusFalse(c, o, e) metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerDeleteFailTotal, METRIC_RES_TYPE) @@ -65,7 +77,7 @@ func updateSuccess(r *SecurityPolicyReconciler, c *context.Context, o *v1alpha1. 
metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerUpdateSuccessTotal, METRIC_RES_TYPE) } -func deleteSuccess(r *SecurityPolicyReconciler, c *context.Context, o *v1alpha1.SecurityPolicy) { +func deleteSuccess(r *SecurityPolicyReconciler, _ *context.Context, _ *v1alpha1.SecurityPolicy) { metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerDeleteSuccessTotal, METRIC_RES_TYPE) } @@ -100,7 +112,7 @@ func (r *SecurityPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Reque log.V(1).Info("added finalizer on securitypolicy CR", "securitypolicy", req.NamespacedName) } - if isCRInSysNs, err := r.isCRRequestedInSystemNamespace(&ctx, &req); err != nil { + if isCRInSysNs, err := util.IsSystemNamespace(r.Client, req.Namespace, nil); err != nil { err = errors.New("fetch namespace associated with security policy CR failed") log.Error(err, "would retry exponentially", "securitypolicy", req.NamespacedName) updateFail(r, &ctx, obj, &err) @@ -113,6 +125,11 @@ func (r *SecurityPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Reque } if err := r.Service.OperateSecurityPolicy(obj); err != nil { + if errors.As(err, &util2.RestrictionError{}) { + log.Error(err, err.Error(), "securitypolicy", req.NamespacedName) + updateFail(r, &ctx, obj, &err) + return resultNormal, nil + } log.Error(err, "operate failed, would retry exponentially", "securitypolicy", req.NamespacedName) updateFail(r, &ctx, obj, &err) return resultRequeue, err @@ -122,7 +139,7 @@ func (r *SecurityPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Reque if controllerutil.ContainsFinalizer(obj, util.FinalizerName) { metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerDeleteTotal, METRIC_RES_TYPE) if err := r.Service.DeleteSecurityPolicy(obj.UID); err != nil { - log.Error(err, "delete failed, would retry exponentially", "securitypolicy", req.NamespacedName) + log.Error(err, "deletion failed, would retry exponentially", "securitypolicy", req.NamespacedName) deleteFail(r, &ctx, obj, &err) return resultRequeue, err } @@ -143,21 +160,6 @@ func (r *SecurityPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Reque return resultNormal, nil } -func (r *SecurityPolicyReconciler) isCRRequestedInSystemNamespace(ctx *context.Context, req *ctrl.Request) (bool, error) { - nsObj := &v1.Namespace{} - - if err := r.Client.Get(*ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Namespace}, nsObj); err != nil { - log.Error(err, "unable to fetch namespace associated with security policy CR", "req", req.NamespacedName) - return false, client.IgnoreNotFound(err) - } - - if isSysNs, ok := nsObj.Annotations[WCP_SYSTEM_RESOURCE]; ok && strings.ToLower(isSysNs) == "true" { - return true, nil - } - - return false, nil -} - func (r *SecurityPolicyReconciler) setSecurityPolicyReadyStatusTrue(ctx *context.Context, sec_policy *v1alpha1.SecurityPolicy) { newConditions := []v1alpha1.Condition{ { @@ -176,7 +178,10 @@ func (r *SecurityPolicyReconciler) setSecurityPolicyReadyStatusFalse(ctx *contex Type: v1alpha1.Ready, Status: v1.ConditionFalse, Message: "NSX Security Policy could not be created/updated", - Reason: fmt.Sprintf("Error occurred while processing the Security Policy CR. Please check the config and try again. Error: %v", *err), + Reason: fmt.Sprintf( + "error occurred while processing the Security Policy CR. 
Error: %v", + *err, + ), }, } r.updateSecurityPolicyStatusConditions(ctx, sec_policy, newConditions) @@ -191,7 +196,8 @@ func (r *SecurityPolicyReconciler) updateSecurityPolicyStatusConditions(ctx *con } if conditionsUpdated { r.Client.Status().Update(*ctx, sec_policy) - log.V(1).Info("Updated Security Policy CRD", "Name", sec_policy.Name, "Namespace", sec_policy.Namespace, "New Conditions", newConditions) + log.V(1).Info("updated Security Policy", "Name", sec_policy.Name, "Namespace", sec_policy.Namespace, + "New Conditions", newConditions) } } @@ -199,7 +205,7 @@ func (r *SecurityPolicyReconciler) mergeSecurityPolicyStatusCondition(ctx *conte matchedCondition := getExistingConditionOfType(newCondition.Type, sec_policy.Status.Conditions) if reflect.DeepEqual(matchedCondition, newCondition) { - log.V(2).Info("Conditions already match", "New Condition", newCondition, "Existing Condition", matchedCondition) + log.V(2).Info("conditions already match", "New Condition", newCondition, "Existing Condition", matchedCondition) return false } @@ -235,6 +241,16 @@ func (r *SecurityPolicyReconciler) setupWithManager(mgr ctrl.Manager) error { controller.Options{ MaxConcurrentReconciles: runtime.NumCPU(), }). + Watches( + &source.Kind{Type: &v1.Namespace{}}, + &EnqueueRequestForNamespace{Client: k8sClient(mgr)}, + builder.WithPredicates(PredicateFuncsNs), + ). + Watches( + &source.Kind{Type: &v1.Pod{}}, + &EnqueueRequestForPod{Client: k8sClient(mgr)}, + builder.WithPredicates(PredicateFuncsPod), + ). Complete(r) } @@ -291,3 +307,43 @@ func (r *SecurityPolicyReconciler) GarbageCollector(cancel chan bool, timeout ti } } } + +// It is triggered by associated controller like pod, namespace, etc. +func reconcileSecurityPolicy(client client.Client, pods []v1.Pod, q workqueue.RateLimitingInterface) error { + podPortNames := getAllPodPortNames(pods) + log.V(1).Info("pod named port", "podPortNames", podPortNames) + spList := &v1alpha1.SecurityPolicyList{} + err := client.List(context.Background(), spList) + if err != nil { + log.Error(err, "failed to list all the security policy") + return err + } + + for _, securityPolicy := range spList.Items { + shouldReconcile := false + for _, rule := range securityPolicy.Spec.Rules { + for _, port := range rule.Ports { + if port.Port.Type == intstr.String { + if podPortNames.Has(port.Port.StrVal) { + shouldReconcile = true + break + } + } + } + if shouldReconcile { + break + } + } + if shouldReconcile { + log.Info("reconcile security policy because of associated resource change", + "namespace", securityPolicy.Namespace, "name", securityPolicy.Name) + q.Add(reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: securityPolicy.Name, + Namespace: securityPolicy.Namespace, + }, + }) + } + } + return nil +} diff --git a/pkg/controllers/securitypolicy_controller_test.go b/pkg/controllers/securitypolicy/securitypolicy_controller_test.go similarity index 80% rename from pkg/controllers/securitypolicy_controller_test.go rename to pkg/controllers/securitypolicy/securitypolicy_controller_test.go index f77bbde86..36b7af6f2 100644 --- a/pkg/controllers/securitypolicy_controller_test.go +++ b/pkg/controllers/securitypolicy/securitypolicy_controller_test.go @@ -1,11 +1,12 @@ /* Copyright © 2021 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package controllers +package securitypolicy import ( "context" "errors" + "fmt" "reflect" "testing" "time" @@ -16,9 +17,11 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" controllerruntime "sigs.k8s.io/controller-runtime" - ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -118,43 +121,6 @@ func TestSecurityPolicyController_updateSecurityPolicyStatusConditions(t *testin } } -func TestSecurityPolicyController_isCRRequestedInSystemNamespace(t *testing.T) { - r := NewFakeSecurityPolicyReconciler() - ctx := context.TODO() - dummyNs := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "dummy"}} - r.Client.Create(ctx, dummyNs) - req := &controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "dummy", Name: "dummy"}} - - isCRInSysNs, err := r.isCRRequestedInSystemNamespace(&ctx, req) - if err != nil { - t.Fatalf(err.Error()) - } - if isCRInSysNs { - t.Fatalf("Non-system namespace identied as a system namespace") - } - r.Client.Delete(ctx, dummyNs) - - sysNs := &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "sys-ns", - Namespace: "sys-ns", - Annotations: map[string]string{"vmware-system-shared-t1": "true"}, - }, - } - r.Client.Create(ctx, sysNs) - req = &ctrl.Request{NamespacedName: types.NamespacedName{Namespace: "sys-ns", Name: "dummy"}} - - isCRInSysNs, err = r.isCRRequestedInSystemNamespace(&ctx, req) - - if err != nil { - t.Fatalf(err.Error()) - } - if !isCRInSysNs { - t.Fatalf("System namespace not identied as a system namespace") - } - r.Client.Delete(ctx, sysNs) -} - func TestSecurityPolicyReconciler_Reconcile(t *testing.T) { mockCtl := gomock.NewController(t) k8sClient := mock_client.NewMockClient(mockCtl) @@ -172,7 +138,7 @@ func TestSecurityPolicyReconciler_Reconcile(t *testing.T) { Service: service, } ctx := context.Background() - req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: "dummy", Name: "dummy"}} + req := controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "dummy", Name: "dummy"}} // not found errNotFound := errors.New("not found") @@ -191,7 +157,7 @@ func TestSecurityPolicyReconciler_Reconcile(t *testing.T) { }) defer patches.Reset() result, ret := r.Reconcile(ctx, req) - resultRequeueAfter5mins := ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute} + resultRequeueAfter5mins := controllerruntime.Result{Requeue: true, RequeueAfter: 5 * time.Minute} assert.Equal(t, nil, ret) assert.Equal(t, resultRequeueAfter5mins, result) @@ -330,7 +296,7 @@ func TestSecurityPolicyReconciler_Start(t *testing.T) { mockCtl := gomock.NewController(t) k8sClient := mock_client.NewMockClient(mockCtl) service := &services.SecurityPolicyService{} - var mgr ctrl.Manager + var mgr controllerruntime.Manager r := &SecurityPolicyReconciler{ Client: k8sClient, Scheme: nil, @@ -339,3 +305,92 @@ func TestSecurityPolicyReconciler_Start(t *testing.T) { err := r.Start(mgr) assert.NotEqual(t, err, nil) } + +func TestReconcileSecurityPolicy(t *testing.T) { + rule := v1alpha1.SecurityPolicyRule{ + Name: "rule-with-pod-selector", + AppliedTo: []v1alpha1.SecurityPolicyTarget{ + {}, + }, + Sources: []v1alpha1.SecurityPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"pod_selector_1": "pod_value_1"}, + }, + 
NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"ns1": "spA"}, + }, + }, + }, + Ports: []v1alpha1.SecurityPolicyPort{ + { + Protocol: v1.ProtocolUDP, + Port: intstr.IntOrString{Type: intstr.String, StrVal: "named-port"}, + }, + }, + } + spList := &v1alpha1.SecurityPolicyList{ + Items: []v1alpha1.SecurityPolicy{ + { + Spec: v1alpha1.SecurityPolicySpec{ + Rules: []v1alpha1.SecurityPolicyRule{ + rule, + }, + }, + }, + }, + } + pods := []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-1", + Namespace: "spA", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container-1", + Ports: []v1.ContainerPort{ + { + Name: "named-port", + }, + }, + }, + }, + }, + }, + } + + mockCtl := gomock.NewController(t) + k8sClient := mock_client.NewMockClient(mockCtl) + ctx := context.Background() + policyList := &v1alpha1.SecurityPolicyList{} + k8sClient.EXPECT().List(ctx, policyList).Return(nil).Do(func(_ context.Context, list client.ObjectList, + _ ...client.ListOption, + ) error { + a := list.(*v1alpha1.SecurityPolicyList) + a.Items = spList.Items + return nil + }) + + mockQueue := mock_client.NewMockInterface(mockCtl) + + type args struct { + client client.Client + pods []v1.Pod + q workqueue.RateLimitingInterface + } + tests := []struct { + name string + args args + wantErr assert.ErrorAssertionFunc + }{ + {"1", args{k8sClient, pods, mockQueue}, assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.wantErr(t, reconcileSecurityPolicy(tt.args.client, tt.args.pods, tt.args.q), + fmt.Sprintf("reconcileSecurityPolicy(%v, %v, %v)", tt.args.client, tt.args.pods, tt.args.q)) + }) + } +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index a89be72e4..ae6cba2bd 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -8,8 +8,6 @@ import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" zapcr "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/vmware-tanzu/nsx-operator/pkg/config" ) const logTmFmtWithMS = "2006-01-02 15:04:05.000" @@ -28,8 +26,7 @@ func init() { flag.IntVar(&logLevel, "log-level", 0, "Use zap-core log system.") } -func ZapLogger(cf *config.NSXOperatorConfig) logr.Logger { - +func ZapLogger() logr.Logger { encoderConf := zapcore.EncoderConfig{ CallerKey: "caller_line", LevelKey: "level_name", @@ -52,15 +49,14 @@ func ZapLogger(cf *config.NSXOperatorConfig) logr.Logger { } opts.BindFlags(flag.CommandLine) - if cf.Debug == true { - // In level.go of zapcore, higher levels are more important. - // However, in logr.go, a higher verbosity level means a log message is less important. - // So we need to reverse the order of the levels. - opts.Level = zapcore.Level(-1 * logLevel) - opts.ZapOpts = append(opts.ZapOpts, zap.AddCaller(), zap.AddCallerSkip(0)) - if logLevel > 0 { - opts.StacktraceLevel = zap.ErrorLevel - } + // In level.go of zapcore, higher levels are more important. + // However, in logr.go, a higher verbosity level means a log message is less important. + // So we need to reverse the order of the levels. 
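For readers unfamiliar with the zap/logr inversion the comment above describes, here is a minimal standalone sketch (not part of the patch; the verbosity value stands in for the operator's -log-level flag) of how negating the verbosity enables log.V(n) output through controller-runtime's zap integration:

package main

import (
	"go.uber.org/zap/zapcore"
	ctrl "sigs.k8s.io/controller-runtime"
	zapcr "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	verbosity := 2 // stands in for the operator's -log-level flag

	opts := zapcr.Options{
		// logr treats higher V(n) as less important, zap treats higher levels as
		// more important, so the verbosity is negated: V(n) is enabled iff n <= verbosity.
		Level: zapcore.Level(-1 * verbosity),
	}
	ctrl.SetLogger(zapcr.New(zapcr.UseFlagOptions(&opts)))

	log := ctrl.Log.WithName("example")
	log.Info("always printed") // V(0)
	log.V(1).Info("printed because verbosity >= 1")
	log.V(3).Info("suppressed because verbosity < 3")
}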
+ opts.Level = zapcore.Level(-1 * logLevel) + opts.ZapOpts = append(opts.ZapOpts, zap.AddCaller(), zap.AddCallerSkip(0)) + if logLevel > 0 { + opts.StacktraceLevel = zap.ErrorLevel } + return zapcr.New(zapcr.UseFlagOptions(&opts)) } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 7880a8628..5ba9ac84a 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -24,9 +24,7 @@ const ( ScrapeTimeout = 30 ) -var ( - log = logf.Log.WithName("metrics") -) +var log = logf.Log.WithName("metrics") var ( NSXOperatorHealthStats = prometheus.NewGauge( @@ -113,7 +111,7 @@ func Register(m ...prometheus.Collector) { // Initialize Prometheus metrics collection. func InitializePrometheusMetrics() { - log.Info("Initializing prometheus metrics") + log.Info("initializing prometheus metrics") Register( NSXOperatorHealthStats, ControllerSyncTotal, diff --git a/pkg/mock/controller-runtime/client/mock_workqueue.go b/pkg/mock/controller-runtime/client/mock_workqueue.go new file mode 100644 index 000000000..8e5308bab --- /dev/null +++ b/pkg/mock/controller-runtime/client/mock_workqueue.go @@ -0,0 +1,124 @@ +package mock_client + +import ( + reflect "reflect" + "time" + + gomock "github.com/golang/mock/gomock" +) + +type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +func (m *MockInterface) ShutDownWithDrain() { +} + +func (m *MockInterface) AddAfter(item interface{}, duration time.Duration) { +} + +func (m *MockInterface) AddRateLimited(item interface{}) { +} + +func (m *MockInterface) Forget(item interface{}) { +} + +func (m *MockInterface) NumRequeues(item interface{}) int { + return 0 +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface. +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance. +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockInterface) Add(arg0 interface{}) { + m.ctrl.T.Helper() +} + +// Add indicates an expected call of Add. +func (mr *MockInterfaceMockRecorder) Add(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockInterface)(nil).Add), arg0) +} + +// Done mocks base method. +func (m *MockInterface) Done(arg0 interface{}) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Done", arg0) +} + +// Done indicates an expected call of Done. +func (mr *MockInterfaceMockRecorder) Done(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockInterface)(nil).Done), arg0) +} + +// Get mocks base method. +func (m *MockInterface) Get() (interface{}, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get") + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get)) +} + +// Len mocks base method. 
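As context for this hand-written mock (a sketch of the usual client-go workqueue contract, not code from this patch): reconcileSecurityPolicy only ever calls Add on the queue, while a consumer such as controller-runtime roughly pulls items with Get, marks them Done, and either Forgets them on success or requeues them with AddRateLimited on error. The process function below is hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func process(req reconcile.Request) error {
	fmt.Println("reconciling", req.NamespacedName)
	return nil
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	// What reconcileSecurityPolicy does for each matching policy.
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns-a", Name: "sp-a"}})

	// A typical consumer loop on the other side of the queue.
	item, shutdown := q.Get()
	if shutdown {
		return
	}
	defer q.Done(item)

	if err := process(item.(reconcile.Request)); err != nil {
		q.AddRateLimited(item) // retry with backoff
		return
	}
	q.Forget(item) // clear the rate-limiter history on success
}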
+func (m *MockInterface) Len() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) + return ret0 +} + +// Len indicates an expected call of Len. +func (mr *MockInterfaceMockRecorder) Len() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockInterface)(nil).Len)) +} + +// ShutDown mocks base method. +func (m *MockInterface) ShutDown() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ShutDown") +} + +// ShutDown indicates an expected call of ShutDown. +func (mr *MockInterfaceMockRecorder) ShutDown() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShutDown", reflect.TypeOf((*MockInterface)(nil).ShutDown)) +} + +// ShuttingDown mocks base method. +func (m *MockInterface) ShuttingDown() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShuttingDown") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ShuttingDown indicates an expected call of ShuttingDown. +func (mr *MockInterfaceMockRecorder) ShuttingDown() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShuttingDown", reflect.TypeOf((*MockInterface)(nil).ShuttingDown)) +} diff --git a/pkg/nsx/endpoint.go b/pkg/nsx/endpoint.go index 8020120be..7b8ae787c 100644 --- a/pkg/nsx/endpoint.go +++ b/pkg/nsx/endpoint.go @@ -205,7 +205,7 @@ func (ep *Endpoint) XSRFToken() string { return ep.xXSRFToken } -// Status return status of endpoiont. +// Status return status of endpoint. func (ep *Endpoint) Status() EndpointStatus { ep.RLock() defer ep.RUnlock() diff --git a/pkg/nsx/services/builder.go b/pkg/nsx/services/builder.go index e1bb3e82d..8e42fce9b 100644 --- a/pkg/nsx/services/builder.go +++ b/pkg/nsx/services/builder.go @@ -12,13 +12,23 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + util2 "github.com/vmware-tanzu/nsx-operator/pkg/nsx/util" "github.com/vmware-tanzu/nsx-operator/pkg/util" ) -func (service *SecurityPolicyService) buildSecurityPolicy(obj *v1alpha1.SecurityPolicy) (*model.SecurityPolicy, - *[]model.Group, error) { +func (service *SecurityPolicyService) buildSecurityPolicy(obj *v1alpha1.SecurityPolicy) (*model.SecurityPolicy, *[]model.Group, error) { var nsxRules []model.Rule var nsxGroups []model.Group + + contains := func(groups []model.Group, group model.Group) bool { + for _, a := range groups { + if *a.Id == *group.Id { + return true + } + } + return false + } + log.V(1).Info("building the model SecurityPolicy from CR SecurityPolicy", "object", *obj) nsxSecurityPolicy := &model.SecurityPolicy{} @@ -34,6 +44,7 @@ func (service *SecurityPolicyService) buildSecurityPolicy(obj *v1alpha1.Security policyGroup, policyGroupPath, err := service.buildPolicyGroup(obj) if err != nil { + log.Error(err, "failed to build policy group", "policy", *obj) return nil, nil, err } @@ -44,14 +55,23 @@ func (service *SecurityPolicyService) buildSecurityPolicy(obj *v1alpha1.Security for ruleIdx, rule := range obj.Spec.Rules { // A rule containing named port may expand to multiple rules if the name maps to multiple port numbers. 
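To make the named-port comment above concrete, here is a small standalone sketch of the fan-out (the resolvedPort type and the literal values are illustrative stand-ins; the patch itself works with util.PortAddress and real pod data): one CR rule whose named port resolves to two different container ports becomes two NSX rules, each with its own ip-set destination group, which is why buildRuleAndGroups now returns slices.

package main

import "fmt"

// resolvedPort stands in for util.PortAddress in this patch: one concrete port
// number plus the IPs of the pods that expose it under the named port.
type resolvedPort struct {
	Port int
	IPs  []string
}

func main() {
	// Suppose the named port "web" resolves differently across the selected pods.
	resolved := []resolvedPort{
		{Port: 80, IPs: []string{"10.0.0.1"}},
		{Port: 8080, IPs: []string{"10.0.0.2", "10.0.0.3"}},
	}

	// One NSX rule (and one ip-set destination group) is emitted per resolved port.
	for i, r := range resolved {
		fmt.Printf("rule #%d: destination port %d, ip-set group for %v\n", i, r.Port, r.IPs)
	}
}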
- nsxRule, ruleGroups, err := service.buildRuleAndGroups(obj, &rule, ruleIdx) + expandRules, ruleGroups, err := service.buildRuleAndGroups(obj, &rule, ruleIdx) if err != nil { log.Error(err, "failed to build rule and groups", "rule", rule, "ruleIndex", ruleIdx) return nil, nil, err } - nsxRules = append(nsxRules, *nsxRule) - for _, ruleGroup := range *ruleGroups { - nsxGroups = append(nsxGroups, ruleGroup) + for _, nsxRule := range expandRules { + if nsxRule != nil { + nsxRules = append(nsxRules, *nsxRule) + } + } + for _, ruleGroup := range ruleGroups { + if ruleGroup != nil { + // If the group is already in the list, skip it. + if !contains(nsxGroups, *ruleGroup) { + nsxGroups = append(nsxGroups, *ruleGroup) + } + } } } nsxSecurityPolicy.Rules = nsxRules @@ -80,9 +100,14 @@ func (service *SecurityPolicyService) buildPolicyGroup(obj *v1alpha1.SecurityPol targetGroupCount, targetGroupTotalExprCount := 0, 0 criteriaCount, totalExprCount := 0, 0 var err error = nil - var errorMsg = "" + errorMsg := "" for i, target := range appliedTo { - criteriaCount, totalExprCount, err = service.updateTargetExpressions(obj, &target, &policyGroup, i) + criteriaCount, totalExprCount, err = service.updateTargetExpressions( + obj, + &target, + &policyGroup, + i, + ) if err == nil { targetGroupCount += criteriaCount targetGroupTotalExprCount += totalExprCount @@ -94,8 +119,11 @@ func (service *SecurityPolicyService) buildPolicyGroup(obj *v1alpha1.SecurityPol targetGroupCount, "total expressions of criteria", targetGroupTotalExprCount) if targetGroupCount > MaxCriteria { - errorMsg = fmt.Sprintf("total counts of policy target group criteria %d exceed NSX limit of %d", - targetGroupCount, MaxCriteria) + errorMsg = fmt.Sprintf( + "total counts of policy target group criteria %d exceed NSX limit of %d", + targetGroupCount, + MaxCriteria, + ) } else if targetGroupTotalExprCount > MaxTotalCriteriaExpressions { errorMsg = fmt.Sprintf("total expression counts in policy target group criteria %d exceed NSX limit of %d", targetGroupTotalExprCount, MaxTotalCriteriaExpressions) @@ -123,7 +151,7 @@ func (service *SecurityPolicyService) buildTargetTags(obj *v1alpha1.SecurityPoli }) serializedBytes, _ := json.Marshal(*targets) groupHash := util.Sha1(string(serializedBytes)) - var targetTags = []model.Tag{ + targetTags := []model.Tag{ { Scope: &tagScopeGroupType, Tag: &tagValueScope, @@ -156,7 +184,7 @@ func (service *SecurityPolicyService) buildBasicTags(obj *v1alpha1.SecurityPolic tagScopeNamespace := util.TagScopeNamespace tagScopeSecurityPolicyCRName := util.TagScopeSecurityPolicyCRName tagScopeSecurityPolicyCRUID := util.TagScopeSecurityPolicyCRUID - var tags = []model.Tag{ + tags := []model.Tag{ { Scope: &tagScopeCluster, Tag: &clusterTag, @@ -232,10 +260,9 @@ func (service *SecurityPolicyService) buildExpression(resource_type, member_type return expression } -func (service *SecurityPolicyService) buildExpressionsMatchExpression(matchExpressions []v1.LabelSelectorRequirement, - memberType string, expressions *data.ListValue) error { +func (service *SecurityPolicyService) buildExpressionsMatchExpression(matchExpressions []v1.LabelSelectorRequirement, memberType string, expressions *data.ListValue) error { var err error = nil - var errorMsg = "" + errorMsg := "" for _, expr := range matchExpressions { switch expr.Operator { @@ -291,191 +318,175 @@ func (service *SecurityPolicyService) buildPolicyGroupPath(obj *v1alpha1.Securit return fmt.Sprintf("/infra/domains/%s/groups/%s", getDomain(service), policyGroupID) } -func 
(service *SecurityPolicyService) buildRuleAndGroups(obj *v1alpha1.SecurityPolicy, - rule *v1alpha1.SecurityPolicyRule, ruleIdx int) (*model.Rule, *[]model.Group, error) { - sequenceNumber := int64(ruleIdx) - nsxRuleID := service.buildRuleID(obj, ruleIdx) - var nsxRuleName string - var ruleGroups []model.Group +func (service *SecurityPolicyService) buildRuleAndGroups(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, ruleIdx int) ([]*model.Rule, []*model.Group, error) { + var ruleGroups []*model.Group var nsxRuleAppliedGroup *model.Group var nsxRuleSrcGroup *model.Group var nsxRuleDstGroup *model.Group var nsxRuleAppliedGroupPath string var nsxRuleDstGroupPath string var nsxRuleSrcGroupPath string - var err error = nil + var err error - direction, err := getRuleDirection(rule) + ruleDirection, err := getRuleDirection(rule) if err != nil { return nil, nil, err } - ruleAction, err := getRuleAction(rule) + + // Since a named port may map to multiple port numbers, then it would return multiple rules. + // We use the destination port number of service entry to group the rules. + ipSetGroups, nsxRules, err := service.expandRule(obj, rule, ruleIdx) if err != nil { return nil, nil, err } - - if len(rule.Name) > 0 { - nsxRuleName = rule.Name - } else { - nsxRuleName = fmt.Sprintf("%s-%d", obj.ObjectMeta.Name, ruleIdx) + for _, g := range ipSetGroups { + ruleGroups = append(ruleGroups, g) } - nsxRule := model.Rule{ - Id: &nsxRuleID, - DisplayName: &nsxRuleName, - Direction: &direction, - SequenceNumber: &sequenceNumber, - Action: &ruleAction, - Services: []string{"ANY"}, - Tags: service.buildBasicTags(obj), - } - - if direction == "IN" { - if len(rule.Sources) > 0 { - nsxRuleSrcGroup, nsxRuleSrcGroupPath, err = service.buildRuleSrcGroup(obj, rule, ruleIdx) - if err == nil { - ruleGroups = append(ruleGroups, *nsxRuleSrcGroup) - } else { - log.Error(err, "failed to build rule source groups") + for _, nsxRule := range nsxRules { + if ruleDirection == "IN" { + nsxRuleSrcGroup, nsxRuleSrcGroupPath, nsxRuleDstGroupPath, err = service.buildRuleInGroup( + obj, + rule, + nsxRule, + ruleIdx, + ) + if err != nil { return nil, nil, err } - } else { - nsxRuleSrcGroupPath = "ANY" - } - nsxRuleDstGroupPath = "ANY" - } else if direction == "OUT" { - if len(rule.Destinations) > 0 { - nsxRuleDstGroup, nsxRuleDstGroupPath, err = service.buildRuleDstGroup(obj, rule, ruleIdx) - if err == nil { - ruleGroups = append(ruleGroups, *nsxRuleDstGroup) - } else { - log.Error(err, "failed to build rule destination groups") + ruleGroups = append(ruleGroups, nsxRuleSrcGroup) + } else if ruleDirection == "OUT" { + nsxRuleDstGroup, nsxRuleSrcGroupPath, nsxRuleDstGroupPath, err = service.buildRuleOutGroup(obj, rule, nsxRule, ruleIdx) + if err != nil { return nil, nil, err } - } else { - nsxRuleDstGroupPath = "ANY" - } - nsxRuleSrcGroupPath = "ANY" - } - nsxRule.SourceGroups = []string{nsxRuleSrcGroupPath} - nsxRule.DestinationGroups = []string{nsxRuleDstGroupPath} - ruleServiceEntries := service.buildRuleServiceEntries(&rule.Ports) - nsxRule.ServiceEntries = *ruleServiceEntries - - if len(rule.AppliedTo) > 0 { - nsxRuleAppliedGroup, nsxRuleAppliedGroupPath, err = service.buildRuleAppliedGroup(obj, rule, ruleIdx) - if err == nil { - ruleGroups = append(ruleGroups, *nsxRuleAppliedGroup) - } else { - log.Error(err, "failed to build rule applied groups") - return nil, nil, err + ruleGroups = append(ruleGroups, nsxRuleDstGroup) } - } else { - if len(obj.Spec.AppliedTo) == 0 { - err = errors.New("appliedTo needs to be set in 
either spec or rules") - log.Error(err, "error while validating appliedTo field") + nsxRule.SourceGroups = []string{nsxRuleSrcGroupPath} + nsxRule.DestinationGroups = []string{nsxRuleDstGroupPath} + + nsxRuleAppliedGroup, nsxRuleAppliedGroupPath, err = service.buildRuleAppliedToGroup( + obj, + rule, + ruleIdx, + nsxRuleSrcGroupPath, + nsxRuleDstGroupPath, + ) + if err != nil { return nil, nil, err - } else if nsxRuleSrcGroupPath == "ANY" && nsxRuleDstGroupPath == "ANY" { - // NSX-T manager will report error if all the rule's scope/src/dst are "ANY". - // So if the rule's scope is empty while policy's not, the rule's scope also - // will be set to the policy's scope to avoid this case. - nsxRuleAppliedGroupPath = service.buildPolicyGroupPath(obj) - } else { - nsxRuleAppliedGroupPath = "ANY" } + ruleGroups = append(ruleGroups, nsxRuleAppliedGroup) + nsxRule.Scope = []string{nsxRuleAppliedGroupPath} + + log.V(2).Info("built rule and groups", "nsxRuleAppliedGroup", nsxRuleAppliedGroup, + "~", nsxRuleSrcGroup, "nsxRuleDstGroup", nsxRuleDstGroup, + "action", *nsxRule.Action, "direction", *nsxRule.Direction) } - nsxRule.Scope = []string{nsxRuleAppliedGroupPath} + return nsxRules, ruleGroups, nil +} - log.V(1).Info("built rule and groups", "nsxRuleAppliedGroup", nsxRuleAppliedGroup, "nsxRuleSrcGroup", nsxRuleSrcGroup, "nsxRuleDstGroup", nsxRuleDstGroup, "action", *nsxRule.Action, "direction", *nsxRule.Direction) +func (service *SecurityPolicyService) buildRuleServiceEntries(port v1alpha1.SecurityPolicyPort, portAddress util2.PortAddress) *data.StructValue { + var portRange string + sourcePorts := data.NewListValue() + destinationPorts := data.NewListValue() - return &nsxRule, &ruleGroups, nil + // In case that the destination_port in NSX-T is 0. + endPort := port.EndPort + if endPort == 0 { + portRange = fmt.Sprint(portAddress.Port) + } else { + portRange = fmt.Sprintf("%d-%d", portAddress.Port, endPort) + } + destinationPorts.Add(data.NewStringValue(portRange)) + + serviceEntry := data.NewStructValue( + "", + map[string]data.DataValue{ + "source_ports": sourcePorts, + "destination_ports": destinationPorts, + "l4_protocol": data.NewStringValue(string(port.Protocol)), + "resource_type": data.NewStringValue("L4PortSetServiceEntry"), + // Adding the following default values to make it easy when compare the + // existing object from store and the new built object + "marked_for_delete": data.NewBooleanValue(false), + "overridden": data.NewBooleanValue(false), + }, + ) + log.V(2).Info("built service entry", "serviceEntry", serviceEntry) + return serviceEntry } -func (service *SecurityPolicyService) buildRuleServiceEntries(rulePorts *[]v1alpha1.SecurityPolicyPort) *[]*data.StructValue { - ruleServiceEntries := []*data.StructValue{} - for _, port := range *rulePorts { - var portRange string - startPort := port.Port.IntValue() - sourcePorts := data.NewListValue() - destinationPorts := data.NewListValue() - // In case that the destination_port in NSX-T is 0. 
- endPort := port.EndPort - if endPort == 0 { - portRange = fmt.Sprint(startPort) - } else { - portRange = fmt.Sprintf("%d-%d", startPort, endPort) - } - destinationPorts.Add(data.NewStringValue(portRange)) - serviceEntry := data.NewStructValue( - "", - map[string]data.DataValue{ - "source_ports": sourcePorts, - "destination_ports": destinationPorts, - "l4_protocol": data.NewStringValue(string(port.Protocol)), - "resource_type": data.NewStringValue("L4PortSetServiceEntry"), - // adding the following default values to make it easy when compare the existing object from store and the new built object - "marked_for_delete": data.NewBooleanValue(false), - "overridden": data.NewBooleanValue(false), - }, +func (service *SecurityPolicyService) buildRuleAppliedToGroup(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, ruleIdx int, nsxRuleSrcGroupPath string, nsxRuleDstGroupPath string) (*model.Group, string, error) { + var nsxRuleAppliedGroup *model.Group + var nsxRuleAppliedGroupPath string + var err error + if len(rule.AppliedTo) > 0 { + nsxRuleAppliedGroup, nsxRuleAppliedGroupPath, err = service.buildRuleAppliedGroupByRule( + obj, + rule, + ruleIdx, ) - ruleServiceEntries = append(ruleServiceEntries, serviceEntry) + if err != nil { + return nil, "", err + } + } else { + nsxRuleAppliedGroupPath, err = service.buildRuleAppliedGroupByPolicy(obj, + nsxRuleSrcGroupPath, nsxRuleDstGroupPath) + if err != nil { + return nil, "", err + } } - return &ruleServiceEntries + return nsxRuleAppliedGroup, nsxRuleAppliedGroupPath, nil } -func (service *SecurityPolicyService) buildRuleAppliedGroup(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, idx int) (*model.Group, string, error) { - var ruleAppliedGroupName string - appliedTo := rule.AppliedTo - ruleAppliedGroupID := fmt.Sprintf("sp_%s_%d_scope", obj.UID, idx) - if len(rule.Name) > 0 { - ruleAppliedGroupName = fmt.Sprintf("%s-scope", rule.Name) +func (service *SecurityPolicyService) buildRuleInGroup(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, nsxRule *model.Rule, ruleIdx int) (*model.Group, string, string, error) { + var nsxRuleSrcGroup *model.Group + var nsxRuleSrcGroupPath string + var nsxRuleDstGroupPath string + var err error + if len(rule.Sources) > 0 { + nsxRuleSrcGroup, nsxRuleSrcGroupPath, err = service.buildRuleSrcGroup(obj, rule, ruleIdx) + if err != nil { + return nil, "", "", err + } } else { - ruleAppliedGroupName = fmt.Sprintf("%s-%d-scope", obj.ObjectMeta.Name, idx) + nsxRuleSrcGroupPath = "ANY" } - targetTags := service.buildTargetTags(obj, &appliedTo, idx) - ruleAppliedGroupPath := fmt.Sprintf("/infra/domains/%s/groups/%s", getDomain(service), ruleAppliedGroupID) - ruleAppliedGroup := model.Group{ - Id: &ruleAppliedGroupID, - DisplayName: &ruleAppliedGroupName, - Tags: targetTags, + + if len(nsxRule.DestinationGroups) > 0 { + nsxRuleDstGroupPath = nsxRule.DestinationGroups[0] + } else { + nsxRuleDstGroupPath = "ANY" } + return nsxRuleSrcGroup, nsxRuleSrcGroupPath, nsxRuleDstGroupPath, nil +} - ruleGroupCount, ruleGroupTotalExprCount := 0, 0 - criteriaCount, totalExprCount := 0, 0 - var err error = nil - var errorMsg = "" - for i, target := range appliedTo { - criteriaCount, totalExprCount, err = service.updateTargetExpressions(obj, &target, &ruleAppliedGroup, i) - if err == nil { - ruleGroupCount += criteriaCount - ruleGroupTotalExprCount += totalExprCount +func (service *SecurityPolicyService) buildRuleOutGroup(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, nsxRule 
*model.Rule, ruleIdx int) (*model.Group, string, string, error) { + var nsxRuleDstGroup *model.Group + var nsxRuleSrcGroupPath string + var nsxRuleDstGroupPath string + var err error + if len(nsxRule.DestinationGroups) > 0 { + nsxRuleDstGroupPath = nsxRule.DestinationGroups[0] + } else { + if len(rule.Destinations) > 0 { + nsxRuleDstGroup, nsxRuleDstGroupPath, err = service.buildRuleDstGroup(obj, rule, ruleIdx) + if err != nil { + return nil, "", "", err + } } else { - return nil, "", err + nsxRuleDstGroupPath = "ANY" } } - log.V(1).Info("build rule applied group criteria", "total criteria", ruleGroupCount, "total expressions of criteria", ruleGroupTotalExprCount) - - if ruleGroupCount > MaxCriteria { - errorMsg = fmt.Sprintf("total counts of rule applied group criteria %d exceed NSX limit of %d", ruleGroupCount, MaxCriteria) - } else if ruleGroupTotalExprCount > MaxTotalCriteriaExpressions { - errorMsg = fmt.Sprintf("total expression counts in rule applied group criteria %d exceed NSX limit of %d", ruleGroupTotalExprCount, MaxTotalCriteriaExpressions) - } - - if len(errorMsg) != 0 { - err = errors.New(errorMsg) - log.Error(err, "validate rule applied group criteria nsx limit failed") - return nil, "", err - } - - return &ruleAppliedGroup, ruleAppliedGroupPath, nil + nsxRuleSrcGroupPath = "ANY" + return nsxRuleDstGroup, nsxRuleSrcGroupPath, nsxRuleDstGroupPath, nil } func (service *SecurityPolicyService) buildRuleID(obj *v1alpha1.SecurityPolicy, idx int) string { return fmt.Sprintf("sp_%s_%d", obj.UID, idx) } -func (service *SecurityPolicyService) buildRuleName(obj *v1alpha1.SecurityPolicy, - rule *v1alpha1.SecurityPolicyRule, idx int) string { +func (service *SecurityPolicyService) buildRuleName(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, idx int) string { if len(rule.Name) > 0 { return rule.Name } else { @@ -483,9 +494,7 @@ func (service *SecurityPolicyService) buildRuleName(obj *v1alpha1.SecurityPolicy } } -func (service *SecurityPolicyService) buildRuleAppliedGroupByPolicy( - obj *v1alpha1.SecurityPolicy, nsxRuleSrcGroupPath string, - nsxRuleDstGroupPath string) (string, error) { +func (service *SecurityPolicyService) buildRuleAppliedGroupByPolicy(obj *v1alpha1.SecurityPolicy, nsxRuleSrcGroupPath string, nsxRuleDstGroupPath string) (string, error) { var nsxRuleAppliedGroupPath string if len(obj.Spec.AppliedTo) == 0 { return "", errors.New("appliedTo needs to be set in either spec or rules") @@ -511,7 +520,11 @@ func (service *SecurityPolicyService) buildRuleAppliedGroupByRule(obj *v1alpha1. ruleAppliedGroupName = fmt.Sprintf("%s-%d-scope", obj.ObjectMeta.Name, idx) } targetTags := service.buildTargetTags(obj, &appliedTo, idx) - ruleAppliedGroupPath := fmt.Sprintf("/infra/domains/%s/groups/%s", getDomain(service), ruleAppliedGroupID) + ruleAppliedGroupPath := fmt.Sprintf( + "/infra/domains/%s/groups/%s", + getDomain(service), + ruleAppliedGroupID, + ) ruleAppliedGroup := model.Group{ Id: &ruleAppliedGroupID, DisplayName: &ruleAppliedGroupName, @@ -521,9 +534,14 @@ func (service *SecurityPolicyService) buildRuleAppliedGroupByRule(obj *v1alpha1. 
ruleGroupCount, ruleGroupTotalExprCount := 0, 0 criteriaCount, totalExprCount := 0, 0 var err error = nil - var errorMsg = "" + errorMsg := "" for i, target := range appliedTo { - criteriaCount, totalExprCount, err = service.updateTargetExpressions(obj, &target, &ruleAppliedGroup, i) + criteriaCount, totalExprCount, err = service.updateTargetExpressions( + obj, + &target, + &ruleAppliedGroup, + i, + ) if err == nil { ruleGroupCount += criteriaCount ruleGroupTotalExprCount += totalExprCount @@ -534,7 +552,11 @@ func (service *SecurityPolicyService) buildRuleAppliedGroupByRule(obj *v1alpha1. log.V(2).Info("build rule applied group criteria", "total criteria", ruleGroupCount, "total expressions of criteria", ruleGroupTotalExprCount) if ruleGroupCount > MaxCriteria { - errorMsg = fmt.Sprintf("total counts of rule applied group criteria %d exceed NSX limit of %d", ruleGroupCount, MaxCriteria) + errorMsg = fmt.Sprintf( + "total counts of rule applied group criteria %d exceed NSX limit of %d", + ruleGroupCount, + MaxCriteria, + ) } else if ruleGroupTotalExprCount > MaxTotalCriteriaExpressions { errorMsg = fmt.Sprintf("total expression counts in rule applied group criteria %d exceed NSX limit of %d", ruleGroupTotalExprCount, MaxTotalCriteriaExpressions) } @@ -556,8 +578,12 @@ func (service *SecurityPolicyService) buildRuleSrcGroup(obj *v1alpha1.SecurityPo } else { ruleSrcGroupName = fmt.Sprintf("%s-%d-src", obj.ObjectMeta.Name, idx) } - ruleSrcGroupPath := fmt.Sprintf("/infra/domains/%s/groups/%s", getDomain(service), ruleSrcGroupID) - peerTags := service.buildPeerTags(obj, &sources, idx) + ruleSrcGroupPath := fmt.Sprintf( + "/infra/domains/%s/groups/%s", + getDomain(service), + ruleSrcGroupID, + ) + peerTags := service.BuildPeerTags(obj, &sources, idx) ruleSrcGroup := model.Group{ Id: &ruleSrcGroupID, DisplayName: &ruleSrcGroupName, @@ -567,9 +593,14 @@ func (service *SecurityPolicyService) buildRuleSrcGroup(obj *v1alpha1.SecurityPo ruleSrcGroupCount, ruleSrcGroupTotalExprCount := 0, 0 criteriaCount, totalExprCount := 0, 0 var err error = nil - var errorMsg = "" + errorMsg := "" for i, peer := range sources { - criteriaCount, totalExprCount, err = service.updatePeerExpressions(obj, &peer, &ruleSrcGroup, i) + criteriaCount, totalExprCount, err = service.updatePeerExpressions( + obj, + &peer, + &ruleSrcGroup, + i, + ) if err == nil { ruleSrcGroupCount += criteriaCount ruleSrcGroupTotalExprCount += totalExprCount @@ -577,10 +608,14 @@ func (service *SecurityPolicyService) buildRuleSrcGroup(obj *v1alpha1.SecurityPo return nil, "", err } } - log.V(1).Info("build rule source group criteria", "total criteria", ruleSrcGroupCount, "total expressions of criteria", ruleSrcGroupTotalExprCount) + log.V(2).Info("build rule source group criteria", "total criteria", ruleSrcGroupCount, "total expressions of criteria", ruleSrcGroupTotalExprCount) if ruleSrcGroupCount > MaxCriteria { - errorMsg = fmt.Sprintf("total counts of rule source group criteria %d exceed NSX limit of %d", ruleSrcGroupCount, MaxCriteria) + errorMsg = fmt.Sprintf( + "total counts of rule source group criteria %d exceed NSX limit of %d", + ruleSrcGroupCount, + MaxCriteria, + ) } else if ruleSrcGroupTotalExprCount > MaxTotalCriteriaExpressions { errorMsg = fmt.Sprintf("total expression counts in source group criteria %d exceed NSX limit of %d", ruleSrcGroupTotalExprCount, MaxTotalCriteriaExpressions) } @@ -603,8 +638,12 @@ func (service *SecurityPolicyService) buildRuleDstGroup(obj *v1alpha1.SecurityPo } else { ruleDstGroupName = 
fmt.Sprintf("%s-%d-dst", obj.ObjectMeta.Name, idx) } - ruleDstGroupPath := fmt.Sprintf("/infra/domains/%s/groups/%s", getDomain(service), ruleDstGroupID) - peerTags := service.buildPeerTags(obj, &destinations, idx) + ruleDstGroupPath := fmt.Sprintf( + "/infra/domains/%s/groups/%s", + getDomain(service), + ruleDstGroupID, + ) + peerTags := service.BuildPeerTags(obj, &destinations, idx) ruleDstGroup := model.Group{ Id: &ruleDstGroupID, DisplayName: &ruleDstGroupName, @@ -614,9 +653,14 @@ func (service *SecurityPolicyService) buildRuleDstGroup(obj *v1alpha1.SecurityPo ruleDstGroupCount, ruleDstGroupTotalExprCount := 0, 0 criteriaCount, totalExprCount := 0, 0 var err error = nil - var errorMsg = "" + errorMsg := "" for i, peer := range destinations { - criteriaCount, totalExprCount, err = service.updatePeerExpressions(obj, &peer, &ruleDstGroup, i) + criteriaCount, totalExprCount, err = service.updatePeerExpressions( + obj, + &peer, + &ruleDstGroup, + i, + ) if err == nil { ruleDstGroupCount += criteriaCount ruleDstGroupTotalExprCount += totalExprCount @@ -624,10 +668,14 @@ func (service *SecurityPolicyService) buildRuleDstGroup(obj *v1alpha1.SecurityPo return nil, "", err } } - log.V(1).Info("build rule destination group criteria", "total criteria", ruleDstGroupCount, "total expressions of criteria", ruleDstGroupTotalExprCount) + log.V(2).Info("build rule destination group criteria", "total criteria", ruleDstGroupCount, "total expressions of criteria", ruleDstGroupTotalExprCount) if ruleDstGroupCount > MaxCriteria { - errorMsg = fmt.Sprintf("total counts of rule destination group criteria %d exceed NSX limit of %d", ruleDstGroupCount, MaxCriteria) + errorMsg = fmt.Sprintf( + "total counts of rule destination group criteria %d exceed NSX limit of %d", + ruleDstGroupCount, + MaxCriteria, + ) } else if ruleDstGroupTotalExprCount > MaxTotalCriteriaExpressions { errorMsg = fmt.Sprintf("total expression counts in rule destination group criteria %d exceed NSX limit of %d", ruleDstGroupTotalExprCount, MaxTotalCriteriaExpressions) } @@ -639,7 +687,38 @@ func (service *SecurityPolicyService) buildRuleDstGroup(obj *v1alpha1.SecurityPo return &ruleDstGroup, ruleDstGroupPath, err } -func (service *SecurityPolicyService) buildPeerTags(obj *v1alpha1.SecurityPolicy, peers *[]v1alpha1.SecurityPolicyPeer, idx int) []model.Tag { +// Build rule basic info, ruleIdx is the index of the rules of security policy, +// portIdx is the index of rule's ports, portAddressIdx is the index +// of multiple port number if one named port maps to multiple port numbers. 
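To illustrate the index scheme spelled out in the comment above (the UID and indices below are made up), the identifiers of one expanded rule compose as follows; this mirrors the fmt.Sprintf calls in buildRuleID, buildRuleName and buildRuleBasicInfo:

package main

import "fmt"

func main() {
	uid := "uidA" // CR UID, made up for the example
	ruleIdx, portIdx, portAddressIdx := 1, 0, 2

	nsxRuleID := fmt.Sprintf("sp_%s_%d", uid, ruleIdx)                      // buildRuleID
	rID := fmt.Sprintf("%s_%d_%d", nsxRuleID, portIdx, portAddressIdx)      // Id in buildRuleBasicInfo
	rName := fmt.Sprintf("%s-%d-%d", "rule-name", portIdx, portAddressIdx)  // DisplayName, "rule-name" from buildRuleName

	fmt.Println(rID)   // sp_uidA_1_0_2
	fmt.Println(rName) // rule-name-0-2
}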
+func (service *SecurityPolicyService) buildRuleBasicInfo(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, ruleIdx int, portIdx int, portAddressIdx int) (*model.Rule, error) { + nsxRuleID := service.buildRuleID(obj, ruleIdx) + nsxRuleName := service.buildRuleName(obj, rule, ruleIdx) + ruleAction, err := getRuleAction(rule) + if err != nil { + return nil, err + } + ruleDirection, err := getRuleDirection(rule) + if err != nil { + return nil, err + } + sequence := int64(ruleIdx) + rID := fmt.Sprintf("%s_%d_%d", nsxRuleID, portIdx, portAddressIdx) + rName := fmt.Sprintf("%s-%d-%d", nsxRuleName, portIdx, portAddressIdx) + + nsxRule := model.Rule{ + Id: &rID, + DisplayName: &rName, + Direction: &ruleDirection, + SequenceNumber: &sequence, + Action: &ruleAction, + Services: []string{"ANY"}, + Tags: service.buildBasicTags(obj), + } + log.V(1).Info("built rule basic info", "nsxRule", nsxRule) + return &nsxRule, nil +} + +func (service *SecurityPolicyService) BuildPeerTags(obj *v1alpha1.SecurityPolicy, peers *[]v1alpha1.SecurityPolicyPeer, idx int) []model.Tag { basicTags := service.buildBasicTags(obj) ruleID := service.buildRuleID(obj, idx) tagScopeGroupType := util.TagScopeGroupType @@ -654,7 +733,7 @@ func (service *SecurityPolicyService) buildPeerTags(obj *v1alpha1.SecurityPolicy }) serializedBytes, _ := json.Marshal(*peers) groupHash := util.Sha1(string(serializedBytes)) - var peerTags = []model.Tag{ + peerTags := []model.Tag{ { Scope: &tagScopeGroupType, Tag: &tagValueScope, @@ -677,12 +756,12 @@ func (service *SecurityPolicyService) buildPeerTags(obj *v1alpha1.SecurityPolicy func (service *SecurityPolicyService) updateTargetExpressions(obj *v1alpha1.SecurityPolicy, target *v1alpha1.SecurityPolicyTarget, group *model.Group, idx int) (int, int, error) { var err error = nil var tagValueExpression *data.StructValue = nil - var memberType = "SegmentPort" + memberType := "SegmentPort" var matchLabels map[string]string var matchExpressions *[]v1.LabelSelectorRequirement = nil var mergedMatchExpressions *[]v1.LabelSelectorRequirement = nil - var opInValueCount, totalCriteriaCount, totalExprCount = 0, 0, 0 - var matchLabelsCount, matchExpressionsCount = 0, 0 + opInValueCount, totalCriteriaCount, totalExprCount := 0, 0, 0 + matchLabelsCount, matchExpressionsCount := 0, 0 if target.PodSelector != nil && target.VMSelector != nil { errorMsg := "PodSelector and VMSelector are not allowed to set in one group" @@ -752,7 +831,12 @@ func (service *SecurityPolicyService) updateTargetExpressions(obj *v1alpha1.Secu } // Since cluster is set as default "Segment" memberType, So the final produced group criteria is always treated as a mixed criteria - totalCriteriaCount, totalExprCount, err = service.validateSelectorExpressions(matchLabelsCount, matchExpressionsCount, opInValueCount, true) + totalCriteriaCount, totalExprCount, err = service.validateSelectorExpressions( + matchLabelsCount, + matchExpressionsCount, + opInValueCount, + true, + ) if err != nil { return 0, 0, err } @@ -796,7 +880,7 @@ func (service *SecurityPolicyService) updateExpressionsMatchLabels(matchLabels m func (service *SecurityPolicyService) mergeSelectorMatchExpression(matchExpressions []v1.LabelSelectorRequirement) *[]v1.LabelSelectorRequirement { mergedMatchExpressions := make([]v1.LabelSelectorRequirement, 0) var mergedSelector v1.LabelSelectorRequirement - var labelSelectorMap = map[v1.LabelSelectorOperator]map[string][]string{} + labelSelectorMap := map[v1.LabelSelectorOperator]map[string][]string{} for _, d := range 
matchExpressions { _, exists := labelSelectorMap[d.Operator] @@ -804,10 +888,14 @@ func (service *SecurityPolicyService) mergeSelectorMatchExpression(matchExpressi labelSelectorMap[d.Operator] = map[string][]string{} } _, exists = labelSelectorMap[d.Operator][d.Key] - labelSelectorMap[d.Operator][d.Key] = append(labelSelectorMap[d.Operator][d.Key], d.Values...) + labelSelectorMap[d.Operator][d.Key] = append( + labelSelectorMap[d.Operator][d.Key], + d.Values...) if exists { - labelSelectorMap[d.Operator][d.Key] = util.RemoveDuplicateStr(labelSelectorMap[d.Operator][d.Key]) + labelSelectorMap[d.Operator][d.Key] = util.RemoveDuplicateStr( + labelSelectorMap[d.Operator][d.Key], + ) } } @@ -826,13 +914,12 @@ func (service *SecurityPolicyService) mergeSelectorMatchExpression(matchExpressi // Todo, refactor code when NSX support 'In' LabelSelector. // Given NSX currently doesn't support 'In' LabelSelector, to keep design simple, // only allow just one 'In' LabelSelector in matchExpressions with at most of five values in it. -func (service *SecurityPolicyService) validateSelectorOpIn(matchExpressions []v1.LabelSelectorRequirement, - matchLabels map[string]string) (int, error) { - var mexprInOpCount = 0 - var mexprInValueCount = 0 +func (service *SecurityPolicyService) validateSelectorOpIn(matchExpressions []v1.LabelSelectorRequirement, matchLabels map[string]string) (int, error) { + mexprInOpCount := 0 + mexprInValueCount := 0 var err error = nil - var errorMsg = "" - var exists = false + errorMsg := "" + exists := false var opInIndex int for i, expr := range matchExpressions { @@ -871,7 +958,7 @@ func (service *SecurityPolicyService) validateSelectorOpIn(matchExpressions []v1 func (service *SecurityPolicyService) validateNsSelectorOpNotIn(nsMatchExpressions []v1.LabelSelectorRequirement) error { var err error = nil - var errorMsg = "" + errorMsg := "" for _, expr := range nsMatchExpressions { if expr.Operator == v1.LabelSelectorOpNotIn { @@ -883,12 +970,11 @@ func (service *SecurityPolicyService) validateNsSelectorOpNotIn(nsMatchExpressio return err } -func (service *SecurityPolicyService) validateSelectorExpressions(matchLabelsCount int, - matchExpressionsCount int, opInValueCount int, mixedCriteria bool) (int, int, error) { +func (service *SecurityPolicyService) validateSelectorExpressions(matchLabelsCount int, matchExpressionsCount int, opInValueCount int, mixedCriteria bool) (int, int, error) { var err error = nil - var errorMsg = "" - var totalExprCount = 0 - var totalCriteria = 0 + errorMsg := "" + totalExprCount := 0 + totalCriteria := 0 // Check total count of expressions from LabelSelectors in one group criteria if matchExpressionsCount != 0 { @@ -898,8 +984,11 @@ func (service *SecurityPolicyService) validateSelectorExpressions(matchLabelsCou } if !mixedCriteria && totalExprCount > MaxCriteriaExpressions { - errorMsg = fmt.Sprintf("total count of labelSelectors expressions %d exceed NSX limit of %d in one criteria based on same member type", - totalExprCount, MaxCriteriaExpressions) + errorMsg = fmt.Sprintf( + "total count of labelSelectors expressions %d exceed NSX limit of %d in one criteria based on same member type", + totalExprCount, + MaxCriteriaExpressions, + ) } else if mixedCriteria && totalExprCount > MaxMixedCriteriaExpressions { errorMsg = fmt.Sprintf("total count of labelSelectors expressions %d exceed NSX limit of %d in one criteria inside a mixed member type", totalExprCount, MaxMixedCriteriaExpressions) @@ -924,9 +1013,11 @@ func (service *SecurityPolicyService) 
validateSelectorExpressions(matchLabelsCou } // Todo, refactor code when NSX support 'In' LabelSelector. -func (service *SecurityPolicyService) matchExpressionOpInExist(matchExpressions []v1.LabelSelectorRequirement) (bool, int) { - var operatorInIndex = -1 - var isFound = false +func (service *SecurityPolicyService) matchExpressionOpInExist( + matchExpressions []v1.LabelSelectorRequirement, +) (bool, int) { + operatorInIndex := -1 + isFound := false for i := 0; i < len(matchExpressions); i++ { // find operator 'In' if matchExpressions[i].Operator == v1.LabelSelectorOpIn { @@ -946,14 +1037,14 @@ func (service *SecurityPolicyService) matchExpressionOpInExist(matchExpressions // The above two expressions will be translated to: // => {k1 NotIn [a1,a2]} AND {k2 EQUALS a3} OR {k1 NotIn [a1,a2]} AND {k2 EQUALS a4} func (service *SecurityPolicyService) updateExpressionsMatchExpression(matchExpressions []v1.LabelSelectorRequirement, matchLabels map[string]string, - policyExpression *[]*data.StructValue, clusterExpression *data.StructValue, tagValueExpression *data.StructValue, - memberType string, expressions *data.ListValue) error { + policyExpression *[]*data.StructValue, clusterExpression *data.StructValue, tagValueExpression *data.StructValue, memberType string, expressions *data.ListValue, +) error { var err error = nil - var found, opInIdx = service.matchExpressionOpInExist(matchExpressions) + found, opInIdx := service.matchExpressionOpInExist(matchExpressions) if !found { err = service.buildExpressionsMatchExpression(matchExpressions, memberType, expressions) } else { - var expr = matchExpressions[opInIdx] + expr := matchExpressions[opInIdx] for i := 0; i < len(expr.Values); i++ { if i != 0 { service.appendOperatorIfNeeded(policyExpression, "OR") @@ -989,13 +1080,15 @@ func (service *SecurityPolicyService) updateExpressionsMatchExpression(matchExpr // Todo, refactor code when NSX support 'In' LabelSelector. 
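A worked example of the 'In' expansion described in the translation comment above (plain strings only, just to show the combinatorics; the real code emits NSX Condition structures instead):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One 'In' requirement with N values is expanded into N OR'ed criteria,
	// each pinned to a single value, matching the translation comment in builder.go.
	inKey := "k2"
	inValues := []string{"a3", "a4"}
	otherCriteria := "{k1 NotIn [a1,a2]}"

	var clauses []string
	for _, v := range inValues {
		clauses = append(clauses, fmt.Sprintf("%s AND {%s EQUALS %s}", otherCriteria, inKey, v))
	}
	fmt.Println(strings.Join(clauses, " OR "))
	// {k1 NotIn [a1,a2]} AND {k2 EQUALS a3} OR {k1 NotIn [a1,a2]} AND {k2 EQUALS a4}
}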
// Support Pod/VM Selector mixed with NamespaceSelector -func (service *SecurityPolicyService) updateMixedExpressionsMatchExpression(nsMatchExpressions []v1.LabelSelectorRequirement, nsMatchLabels map[string]string, - matchExpressions []v1.LabelSelectorRequirement, matchLabels map[string]string, - policyExpression *[]*data.StructValue, clusterExpression *data.StructValue, tagValueExpression *data.StructValue, expressions *data.ListValue) error { +func (service *SecurityPolicyService) updateMixedExpressionsMatchExpression(nsMatchExpressions []v1.LabelSelectorRequirement, + nsMatchLabels map[string]string, matchExpressions []v1.LabelSelectorRequirement, matchLabels map[string]string, + policyExpression *[]*data.StructValue, clusterExpression *data.StructValue, tagValueExpression *data.StructValue, + expressions *data.ListValue, +) error { var err error = nil - var opInIdx = 0 + opInIdx := 0 var opInMatchExpressions []v1.LabelSelectorRequirement = nil - var memberType = "" + memberType := "" nsFound, opInIdx1 := service.matchExpressionOpInExist(nsMatchExpressions) portFound, opInIdx2 := service.matchExpressionOpInExist(matchExpressions) @@ -1019,10 +1112,14 @@ func (service *SecurityPolicyService) updateMixedExpressionsMatchExpression(nsMa if !nsFound && !portFound { err = service.buildExpressionsMatchExpression(matchExpressions, "SegmentPort", expressions) if err == nil { - err = service.buildExpressionsMatchExpression(nsMatchExpressions, "Segment", expressions) + err = service.buildExpressionsMatchExpression( + nsMatchExpressions, + "Segment", + expressions, + ) } } else { - var expr = opInMatchExpressions[opInIdx] + expr := opInMatchExpressions[opInIdx] for i := 0; i < len(expr.Values); i++ { if i != 0 { service.appendOperatorIfNeeded(policyExpression, "OR") @@ -1070,15 +1167,15 @@ func (service *SecurityPolicyService) updateMixedExpressionsMatchExpression(nsMa func (service *SecurityPolicyService) updatePeerExpressions(obj *v1alpha1.SecurityPolicy, peer *v1alpha1.SecurityPolicyPeer, group *model.Group, idx int) (int, int, error) { var err error = nil - var errorMsg = "" + errorMsg := "" var tagValueExpression *data.StructValue = nil var memberType string var matchLabels map[string]string var matchExpressions *[]v1.LabelSelectorRequirement = nil var mergedMatchExpressions *[]v1.LabelSelectorRequirement = nil - var opInValueCount, totalCriteriaCount, totalExprCount = 0, 0, 0 - var matchLabelsCount, matchExpressionsCount = 0, 0 - var mixedNsSelector = false + opInValueCount, totalCriteriaCount, totalExprCount := 0, 0, 0 + matchLabelsCount, matchExpressionsCount := 0, 0 + mixedNsSelector := false if len(peer.IPBlocks) > 0 { addresses := data.NewListValue() @@ -1119,7 +1216,8 @@ func (service *SecurityPolicyService) updatePeerExpressions(obj *v1alpha1.Securi clusterMemberType := "Segment" // Setting cluster member type to "SegmentPort" for NamespaceSelector ensure the criteria is mixed // because the following conditions must have condition whose memberType=Segment when NamespaceSelector isn't empty - if peer.PodSelector == nil && peer.VMSelector == nil && peer.NamespaceSelector != nil && peer.NamespaceSelector.Size() > 0 { + if peer.PodSelector == nil && peer.VMSelector == nil && peer.NamespaceSelector != nil && + peer.NamespaceSelector.Size() > 0 { clusterMemberType = "SegmentPort" } @@ -1134,7 +1232,13 @@ func (service *SecurityPolicyService) updatePeerExpressions(obj *v1alpha1.Securi memberType = "SegmentPort" service.addOperatorIfNeeded(expressions, "AND") podExpression := 
service.buildExpression( - "Condition", memberType, fmt.Sprintf("%s|", util.TagScopeNCPPod), "Tag", "EQUALS", "EQUALS") + "Condition", + memberType, + fmt.Sprintf("%s|", util.TagScopeNCPPod), + "Tag", + "EQUALS", + "EQUALS", + ) if peer.NamespaceSelector == nil { podExpression = service.buildExpression( @@ -1158,7 +1262,13 @@ func (service *SecurityPolicyService) updatePeerExpressions(obj *v1alpha1.Securi memberType = "SegmentPort" service.addOperatorIfNeeded(expressions, "AND") vmExpression := service.buildExpression( - "Condition", memberType, fmt.Sprintf("%s|", util.TagScopeNCPVNETInterface), "Tag", "EQUALS", "EQUALS") + "Condition", + memberType, + fmt.Sprintf("%s|", util.TagScopeNCPVNETInterface), + "Tag", + "EQUALS", + "EQUALS", + ) if peer.NamespaceSelector == nil { vmExpression = service.buildExpression( @@ -1261,14 +1371,24 @@ func (service *SecurityPolicyService) updatePeerExpressions(obj *v1alpha1.Securi mergedMatchExpressions = service.mergeSelectorMatchExpression(*matchExpressions) matchExpressionsCount = len(*mergedMatchExpressions) - opInValueCount, err = service.validateSelectorOpIn(*mergedMatchExpressions, matchLabels) + opInValueCount, err = service.validateSelectorOpIn( + *mergedMatchExpressions, + matchLabels, + ) if err != nil { return 0, 0, err } - err = service.updateExpressionsMatchExpression(*mergedMatchExpressions, matchLabels, - &group.Expression, clusterExpression, tagValueExpression, memberType, expressions) + err = service.updateExpressionsMatchExpression( + *mergedMatchExpressions, + matchLabels, + &group.Expression, + clusterExpression, + tagValueExpression, + memberType, + expressions, + ) if err != nil { return 0, 0, err } @@ -1276,7 +1396,12 @@ func (service *SecurityPolicyService) updatePeerExpressions(obj *v1alpha1.Securi } // Since cluster is set as "Segment" or "SegmentPort" memberType, So the final produced group criteria is always treated as a mixed criteria - totalCriteriaCount, totalExprCount, err = service.validateSelectorExpressions(matchLabelsCount, matchExpressionsCount, opInValueCount, true) + totalCriteriaCount, totalExprCount, err = service.validateSelectorExpressions( + matchLabelsCount, + matchExpressionsCount, + opInValueCount, + true, + ) if err != nil { return 0, 0, err } diff --git a/pkg/nsx/services/builder_test.go b/pkg/nsx/services/builder_test.go index a2b78bd88..3300af149 100644 --- a/pkg/nsx/services/builder_test.go +++ b/pkg/nsx/services/builder_test.go @@ -41,8 +41,8 @@ func TestBuildSecurityPolicy(t *testing.T) { SequenceNumber: &seq0, Rules: []model.Rule{ { - DisplayName: &ruleNameWithPodSelector, - Id: &ruleID0, + DisplayName: &ruleNameWithPodSelector00, + Id: &ruleIDPort000, DestinationGroups: []string{"ANY"}, Direction: &nsxDirectionIn, Scope: []string{"/infra/domains/k8scl-one/groups/sp_uidA_0_scope"}, @@ -50,12 +50,11 @@ func TestBuildSecurityPolicy(t *testing.T) { Services: []string{"ANY"}, SourceGroups: []string{"/infra/domains/k8scl-one/groups/sp_uidA_0_src"}, Action: &nsxActionAllow, - ServiceEntries: []*data.StructValue{}, Tags: basicTags, }, { - DisplayName: &ruleNameWithNsSelector, - Id: &ruleID1, + DisplayName: &ruleNameWithNsSelector00, + Id: &ruleIDPort100, DestinationGroups: []string{"ANY"}, Direction: &nsxDirectionIn, Scope: []string{"ANY"}, @@ -80,8 +79,8 @@ func TestBuildSecurityPolicy(t *testing.T) { SequenceNumber: &seq0, Rules: []model.Rule{ { - DisplayName: &ruleNameWithVMSelector, - Id: &ruleID0, + DisplayName: &ruleNameWithVMSelector00, + Id: &ruleIDPort000, DestinationGroups: 
[]string{"/infra/domains/k8scl-one/groups/sp_uidA_0_dst"}, Direction: &nsxDirectionOut, Scope: []string{"/infra/domains/k8scl-one/groups/sp_uidA_0_scope"}, @@ -89,12 +88,11 @@ func TestBuildSecurityPolicy(t *testing.T) { Services: []string{"ANY"}, SourceGroups: []string{"ANY"}, Action: &nsxActionDrop, - ServiceEntries: []*data.StructValue{}, Tags: basicTags, }, { - DisplayName: &ruleNameWithNsSelector, - Id: &ruleID1, + DisplayName: &ruleNameWithNsSelector00, + Id: &ruleIDPort100, DestinationGroups: []string{"/infra/domains/k8scl-one/groups/sp_uidA_1_dst"}, Direction: &nsxDirectionOut, Scope: []string{"ANY"}, @@ -102,13 +100,12 @@ func TestBuildSecurityPolicy(t *testing.T) { Services: []string{"ANY"}, SourceGroups: []string{"ANY"}, Action: &nsxActionDrop, - ServiceEntries: []*data.StructValue{}, Tags: basicTags, }, { - DisplayName: &ruleNameWithIpBlock, - Id: &ruleID2, + DisplayName: &ruleNameWithIpBlock00, + Id: &ruleIDPort200, DestinationGroups: []string{"/infra/domains/k8scl-one/groups/sp_uidA_2_dst"}, Direction: &nsxDirectionOut, Scope: []string{"ANY"}, @@ -116,7 +113,6 @@ func TestBuildSecurityPolicy(t *testing.T) { Services: []string{"ANY"}, SourceGroups: []string{"ANY"}, Action: &nsxActionDrop, - ServiceEntries: []*data.StructValue{}, Tags: basicTags, }, }, @@ -271,7 +267,7 @@ func TestBuildPeerTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expectedTags, service.buildPeerTags(tt.inputPolicy, &tt.inputPolicy.Spec.Rules[0].Sources, tt.inputIndex)) + assert.Equal(t, tt.expectedTags, service.BuildPeerTags(tt.inputPolicy, &tt.inputPolicy.Spec.Rules[0].Sources, tt.inputIndex)) }) } } @@ -406,7 +402,7 @@ func TestValidateSelectorExpressions(t *testing.T) { assert.Equal(t, 2, totalCriteriaCount) assert.Equal(t, 30, totalExprCount) - // Case: total count of expressions exceed NSX limit '15' in one criterion mixed criteria + // Case: total count of expressions exceed NSX limit '15' in one criterion with mixed member type matchExpressionsCount = 13 _, _, err = service.validateSelectorExpressions(matchLabelsCount, matchExpressionsCount, opInValueCount, true) assert.NotEqual(t, nil, err) @@ -466,7 +462,7 @@ func TestValidateSelectorOpIn(t *testing.T) { assert.NotEqual(t, nil, err) assert.Equal(t, 6, opInValueCount) - // Case: matchLabels has duplication expression with match-expression operator 'In' + // Case: matchLabels has duplication expression with matchExpressions operator 'In' matchLabels = make(map[string]string) matchLabels["k1"] = "a5" matchExpressions[0].Values = []string{ diff --git a/pkg/nsx/services/compare.go b/pkg/nsx/services/compare.go index 10049bc52..3bb308a62 100644 --- a/pkg/nsx/services/compare.go +++ b/pkg/nsx/services/compare.go @@ -2,10 +2,11 @@ package services import ( "encoding/json" - "sort" "github.com/vmware/vsphere-automation-sdk-go/runtime/data/serializers/cleanjson" "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + + util2 "github.com/vmware-tanzu/nsx-operator/pkg/util" ) func SecurityPolicyEqual(existingSecurityPolicy *model.SecurityPolicy, securityPolicy *model.SecurityPolicy) bool { @@ -14,32 +15,31 @@ func SecurityPolicyEqual(existingSecurityPolicy *model.SecurityPolicy, securityP if string(s1) == string(s2) { return true } - log.Info("security policy diff", "nsx sp", s1, "k8s sp", s2) + log.V(1).Info( + "security policies differ", + "current NSX security policy", simplifySecurityPolicy(existingSecurityPolicy), + "desired NSX security policy", 
simplifySecurityPolicy(securityPolicy), + ) return false } func RulesEqual(existingRules []model.Rule, rules []model.Rule) (bool, []model.Rule) { - // sort the rules by id, otherwise expandRule may return different results, only the sequence of the - // rule is different, so sort by port number, and it avoids the needless updates. - var sortRules = func(rules []model.Rule) { - sort.Slice(rules, func(i, j int) bool { - return *(rules[i].Id) > *(rules[j].Id) - }) - } - sortRules(existingRules) - sortRules(rules) - isEqual := true - // legacyRules means the rules that are not in the new rules, we should destroy them. var legacyRules []model.Rule + var newRuleIds []string + for _, rule := range rules { + newRuleIds = append(newRuleIds, *rule.Id) + } - diffIndex := len(existingRules) - len(rules) - if diffIndex != 0 { - isEqual = false - if diffIndex > 0 { - legacyRules = append(legacyRules, existingRules[diffIndex:]...) + for _, existingRule := range existingRules { + if !util2.Contains(newRuleIds, *existingRule.Id) { + isEqual = false + legacyRules = append(legacyRules, existingRule) } - return isEqual, legacyRules + } + + if !isEqual || len(existingRules) != len(rules) { + return false, legacyRules } isEqual = RulesEqualDetail(existingRules, rules) @@ -55,7 +55,11 @@ func RulesEqualDetail(existingRules []model.Rule, rules []model.Rule) bool { s1, _ := dataValueToJSONEncoder.Encode(r1) s2, _ := dataValueToJSONEncoder.Encode(r2) if s1 != s2 { - log.Info("", "nsx rule", s1, "k8s rule", s2) + log.V(1).Info( + "rules differ", + "current NSX rule", simplifyRule(&existingRules[i]), + "desired NSX rule", simplifyRule(&rules[i]), + ) isEqual = false break } @@ -63,26 +67,41 @@ func RulesEqualDetail(existingRules []model.Rule, rules []model.Rule) bool { return isEqual } -func GroupsEqual(existingGroups []model.Group, groups []model.Group) bool { - var sortGroups = func(groups []model.Group) { - sort.Slice(groups, func(i, j int) bool { - return *(groups[i].Id) > *(groups[j].Id) - }) +func GroupsEqual(existingGroups []model.Group, groups []model.Group) (bool, []model.Group) { + isEqual := true + var legacyGroups []model.Group + var newGroupIds []string + for _, group := range groups { + newGroupIds = append(newGroupIds, *group.Id) } - sortGroups(existingGroups) - sortGroups(groups) - if len(existingGroups) != len(groups) { - return false + for _, existingGroup := range existingGroups { + if !util2.Contains(newGroupIds, *existingGroup.Id) { + isEqual = false + legacyGroups = append(legacyGroups, existingGroup) + } + } + + if !isEqual || len(existingGroups) != len(groups) { + return false, legacyGroups } - for i := 0; i < len(existingGroups); i++ { - g1, _ := json.Marshal(simplifyGroup(&existingGroups[i])) - g2, _ := json.Marshal(simplifyGroup(&groups[i])) - if string(g1) != string(g2) { - return false + + for i := 0; i < len(groups); i++ { + g1, _ := simplifyGroup(&existingGroups[i]).GetDataValue__() + g2, _ := simplifyGroup(&groups[i]).GetDataValue__() + var dataValueToJSONEncoder = cleanjson.NewDataValueToJsonEncoder() + s1, _ := dataValueToJSONEncoder.Encode(g1) + s2, _ := dataValueToJSONEncoder.Encode(g2) + if s1 != s2 { + log.V(1).Info( + "groups differ", + "current NSX group", simplifyGroup(&existingGroups[i]), + "desired NSX group", simplifyGroup(&groups[i]), + ) + return false, legacyGroups } } - return true + return true, nil } // simplifySecurityPolicy is used for abstract the key properties from model.SecurityPolicy, so that @@ -119,5 +138,6 @@ func simplifyGroup(group *model.Group) 
*model.Group { Id: group.Id, DisplayName: group.Id, Tags: group.Tags, + Expression: group.Expression, } } diff --git a/pkg/nsx/services/compare_test.go b/pkg/nsx/services/compare_test.go index 6dfc29a87..fffdc113a 100644 --- a/pkg/nsx/services/compare_test.go +++ b/pkg/nsx/services/compare_test.go @@ -170,7 +170,8 @@ func TestGroupsEqual(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expectedResult, GroupsEqual(tt.inputGroup1, tt.inputGroup2)) + isEqual, _ := GroupsEqual(tt.inputGroup1, tt.inputGroup2) + assert.Equal(t, tt.expectedResult, isEqual) }, ) } diff --git a/pkg/nsx/services/expand.go b/pkg/nsx/services/expand.go new file mode 100644 index 000000000..d84340cd5 --- /dev/null +++ b/pkg/nsx/services/expand.go @@ -0,0 +1,324 @@ +package services + +import ( + "context" + "errors" + "fmt" + + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + v1 "k8s.io/api/core/v1" + meta1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + util2 "github.com/vmware-tanzu/nsx-operator/pkg/nsx/util" + "github.com/vmware-tanzu/nsx-operator/pkg/util" +) + +// When a rule contains named port, we should consider whether the rule should be expanded to +// multiple rules if the port name maps to conflicted port numbers. +func (service *SecurityPolicyService) expandRule(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, + ruleIdx int) ([]*model.Group, []*model.Rule, error) { + var nsxRules []*model.Rule + var nsxGroups []*model.Group + + if len(rule.Ports) == 0 { + nsxRule, err := service.buildRuleBasicInfo(obj, rule, ruleIdx, 0, 0) + if err != nil { + return nil, nil, err + } + nsxRules = append(nsxRules, nsxRule) + } + for portIdx, port := range rule.Ports { + nsxGroups2, nsxRules2, err := service.expandRuleByPort(obj, rule, ruleIdx, port, portIdx) + if err != nil { + return nil, nil, err + } + nsxGroups = append(nsxGroups, nsxGroups2...) + nsxRules = append(nsxRules, nsxRules2...) + } + return nsxGroups, nsxRules, nil +} + +func (service *SecurityPolicyService) expandRuleByPort(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, + ruleIdx int, port v1alpha1.SecurityPolicyPort, portIdx int) ([]*model.Group, []*model.Rule, error) { + var err error + var startPort []util2.PortAddress + var nsxGroups []*model.Group + var nsxRules []*model.Rule + + // Use PortAddress to handle normal port and named port, if it only contains int value Port, + // then it is a normal port. If it contains a list of IPs, it is a named port. + if port.Port.Type == intstr.Int { + startPort = append(startPort, util2.PortAddress{Port: port.Port.IntValue()}) + } else { + // endPort can only be defined if port is also defined. Both ports must be numeric. 
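For illustration, this numeric-vs-named port restriction can be sketched with intstr; the snippet below is not part of the patch, and securityPolicyPort stands in for v1alpha1.SecurityPolicyPort.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// securityPolicyPort stands in for v1alpha1.SecurityPolicyPort; only the two
// fields relevant to the restriction are reproduced here.
type securityPolicyPort struct {
	Port    intstr.IntOrString
	EndPort int
}

// validatePortRange mirrors the check: EndPort is only meaningful when Port is
// numeric, because a named port already resolves to concrete container ports
// per pod.
func validatePortRange(p securityPolicyPort) error {
	if p.Port.Type == intstr.Int || p.EndPort == 0 {
		return nil
	}
	return fmt.Errorf("endPort is set but port %q is a named port", p.Port.String())
}

func main() {
	fmt.Println(validatePortRange(securityPolicyPort{Port: intstr.FromInt(8080), EndPort: 8090}))      // <nil>
	fmt.Println(validatePortRange(securityPolicyPort{Port: intstr.FromString("http"), EndPort: 8090})) // error
}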
+ if port.EndPort != 0 { + return nil, nil, util2.RestrictionError{Desc: "endPort can only be defined if port is also numeric."} + } + startPort, err = service.resolveNamedPort(obj, rule, port) + if err != nil { + // Update the stale ip set group if stale ips exist + if errors.As(err, &util2.NoEffectiveOption{}) { + indexResults, err2 := service.GroupStore.ByIndex(util.TagScopeRuleID, service.buildRuleID(obj, ruleIdx)) + if err2 != nil { + return nil, nil, err2 + } + var ipSetGroup model.Group + for _, indexResult := range indexResults { + ipSetGroup = indexResult.(model.Group) + ipSetGroup.Expression = nil // clear the stale ips + err3 := service.createOrUpdateGroups([]model.Group{ipSetGroup}) + if err3 != nil { + return nil, nil, err3 + } + } + } + return nil, nil, err + } + } + + for portAddressIdx, portAddress := range startPort { + gs, r, err := service.expandRuleByService(obj, rule, ruleIdx, port, portIdx, portAddress, portAddressIdx) + if err != nil { + return nil, nil, err + } + nsxRules = append(nsxRules, r) + nsxGroups = append(nsxGroups, gs...) + } + return nsxGroups, nsxRules, nil +} + +func (service *SecurityPolicyService) expandRuleByService(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, ruleIdx int, + port v1alpha1.SecurityPolicyPort, portIdx int, portAddress util2.PortAddress, portAddressIdx int) ([]*model.Group, *model.Rule, error) { + var nsxGroups []*model.Group + + nsxRule, err := service.buildRuleBasicInfo(obj, rule, ruleIdx, portIdx, portAddressIdx) + if err != nil { + return nil, nil, err + } + + var ruleServiceEntries []*data.StructValue + serviceEntry := service.buildRuleServiceEntries(port, portAddress) + ruleServiceEntries = append(ruleServiceEntries, serviceEntry) + nsxRule.ServiceEntries = ruleServiceEntries + + // If portAddress contains a list of IPs, we should build an ip set group for the rule. + if len(portAddress.IPs) > 0 { + ruleIPSetGroup := service.buildRuleIPSetGroup(obj, rule, nsxRule, portAddress.IPs, ruleIdx) + groupPath := fmt.Sprintf( + "/infra/domains/%s/groups/%s", + getDomain(service), + *ruleIPSetGroup.Id, + ) + nsxRule.DestinationGroups = []string{groupPath} + log.V(2).Info("built ruleIPSetGroup", "ruleIPSetGroup", ruleIPSetGroup) + nsxGroups = append(nsxGroups, ruleIPSetGroup) + } + log.V(2).Info("built rule by service entry", "rule", nsxRule) + return nsxGroups, nsxRule, nil +} + +// Resolve a named port to port number by rule and policy selector. +// e.g. "http" -> [{"80":['1.1.1.1', '2.2.2.2']}, {"443":['3.3.3.3']}] +func (service *SecurityPolicyService) resolveNamedPort(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, + spPort v1alpha1.SecurityPolicyPort) ([]util2.PortAddress, error) { + var portAddress []util2.PortAddress + + podSelectors, err := service.getPodSelectors(obj, rule) + if err != nil { + return nil, err + } + + for _, podSelector := range podSelectors { + podsList := &v1.PodList{} + log.V(2).Info("port", "podSelector", podSelector) + err := service.Client.List(context.Background(), podsList, &podSelector) + if err != nil { + return nil, err + } + for _, pod := range podsList.Items { + addr, err := service.resolvePodPort(pod, &spPort) + if errors.As(err, &util2.PodIPNotFound{}) || errors.As(err, &util2.PodNotRunning{}) { + return nil, err + } + portAddress = append(portAddress, addr...) 
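For reference, the mapping described in the resolveNamedPort doc comment ("http" -> port/IP pairs) is produced by MergeAddressByPort, added under pkg/nsx/util later in this patch; a small usage sketch with made-up pod IPs:

package main

import (
	"fmt"

	nsxutil "github.com/vmware-tanzu/nsx-operator/pkg/nsx/util"
)

func main() {
	// Three pods expose the named port "http": two on 80, one on 443.
	resolved := []nsxutil.PortAddress{
		{Port: 80, IPs: []string{"1.1.1.1"}},
		{Port: 443, IPs: []string{"3.3.3.3"}},
		{Port: 80, IPs: []string{"2.2.2.2"}},
	}
	// MergeAddressByPort groups the IPs by port number and returns the entries
	// sorted by port, so one NSX rule is expanded per port number.
	merged := nsxutil.MergeAddressByPort(resolved)
	fmt.Println(merged) // [{80 [1.1.1.1 2.2.2.2]} {443 [3.3.3.3]}]
}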
+ } + } + + if len(portAddress) == 0 { + return nil, util2.NoFilteredPod{ + Desc: "no pod has the corresponding named port, please check the pod selector or labels of CR", + } + } + return util2.MergeAddressByPort(portAddress), nil +} + +// Check port name and protocol, only when the pod is really running, and it does have effective ip. +func (service *SecurityPolicyService) resolvePodPort(pod v1.Pod, spPort *v1alpha1.SecurityPolicyPort) ([]util2.PortAddress, error) { + var addr []util2.PortAddress + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + log.V(2).Info("resolvePodPort", "namespace", pod.Namespace, "pod_name", pod.Name, + "port_name", port.Name, "containerPort", port.ContainerPort, + "protocol", port.Protocol, "podIP", pod.Status.PodIP) + if port.Name == spPort.Port.String() && port.Protocol == spPort.Protocol { + if pod.Status.Phase != "Running" { + errMsg := fmt.Sprintf("pod %s/%s is not running", pod.Namespace, pod.Name) + return nil, util2.PodNotRunning{Desc: errMsg} + } + if pod.Status.PodIP == "" { + errMsg := fmt.Sprintf("pod %s/%s ip not initialized", pod.Namespace, pod.Name) + return nil, util2.PodIPNotFound{Desc: errMsg} + } + addr = append( + addr, + util2.PortAddress{Port: int(port.ContainerPort), IPs: []string{pod.Status.PodIP}}, + ) + } + } + } + return addr, nil +} + +// Build an ip set group for NSX. +func (service *SecurityPolicyService) buildRuleIPSetGroup(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule, ruleModel *model.Rule, + ips []string, ruleIdx int) *model.Group { + ipSetGroup := model.Group{} + + ipSetGroupID := fmt.Sprintf("%s_ipset", *ruleModel.Id) + ipSetGroup.Id = &ipSetGroupID + ipSetGroupName := fmt.Sprintf("%s-ipset", *ruleModel.DisplayName) + ipSetGroup.DisplayName = &ipSetGroupName + peerTags := service.BuildPeerTags(obj, &rule.Destinations, ruleIdx) + ipSetGroup.Tags = peerTags + + addresses := data.NewListValue() + for _, ip := range ips { + addresses.Add(data.NewStringValue(ip)) + } + + blockExpression := data.NewStructValue( + "", + map[string]data.DataValue{ + "resource_type": data.NewStringValue("IPAddressExpression"), + "ip_addresses": addresses, + }, + ) + ipSetGroup.Expression = append(ipSetGroup.Expression, blockExpression) + return &ipSetGroup +} + +// Different direction rule decides different target of the traffic, we should carefully get +// the destination pod selector and namespaces. Named port only cares about the destination +// pod selector, so we use policy's AppliedTo or rule's AppliedTo when "IN" direction and +// rule's DestinationSelector when "OUT" direction. +func (service *SecurityPolicyService) getPodSelectors(obj *v1alpha1.SecurityPolicy, rule *v1alpha1.SecurityPolicyRule) ([]client.ListOptions, error) { + // Port means the target of traffic, so we should select the pod by rule direction, + // as for IN direction, we judge by rule's or policy's AppliedTo, + // as for OUT direction, then by rule's destinations. 
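A minimal sketch of the selector-to-ListOptions conversion that getPodSelectors performs below; the namespace "ns1" and the label are hypothetical values:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// A pod selector as it appears on a SecurityPolicy rule or target.
	podSelector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
	}

	// The label selector narrows the pods; the namespace comes either from the
	// policy (IN direction) or from the resolved namespace selector (OUT).
	selector, err := metav1.LabelSelectorAsSelector(podSelector)
	if err != nil {
		panic(err)
	}
	opts := client.ListOptions{LabelSelector: selector, Namespace: "ns1"}
	fmt.Println(opts.Namespace, opts.LabelSelector.String()) // ns1 app=web
}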
+ // LabelSelect may return multiple namespaces + var finalSelectors []client.ListOptions + ruleDirection, err := getRuleDirection(rule) + if err != nil { + return nil, err + } + + if ruleDirection == "IN" { + if len(obj.Spec.AppliedTo) > 0 { + for _, target := range obj.Spec.AppliedTo { + selector := client.ListOptions{} + if target.PodSelector != nil { + label, err := meta1.LabelSelectorAsSelector(target.PodSelector) + if err != nil { + return nil, err + } + selector.LabelSelector = label + selector.Namespace = obj.Namespace + finalSelectors = append(finalSelectors, selector) + } + } + } else if len(rule.AppliedTo) > 0 { + for _, target := range rule.AppliedTo { + // We only consider named port for PodSelector, not VMSelector + selector := client.ListOptions{} + if target.PodSelector != nil { + label, err := meta1.LabelSelectorAsSelector(target.PodSelector) + if err != nil { + return nil, err + } + selector.LabelSelector = label + selector.Namespace = obj.Namespace + finalSelectors = append(finalSelectors, selector) + } + } + } + } else if ruleDirection == "OUT" { + if len(rule.Destinations) > 0 { + for _, target := range rule.Destinations { + var namespaceSelectors []client.ListOptions // ResolveNamespace may return multiple namespaces + var labelSelector client.ListOptions + var namespaceSelector client.ListOptions + if target.PodSelector != nil { + label, err := meta1.LabelSelectorAsSelector(target.PodSelector) + if err != nil { + return nil, err + } + labelSelector.LabelSelector = label + } + if target.NamespaceSelector != nil { + ns, err := service.ResolveNamespace(target.NamespaceSelector) + if err != nil { + return nil, err + } + for _, nsItem := range ns.Items { + namespaceSelector.Namespace = nsItem.Name + namespaceSelectors = append(namespaceSelectors, namespaceSelector) + } + } else { + namespaceSelector.Namespace = obj.Namespace + namespaceSelectors = append(namespaceSelectors, namespaceSelector) + } + // calculate the union of labelSelector and namespaceSelectors + for _, nsSelector := range namespaceSelectors { + if labelSelector.LabelSelector != nil { + finalSelectors = append(finalSelectors, client.ListOptions{ + LabelSelector: labelSelector.LabelSelector, + Namespace: nsSelector.Namespace, + }) + } else { + finalSelectors = append(finalSelectors, client.ListOptions{ + Namespace: nsSelector.Namespace, + }) + } + } + } + } + } + if len(finalSelectors) == 0 { + return nil, util2.NoEffectiveOption{ + Desc: "no effective options filtered by the rule and security policy", + } + } + return finalSelectors, nil +} + +// ResolveNamespace Get namespace name when the rule has namespace selector. 
+func (service *SecurityPolicyService) ResolveNamespace(lbs *meta1.LabelSelector) (*v1.NamespaceList, error) { + ctx := context.Background() + nsList := &v1.NamespaceList{} + nsOptions := &client.ListOptions{} + labelMap, err := meta1.LabelSelectorAsMap(lbs) + if err != nil { + return nil, err + } + nsOptions.LabelSelector = labels.SelectorFromSet(labelMap) + err = service.Client.List(ctx, nsList, nsOptions) + if err != nil { + return nil, err + } + return nsList, err +} diff --git a/pkg/nsx/services/expand_test.go b/pkg/nsx/services/expand_test.go new file mode 100644 index 000000000..51da379da --- /dev/null +++ b/pkg/nsx/services/expand_test.go @@ -0,0 +1,223 @@ +package services + +import ( + "fmt" + "reflect" + "testing" + + "github.com/agiledragon/gomonkey" + "github.com/stretchr/testify/assert" + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + v12 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + "github.com/vmware-tanzu/nsx-operator/pkg/config" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx" +) + +func TestSecurityPolicyService_buildRuleIPGroup(t *testing.T) { + sp := &v1alpha1.SecurityPolicy{} + r := v1alpha1.SecurityPolicyRule{} + rule := model.Rule{ + DisplayName: &ruleNameWithPodSelector00, + Id: &ruleIDPort000, + DestinationGroups: []string{"ANY"}, + Direction: &nsxDirectionIn, + Scope: []string{"/infra/domains/k8scl-one/groups/sp_uidA_0_scope"}, + SequenceNumber: &seq0, + Services: []string{"ANY"}, + SourceGroups: []string{"/infra/domains/k8scl-one/groups/sp_uidA_0_src"}, + Action: &nsxActionAllow, + ServiceEntries: []*data.StructValue{}, + Tags: basicTags, + } + ips := []string{"1.1.1.1", "2.2.2.2"} + + policyGroupID := fmt.Sprintf("%s_ipset", ruleIDPort000) + policyGroupName := fmt.Sprintf("%s-ipset", ruleNameWithPodSelector00) + addresses := data.NewListValue() + for _, ip := range ips { + addresses.Add(data.NewStringValue(ip)) + } + blockExpression := data.NewStructValue( + "", + map[string]data.DataValue{ + "resource_type": data.NewStringValue("IPAddressExpression"), + "ip_addresses": addresses, + }, + ) + ipGroup := model.Group{ + Id: &policyGroupID, + DisplayName: &policyGroupName, + Expression: []*data.StructValue{blockExpression}, + Tags: []model.Tag{{Scope: nil, Tag: nil}}, + } + + var s *SecurityPolicyService + patches := gomonkey.ApplyMethod(reflect.TypeOf(s), "BuildPeerTags", + func(s *SecurityPolicyService, v *v1alpha1.SecurityPolicy, p *[]v1alpha1.SecurityPolicyPeer, i int) []model.Tag { + peerTags := []model.Tag{ + {Scope: nil, Tag: nil}, + } + return peerTags + }) + defer patches.Reset() + + type args struct { + obj *model.Rule + ips []string + } + tests := []struct { + name string + args args + want *model.Group + }{ + {"1", args{&rule, ips}, &ipGroup}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service := &SecurityPolicyService{} + assert.Equalf(t, tt.want, service.buildRuleIPSetGroup(sp, &r, tt.args.obj, tt.args.ips, 0), "buildRuleIPSetGroup(%v, %v)", + tt.args.obj, tt.args.ips) + }) + } +} + +func TestSecurityPolicyService_getPodSelectors(t *testing.T) { + podSelector2 := &v1.LabelSelector{ + MatchLabels: map[string]string{"pod_selector_1": "pod_value_1"}, + MatchExpressions: podSelectorMatchExpression, + } + sp := v1alpha1.SecurityPolicy{ + ObjectMeta: v1.ObjectMeta{Namespace: "ns1", Name: 
"spA", UID: "uidA"}, + Spec: v1alpha1.SecurityPolicySpec{ + AppliedTo: []v1alpha1.SecurityPolicyTarget{ + { + PodSelector: podSelector2, + }, + }, + }, + } + + podSelector := &v1.LabelSelector{ + MatchLabels: map[string]string{"pod_selector_1": "pod_value_1"}, + MatchExpressions: podSelectorMatchExpression, + } + rule := v1alpha1.SecurityPolicyRule{ + Action: &allowAction, + Direction: &directionIn, + Name: "rule-with-pod-selector", + AppliedTo: []v1alpha1.SecurityPolicyTarget{ + { + PodSelector: podSelector, + }, + }, + Sources: []v1alpha1.SecurityPolicyPeer{ + { + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"pod_selector_1": "pod_value_1"}, + MatchExpressions: podSelectorMatchExpression, + }, + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"ns1": "spA"}, + MatchExpressions: nsSelectorMatchExpression, + }, + }, + }, + } + rule2 := v1alpha1.SecurityPolicyRule{ + Action: &allowAction, + Direction: &directionIn, + Name: "rule-with-pod-selector-2", + AppliedTo: []v1alpha1.SecurityPolicyTarget{}, + Sources: []v1alpha1.SecurityPolicyPeer{ + { + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"pod_selector_1": "pod_value_1"}, + MatchExpressions: podSelectorMatchExpression, + }, + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"ns1": "spA"}, + MatchExpressions: nsSelectorMatchExpression, + }, + }, + }, + } + rule3 := v1alpha1.SecurityPolicyRule{ + Action: &allowAction, + Direction: &directionOut, + Name: "rule-with-pod-selector-3", + AppliedTo: []v1alpha1.SecurityPolicyTarget{}, + Destinations: []v1alpha1.SecurityPolicyPeer{ + { + PodSelector: podSelector, + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"ns1": "spA"}, + MatchExpressions: nsSelectorMatchExpression, + }, + }, + }, + } + type fields struct { + Client client.Client + NSXClient *nsx.Client + NSXConfig *config.NSXOperatorConfig + GroupStore cache.Indexer + SecurityPolicyStore cache.Indexer + RuleStore cache.Indexer + } + type args struct { + obj *v1alpha1.SecurityPolicy + rule *v1alpha1.SecurityPolicyRule + } + + labelSelector, _ := v1.LabelSelectorAsSelector(podSelector) + labelSelector2, _ := v1.LabelSelectorAsSelector(podSelector2) + var s *SecurityPolicyService + patches := gomonkey.ApplyMethod(reflect.TypeOf(s), "ResolveNamespace", + func(s *SecurityPolicyService, _ *v1.LabelSelector) (*v12.NamespaceList, error) { + ns := v12.NamespaceList{ + Items: []v12.Namespace{ + { + TypeMeta: v1.TypeMeta{}, + ObjectMeta: v1.ObjectMeta{ + Name: "ns1", + Labels: map[string]string{"ns1": "spA"}, + }, + }, + { + TypeMeta: v1.TypeMeta{}, + ObjectMeta: v1.ObjectMeta{ + Name: "ns2", + Labels: map[string]string{"ns2": "spA"}, + }, + }, + }, + } + return &ns, nil + }) + defer patches.Reset() + + tests := []struct { + name string + fields fields + args args + want client.ListOptions + wantErr assert.ErrorAssertionFunc + }{ + {"1", fields{}, args{&sp, &rule}, client.ListOptions{LabelSelector: labelSelector, Namespace: "ns1"}, nil}, + {"2", fields{}, args{&sp, &rule2}, client.ListOptions{LabelSelector: labelSelector2, Namespace: "ns1"}, nil}, + {"3", fields{}, args{&sp, &rule3}, client.ListOptions{LabelSelector: labelSelector, Namespace: "ns1"}, nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service := &SecurityPolicyService{} + got, _ := service.getPodSelectors(tt.args.obj, tt.args.rule) + assert.Equalf(t, tt.want, got[0], "getPodSelector(%v, %v)", tt.args.obj, tt.args.rule) + }) + } +} diff --git 
a/pkg/nsx/services/firewall.go b/pkg/nsx/services/firewall.go index 4611c1b54..2cc85929b 100644 --- a/pkg/nsx/services/firewall.go +++ b/pkg/nsx/services/firewall.go @@ -7,6 +7,7 @@ import ( "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" @@ -28,6 +29,7 @@ const ( ) type SecurityPolicyService struct { + Client client.Client NSXClient *nsx.Client NSXConfig *config.NSXOperatorConfig GroupStore cache.Indexer @@ -35,9 +37,7 @@ type SecurityPolicyService struct { RuleStore cache.Indexer } -var ( - log = logf.Log.WithName("service").WithName("firewall") -) +var log = logf.Log.WithName("service").WithName("firewall") // InitializeSecurityPolicy sync NSX resources func InitializeSecurityPolicy(NSXClient *nsx.Client, cf *config.NSXOperatorConfig) (*SecurityPolicyService, error) { @@ -50,6 +50,7 @@ func InitializeSecurityPolicy(NSXClient *nsx.Client, cf *config.NSXOperatorConfi service.GroupStore = cache.NewIndexer(keyFunc, cache.Indexers{ util.TagScopeNamespace: namespaceIndexFunc, util.TagScopeSecurityPolicyCRUID: securityPolicyCRUIDScopeIndexFunc, + util.TagScopeRuleID: ruleCRUIDScopeIndexFunc, }) service.SecurityPolicyStore = cache.NewIndexer(keyFunc, cache.Indexers{ util.TagScopeSecurityPolicyCRUID: securityPolicyCRUIDScopeIndexFunc, @@ -90,31 +91,49 @@ func (service *SecurityPolicyService) OperateSecurityPolicy(obj *v1alpha1.Securi return err } - if GroupsEqual(existingGroups, *nsxGroups) { - log.Info("groups not changed, skip", "nsxSecurityPolicy.Id", nsxSecurityPolicy.Id) + // Caution! createOrUpdate can't delete the legacy groups. + // So we have to delete the legacy groups firstly by groupClient. + groupEqual, legacyGroups := GroupsEqual(existingGroups, *nsxGroups) + if groupEqual { + log.Info("NSGroups are not changed, skip updating them", "nsxSecurityPolicy.Id", nsxSecurityPolicy.Id) } else { err = service.createOrUpdateGroups(*nsxGroups) if err != nil { return err } } - // Caution! Patch can't delete the legacy rules. - // So we have to delete the legacy rules manually by rulesClient. + // Caution! createOrUpdate can't delete the legacy rules. + // So we have to delete the legacy rules firstly by rulesClient. spEqual := SecurityPolicyEqual(existingSecurityPolicy, nsxSecurityPolicy) ruleEqual, legacyRules := RulesEqual(existingRules, nsxSecurityPolicy.Rules) if spEqual && ruleEqual { - log.Info("security policy and rules not changed, skip", "nsxSecurityPolicy.Id", nsxSecurityPolicy.Id) + log.Info("security policy and rules are not changed, skip updating them", "nsxSecurityPolicy.Id", nsxSecurityPolicy.Id) } else { err := service.createOrUpdateSecurityPolicy(nsxSecurityPolicy) if err != nil { return err } - err = service.updateOrDeleteRules(nsxSecurityPolicy, legacyRules) + err = service.AddRulesToStore(nsxSecurityPolicy) if err != nil { return err } log.Info("successfully operate", "nsxSecurityPolicy", nsxSecurityPolicy) } + + if len(legacyRules) > 0 { + err := service.DeleteRules(nsxSecurityPolicy, legacyRules) + if err != nil { + return err + } + } + + // The reason why delete legacy groups at last is that some rules may have reference to the legacy groups. 
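The ordering relied on here (groups created first, legacy groups deleted last) can be summarized in a short sketch; applyInOrder is illustrative only, with function values standing in for the service calls:

package main

import "fmt"

// applyInOrder is illustrative only: groups are created before rules and
// deleted after them, because NSX rules reference group paths.
func applyInOrder(createGroups, applyPolicyAndRules, deleteLegacyRules, deleteLegacyGroups func() error) error {
	steps := []func() error{
		createGroups,        // 1. desired groups first, so new rules can reference them
		applyPolicyAndRules, // 2. the security policy and its rules
		deleteLegacyRules,   // 3. stale rules may still point at stale groups
		deleteLegacyGroups,  // 4. stale groups last, once nothing references them
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	noop := func() error { return nil }
	fmt.Println(applyInOrder(noop, noop, noop, noop)) // <nil>
}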
+ if len(legacyGroups) > 0 { + err := service.DeleteGroups(legacyGroups) + if err != nil { + return err + } + } return nil } @@ -125,10 +144,12 @@ func (service *SecurityPolicyService) createOrUpdateGroups(nsxGroups []model.Gro return err } err = service.GroupStore.Add(group) + log.V(2).Info("add group to store", "group", group.Id) if err != nil { return err } } + log.Info("successfully create or update group", "groups", nsxGroups) return nil } @@ -144,24 +165,45 @@ func (service *SecurityPolicyService) createOrUpdateSecurityPolicy(sp *model.Sec return nil } -func (service *SecurityPolicyService) updateOrDeleteRules(sp *model.SecurityPolicy, legacyRules []model.Rule) error { +func (service *SecurityPolicyService) DeleteRules(sp *model.SecurityPolicy, legacyRules []model.Rule) error { + // Delete legacy rules + for _, rule := range legacyRules { + err := service.NSXClient.RuleClient.Delete(getDomain(service), *sp.Id, *rule.Id) + if err != nil { + return err + } + err = service.RuleStore.Delete(rule) + log.V(1).Info("delete rule from store", "rule", rule) + if err != nil { + return err + } + } + return nil +} + +func (service *SecurityPolicyService) AddRulesToStore(sp *model.SecurityPolicy) error { for _, rule := range sp.Rules { err := service.RuleStore.Add(rule) + log.V(1).Info("add rule to store", "rule", rule) if err != nil { return err } } + return nil +} - // Delete legacy rules - for _, rule := range legacyRules { - err := service.RuleStore.Delete(rule) +func (service *SecurityPolicyService) DeleteGroups(legacyGroups []model.Group) error { + // Delete legacy groups + for _, group := range legacyGroups { + err := service.deleteGroup(service.NSXClient.GroupClient, &group) if err != nil { return err } - err = service.NSXClient.RuleClient.Delete(getDomain(service), *sp.Id, *rule.Id) + err = service.GroupStore.Delete(group) if err != nil { return err } + log.Info("successfully delete group", "group", group) } return nil } diff --git a/pkg/nsx/services/firewall_test.go b/pkg/nsx/services/firewall_test.go index 446685453..a41b2a55d 100644 --- a/pkg/nsx/services/firewall_test.go +++ b/pkg/nsx/services/firewall_test.go @@ -36,14 +36,17 @@ var ( seq0 = int64(0) seq1 = int64(1) seq2 = int64(2) - ruleNameWithPodSelector = "rule-with-pod-selector" - ruleNameWithVMSelector = "rule-with-VM-selector" - ruleNameWithNsSelector = "rule-with-ns-selector" - ruleNameWithIpBlock = "rule-with-ip-block" + ruleNameWithPodSelector00 = "rule-with-pod-selector-0-0" + ruleNameWithVMSelector00 = "rule-with-VM-selector-0-0" + ruleNameWithNsSelector00 = "rule-with-ns-selector-0-0" + ruleNameWithIpBlock00 = "rule-with-ip-block-0-0" cidr = "192.168.1.1/24" ruleID0 = "sp_uidA_0" ruleID1 = "sp_uidA_1" ruleID2 = "sp_uidA_2" + ruleIDPort000 = "sp_uidA_0_0_0" + ruleIDPort100 = "sp_uidA_1_0_0" + ruleIDPort200 = "sp_uidA_2_0_0" nsxDirectionIn = "IN" nsxActionAllow = "ALLOW" nsxDirectionOut = "OUT" diff --git a/pkg/nsx/services/store.go b/pkg/nsx/services/store.go index 552275a8d..ec5ff1719 100644 --- a/pkg/nsx/services/store.go +++ b/pkg/nsx/services/store.go @@ -42,6 +42,19 @@ func securityPolicyCRUIDScopeIndexFunc(obj interface{}) ([]string, error) { return res, nil } +func ruleCRUIDScopeIndexFunc(obj interface{}) ([]string, error) { + res := make([]string, 0, 5) + switch v := obj.(type) { + case model.Group: + for _, tag := range v.Tags { + if *tag.Scope == util.TagScopeRuleID { + res = append(res, *tag.Tag) + } + } + } + return res, nil +} + func appendTag(v []model.Tag, res []string) []string { for _, tag := range v { if 
*tag.Scope == util.TagScopeSecurityPolicyCRUID { diff --git a/pkg/nsx/util/errors.go b/pkg/nsx/util/errors.go index d1886d366..7d276824d 100644 --- a/pkg/nsx/util/errors.go +++ b/pkg/nsx/util/errors.go @@ -562,3 +562,19 @@ type NoFilteredPod struct { func (err NoFilteredPod) Error() string { return err.Desc } + +type NoEffectiveOption struct { + Desc string +} + +func (err NoEffectiveOption) Error() string { + return err.Desc +} + +type RestrictionError struct { + Desc string +} + +func (err RestrictionError) Error() string { + return err.Desc +} diff --git a/pkg/nsx/util/utils.go b/pkg/nsx/util/utils.go index e8a8cc1f0..e4f7d611d 100644 --- a/pkg/nsx/util/utils.go +++ b/pkg/nsx/util/utils.go @@ -10,17 +10,16 @@ import ( "io/ioutil" "net/http" "reflect" + "sort" "strconv" "strings" logf "sigs.k8s.io/controller-runtime/pkg/log" ) -var ( - log = logf.Log.WithName("nsx").WithName("utils") -) +var log = logf.Log.WithName("nsx").WithName("utils") -// ErrorDetail is error detail which info extracted from http.Reponse.Body. +// ErrorDetail is error detail which info extracted from http.Response.Body. type ErrorDetail struct { StatusCode int ErrorCode int @@ -29,6 +28,14 @@ type ErrorDetail struct { Details string } +// PortAddress is used when named port is specified. +type PortAddress struct { + // Port is the port number. + Port int `json:"port"` + // IPs is a list of IPs associated to port number. + IPs []string `json:"ips"` +} + func (e *ErrorDetail) Error() string { msg := fmt.Sprintf("StatusCode is %d,", e.StatusCode) if e.ErrorCode > 0 { @@ -141,12 +148,15 @@ type errmap map[string]NsxError var ( errorTable = map[string]errmap{ - "404": //http.StatusNotFound - {"202": &BackendResourceNotFound{}, + "404": // http.StatusNotFound + { + "202": &BackendResourceNotFound{}, "500090": &StaleRevision{}, - "default": &ResourceNotFound{}}, - "400": //http.StatusBadRequest - {"60508": &NsxIndexingInProgress{}, + "default": &ResourceNotFound{}, + }, + "400": // http.StatusBadRequest + { + "60508": &NsxIndexingInProgress{}, "60514": &NsxSearchTimeout{}, "60515": &NsxSearchOutOfSync{}, "8327": &NsxOverlapVlan{}, @@ -156,25 +166,30 @@ var ( "500105": &NsxOverlapAddresses{}, "500232": &StaleRevision{}, "503040": &NsxSegemntWithVM{}, - "100148": &StaleRevision{}}, - "500": //http.StatusInternalServerError - {"98": &CannotConnectToServer{}, + "100148": &StaleRevision{}, + }, + "500": // http.StatusInternalServerError + { + "98": &CannotConnectToServer{}, "99": &ClientCertificateNotTrusted{}, - "607": &APITransactionAborted{}}, - "403": //http.StatusForbidden - {"98": &BadXSRFToken{}, + "607": &APITransactionAborted{}, + }, + "403": // http.StatusForbidden + { + "98": &BadXSRFToken{}, "403": &InvalidCredentials{}, - "505": &InvalidLicense{}}, + "505": &InvalidLicense{}, + }, } errorTable1 = map[string]NsxError{ - "409"://http.StatusConflict + "409":// http.StatusConflict &StaleRevision{}, - "412"://http.StatusPreconditionFailed + "412":// http.StatusPreconditionFailed &StaleRevision{}, - "429"://http.statusTooManyRequests + "429":// http.statusTooManyRequests &TooManyRequests{}, - "503"://http.StatusServiceUnavailable + "503":// http.StatusServiceUnavailable &ServiceUnavailable{}, } ) @@ -232,8 +247,27 @@ func HandleHTTPResponse(response *http.Response, result interface{}, debug bool) log.V(2).Info("received HTTP response", "response", string(body)) } if err := json.Unmarshal(body, result); err != nil { - log.Error(err, "Error converting HTTP response to result", "result type", result) + log.Error(err, "error 
converting HTTP response to result", "result type", result) return err, body } return nil, body } + +func MergeAddressByPort(portAddressOriginal []PortAddress) []PortAddress { + var portAddress []PortAddress + var sortKeys []int + mappedPorts := make(map[int][]string) + for _, pa := range portAddressOriginal { + if _, ok := mappedPorts[pa.Port]; !ok { + sortKeys = append(sortKeys, pa.Port) + mappedPorts[pa.Port] = pa.IPs + } else { + mappedPorts[pa.Port] = append(mappedPorts[pa.Port], pa.IPs...) + } + } + sort.Ints(sortKeys) + for _, key := range sortKeys { + portAddress = append(portAddress, PortAddress{Port: key, IPs: mappedPorts[key]}) + } + return portAddress +} diff --git a/pkg/nsx/util/utils_test.go b/pkg/nsx/util/utils_test.go index 97185520f..589412da0 100644 --- a/pkg/nsx/util/utils_test.go +++ b/pkg/nsx/util/utils_test.go @@ -47,7 +47,6 @@ func TestHttpErrortoNSXError(t *testing.T) { err = httpErrortoNSXError(&testdatas[4]) e4, ok := err.(ManagerError) assert.True(ok, fmt.Sprintf("Transform error : %v", e4)) - } func TestInitErrorFromResponse(t *testing.T) { @@ -148,12 +147,12 @@ func TestUtil_InitErrorFromResponse(t *testing.T) { assert.Equal(t, result, false) result = ShouldRetry(err) assert.Equal(t, result, true) - } func TestUtil_setDetail(t *testing.T) { nsxerr := CreateCannotConnectToServer() - detail := ErrorDetail{ErrorCode: 287, + detail := ErrorDetail{ + ErrorCode: 287, StatusCode: 400, RelatedErrorCodes: []int{123, 222}, RelatedStatusCodes: []string{"error1", "erro2"}, diff --git a/pkg/util/utils.go b/pkg/util/utils.go index 66859e899..e4a8053b9 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -4,13 +4,22 @@ package util import ( + "context" "crypto/sha1" "fmt" "strings" mapset "github.com/deckarep/golang-set" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" ) +const wcpSystemResource = "vmware-system-shared-t1" + +var log = logf.Log.WithName("pkg").WithName("utils") + func NormalizeLabels(matchLabels *map[string]string) *map[string]string { newLabels := make(map[string]string) for k, v := range *matchLabels { @@ -63,3 +72,40 @@ func ToUpper(obj interface{}) string { str := fmt.Sprintf("%s", obj) return strings.ToUpper(str) } + +func IsSystemNamespace(c client.Client, ns string, obj *v1.Namespace) (bool, error) { + nsObj := &v1.Namespace{} + if obj != nil { + nsObj = obj + } else if err := c.Get(context.Background(), types.NamespacedName{Namespace: ns, Name: ns}, nsObj); err != nil { + return false, client.IgnoreNotFound(err) + } + if isSysNs, ok := nsObj.Annotations[wcpSystemResource]; ok && strings.ToLower(isSysNs) == "true" { + return true, nil + } + return false, nil +} + +// CheckPodHasNamedPort checks if the pod has a named port, it filters the pod events +// we don't want give concern. 
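One plausible way to use this helper as an event filter is a controller-runtime predicate; the wiring below is an assumption for illustration only and is not added by this patch:

package main

import (
	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"

	"github.com/vmware-tanzu/nsx-operator/pkg/util"
)

// PodNamedPortPredicate drops pod events for pods that expose no named port,
// since only named ports affect the expanded NSX rules.
var PodNamedPortPredicate = predicate.Funcs{
	CreateFunc: func(e event.CreateEvent) bool {
		pod, ok := e.Object.(*v1.Pod)
		return ok && util.CheckPodHasNamedPort(*pod, "create")
	},
	UpdateFunc: func(e event.UpdateEvent) bool {
		pod, ok := e.ObjectNew.(*v1.Pod)
		return ok && util.CheckPodHasNamedPort(*pod, "update")
	},
	DeleteFunc: func(e event.DeleteEvent) bool {
		pod, ok := e.Object.(*v1.Pod)
		return ok && util.CheckPodHasNamedPort(*pod, "delete")
	},
}

func main() {}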
+func CheckPodHasNamedPort(pod v1.Pod, reason string) bool { + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name != "" { + log.V(1).Info(fmt.Sprintf("%s pod %s has a named port %s", reason, pod.Name, port.Name)) + return true + } + } + } + log.V(1).Info(fmt.Sprintf("%s pod %s has no named port", reason, pod.Name)) + return false +} + +func Contains(s []string, str string) bool { + for _, v := range s { + if v == str { + return true + } + } + return false +} diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index 83ec4dfd8..ec11dc01c 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -4,11 +4,16 @@ package util import ( + "context" "fmt" "strings" "testing" "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestSha1(t *testing.T) { @@ -63,3 +68,144 @@ func TestNormalizeLabels(t *testing.T) { }) } } + +func TestUtil_IsNsInSystemNamespace(t *testing.T) { + client := fake.NewClientBuilder().Build() + ctx := context.TODO() + dummyNs := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "dummy"}} + client.Create(ctx, dummyNs) + ns := types.NamespacedName{Namespace: "dummy", Name: "dummy"} + + isCRInSysNs, err := IsSystemNamespace(client, ns.Namespace, nil) + if err != nil { + t.Fatalf(err.Error()) + } + if isCRInSysNs { + t.Fatalf("Non-system namespace identied as a system namespace") + } + client.Delete(ctx, dummyNs) + + sysNs := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sys-ns", + Namespace: "sys-ns", + Annotations: map[string]string{"vmware-system-shared-t1": "true"}, + }, + } + client.Create(ctx, sysNs) + ns = types.NamespacedName{Namespace: "sys-ns", Name: "dummy"} + + isCRInSysNs, err = IsSystemNamespace(client, ns.Namespace, nil) + + if err != nil { + t.Fatalf(err.Error()) + } + if !isCRInSysNs { + t.Fatalf("System namespace not identied as a system namespace") + } + client.Delete(ctx, sysNs) +} + +func Test_CheckPodHasNamedPort(t *testing.T) { + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Ports: []v1.ContainerPort{ + {Name: "test-port", ContainerPort: 8080}, + }, + }, + }, + }, + } + pod3 := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-3", + Namespace: "test", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Ports: []v1.ContainerPort{}, + }, + }, + }, + } + type args struct { + pod *v1.Pod + } + tests := []struct { + name string + args args + want bool + }{ + {"1", args{&pod}, true}, + {"3", args{&pod3}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, CheckPodHasNamedPort(*tt.args.pod, ""), "checkPodHasNamedPort(%v)", tt.args.pod) + }) + } +} + +func TestToUpper(t *testing.T) { + type args struct { + obj interface{} + } + tests := []struct { + name string + args args + want string + }{ + {"1", args{"test"}, "TEST"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, ToUpper(tt.args.obj), "ToUpper(%v)", tt.args.obj) + }) + } +} + +func TestContains(t *testing.T) { + type args struct { + s []string + str string + } + tests := []struct { + name string + args args + want bool + }{ + {"1", args{[]string{"test", "test2"}, "test"}, true}, + {"2", args{[]string{"test2", "test3"}, "test"}, false}, + } + for 
_, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Contains(tt.args.s, tt.args.str), "Contains(%v, %v)", tt.args.s, tt.args.str) + }) + } +} + +func TestRemoveDuplicateStr(t *testing.T) { + type args struct { + strSlice []string + } + tests := []struct { + name string + args args + want []string + }{ + {"1", args{[]string{"test", "test", "test"}}, []string{"test"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, RemoveDuplicateStr(tt.args.strSlice), "RemoveDuplicateStr(%v)", tt.args.strSlice) + }) + } +}
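To tie the new Contains helper back to the reworked RulesEqual and GroupsEqual earlier in this patch, here is a hedged sketch of the ID-set diff they perform; legacyIDs and the sample IDs are illustrative only:

package main

import (
	"fmt"

	"github.com/vmware-tanzu/nsx-operator/pkg/util"
)

// legacyIDs mirrors the comparison logic: anything present in the existing set
// but missing from the desired set is "legacy" and must be deleted explicitly,
// because PATCH-style updates never remove children on their own.
func legacyIDs(existing, desired []string) []string {
	var legacy []string
	for _, id := range existing {
		if !util.Contains(desired, id) {
			legacy = append(legacy, id)
		}
	}
	return legacy
}

func main() {
	existing := []string{"sp_uidA_0_0_0", "sp_uidA_1_0_0", "sp_uidA_2_0_0"}
	desired := []string{"sp_uidA_0_0_0", "sp_uidA_2_0_0"}
	fmt.Println(legacyIDs(existing, desired)) // [sp_uidA_1_0_0]
}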