diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ac5c259e0..c63a73ec9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,8 @@ concurrency: jobs: verify: name: verify - runs-on: [self-hosted, ecs] + runs-on: ubuntu-22.04 +# runs-on: [self-hosted, ecs] if: ${{ always() }} env: #GOPATH: ${{ github.workspace }} @@ -27,6 +28,8 @@ jobs: uses: actions/setup-go@v4 with: go-version: '1.20' + - name: Set up GOPROXY for Go + run: go env -w GOPROXY=https://goproxy.cn,direct - name: lint run: hack/verify-staticcheck.sh - name: vendor diff --git a/.github/workflows/release_binaries.yml b/.github/workflows/release_binaries.yml index 049640223..461f15544 100644 --- a/.github/workflows/release_binaries.yml +++ b/.github/workflows/release_binaries.yml @@ -30,6 +30,9 @@ jobs: - name: Install Go uses: actions/setup-go@v3 + - name: Set up GOPROXY for Go + run: go env -w GOPROXY=https://goproxy.cn,direct + # step3: make binaries - name: Make binaries run: make release diff --git a/.gitignore b/.gitignore index 112c4f5f6..c74279251 100644 --- a/.gitignore +++ b/.gitignore @@ -36,4 +36,6 @@ cmd/kubenest/node-agent/cert.pem cmd/kubenest/node-agent/key.pem cmd/kubenest/node-agent/agent.env hack/k8s-in-k8s/nodes.txt -develop \ No newline at end of file +develop + +cmd/kubenest/node-agent/app/client/app.log diff --git a/.golangci.yml b/.golangci.yml index e217edb77..e74eab63c 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,25 +1,33 @@ run: timeout: 10m modules-download-mode: vendor + skip-dirs: + - (^|/)vendor($|/) + - pkg/utils/lifted + - pkg/scheduler/lifted linters: disable-all: true enable: - - whitespace - - bodyclose - - dupl - - errcheck - - gci + # linters maintained by golang.org - gofmt + - govet - goimports - - misspell - - unused + # linters default enabled by golangci-lint . + - errcheck + - gosimple - typecheck - staticcheck - - gosimple - - govet - ineffassign - - typecheck + - unused + # other linters supported by golangci-lint. 
+ - gci + - misspell + - bodyclose + - gocyclo - gosec + - dupl + - revive + - whitespace linters-settings: goimports: local-prefixes: github.com/kosmos.io/kosmos @@ -31,6 +39,9 @@ linters-settings: - Standard - Default - Prefix(github.com/kosmos.io/kosmos) + gocyclo: + # minimal cyclomatic complexity to report + min-complexity: 40 # The recommended value is 15 output: sort-results: true diff --git a/Makefile b/Makefile index 0971fd4ff..046f2434b 100644 --- a/Makefile +++ b/Makefile @@ -151,7 +151,7 @@ lint-fix: golangci-lint golangci-lint: ifeq (, $(shell which golangci-lint)) - GO111MODULE=on go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2 + GO111MODULE=on go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2 GOLANGLINT_BIN=$(shell go env GOPATH)/bin/golangci-lint else GOLANGLINT_BIN=$(shell which golangci-lint) diff --git a/cmd/clusterlink/agent/app/agent.go b/cmd/clusterlink/agent/app/agent.go index 68447f5c2..6f8d5132a 100644 --- a/cmd/clusterlink/agent/app/agent.go +++ b/cmd/clusterlink/agent/app/agent.go @@ -37,10 +37,7 @@ func NewAgentCommand(ctx context.Context) *cobra.Command { if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() } - if err := run(ctx, opts); err != nil { - return err - } - return nil + return run(ctx, opts) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { diff --git a/cmd/clusterlink/clusterlink-operator/app/operator.go b/cmd/clusterlink/clusterlink-operator/app/operator.go index e61bef7e6..721f02bf4 100644 --- a/cmd/clusterlink/clusterlink-operator/app/operator.go +++ b/cmd/clusterlink/clusterlink-operator/app/operator.go @@ -36,10 +36,7 @@ func NewLinkOperatorCommand(ctx context.Context) *cobra.Command { if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() } - if err := Run(ctx, opts); err != nil { - return err - } - return nil + return Run(ctx, opts) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { diff --git a/cmd/clusterlink/controller-manager/app/controller-manager.go b/cmd/clusterlink/controller-manager/app/controller-manager.go index f7b83b10d..b01a602fd 100644 --- a/cmd/clusterlink/controller-manager/app/controller-manager.go +++ b/cmd/clusterlink/controller-manager/app/controller-manager.go @@ -110,7 +110,7 @@ func Run(ctx context.Context, opts *options.ControllerManagerOptions) error { return nil } -func setupControllers(mgr ctrl.Manager, opts *options.ControllerManagerOptions, ctx context.Context) []ctrlcontext.CleanFunc { +func setupControllers(ctx context.Context, mgr ctrl.Manager, opts *options.ControllerManagerOptions) []ctrlcontext.CleanFunc { controlPanelConfig, err := clientcmd.BuildConfigFromFlags("", opts.ControlPanelConfig) if err != nil { klog.Fatalf("build controlpanel config err: %v", err) diff --git a/cmd/clusterlink/controller-manager/app/controllerstarter.go b/cmd/clusterlink/controller-manager/app/controllerstarter.go index e47dd923b..d5e3ebd43 100644 --- a/cmd/clusterlink/controller-manager/app/controllerstarter.go +++ b/cmd/clusterlink/controller-manager/app/controllerstarter.go @@ -98,7 +98,7 @@ func (c *Controller) OnAdd(obj interface{}) { } // OnUpdate handles object update event and push the object to queue. 
-func (c *Controller) OnUpdate(oldObj, newObj interface{}) { +func (c *Controller) OnUpdate(_, newObj interface{}) { runtimeObj, ok := newObj.(runtime.Object) if !ok { return @@ -111,7 +111,7 @@ func (c *Controller) OnDelete(obj interface{}) { c.OnAdd(obj) } -func (c *Controller) Reconcile(key lifted.QueueKey) error { +func (c *Controller) Reconcile(_ lifted.QueueKey) error { cluster, err := c.clusterLister.Get(c.opts.ClusterName) if err != nil { return err @@ -180,7 +180,7 @@ func (c *Controller) removeFinalizer(cluster *clusterlinkv1alpha1.Cluster) error } func (c *Controller) setupControllers() { subCtx, cancelFunc := context.WithCancel(c.ctx) - cleanFuns := setupControllers(c.mgr, c.opts, subCtx) + cleanFuns := setupControllers(subCtx, c.mgr, c.opts) c.cancelFunc = cancelFunc c.cleanFuncs = cleanFuns } diff --git a/cmd/clusterlink/controller-manager/app/core.go b/cmd/clusterlink/controller-manager/app/core.go index 9b7fc856d..86379037a 100644 --- a/cmd/clusterlink/controller-manager/app/core.go +++ b/cmd/clusterlink/controller-manager/app/core.go @@ -81,7 +81,7 @@ func startCalicoPoolController(ctx ctrlcontext.Context) (bool, ctrlcontext.Clean func startNodeCIDRController(ctx ctrlcontext.Context) (bool, ctrlcontext.CleanFunc, error) { mgr := ctx.Mgr - nodeCIDRCtl := nodecidr.NewNodeCIDRController(mgr.GetConfig(), ctx.Opts.ClusterName, ctx.ClusterLinkClient, ctx.Opts.RateLimiterOpts, ctx.Ctx) + nodeCIDRCtl := nodecidr.NewNodeCIDRController(ctx.Ctx, mgr.GetConfig(), ctx.Opts.ClusterName, ctx.ClusterLinkClient, ctx.Opts.RateLimiterOpts) if err := mgr.Add(nodeCIDRCtl); err != nil { klog.Fatalf("Failed to setup node CIDR Controller: %v", err) return true, nil, nil diff --git a/cmd/clusterlink/elector/app/elector.go b/cmd/clusterlink/elector/app/elector.go index e805615f1..670dc2b03 100644 --- a/cmd/clusterlink/elector/app/elector.go +++ b/cmd/clusterlink/elector/app/elector.go @@ -37,10 +37,7 @@ func NewElectorCommand(ctx context.Context) *cobra.Command { if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() } - if err := run(ctx, opts); err != nil { - return err - } - return nil + return run(ctx, opts) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { diff --git a/cmd/clusterlink/floater/app/floater.go b/cmd/clusterlink/floater/app/floater.go index 52534a01c..a56ed7838 100644 --- a/cmd/clusterlink/floater/app/floater.go +++ b/cmd/clusterlink/floater/app/floater.go @@ -33,10 +33,7 @@ func NewFloaterCommand(ctx context.Context) *cobra.Command { if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() } - if err := Run(ctx, opts); err != nil { - return err - } - return nil + return Run(ctx, opts) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { diff --git a/cmd/clusterlink/floater/app/options/options.go b/cmd/clusterlink/floater/app/options/options.go index d4109755c..c70947421 100644 --- a/cmd/clusterlink/floater/app/options/options.go +++ b/cmd/clusterlink/floater/app/options/options.go @@ -19,5 +19,5 @@ func NewOptions() *Options { } // AddFlags adds flags of agent to the specified FlagSet -func (o *Options) AddFlags(fs *pflag.FlagSet) { +func (o *Options) AddFlags(_ *pflag.FlagSet) { } diff --git a/cmd/clusterlink/network-manager/app/manager.go b/cmd/clusterlink/network-manager/app/manager.go index 5c630082d..ef46bba25 100644 --- a/cmd/clusterlink/network-manager/app/manager.go +++ b/cmd/clusterlink/network-manager/app/manager.go @@ -26,10 +26,7 @@ func NewNetworkManagerCommand(ctx 
context.Context) *cobra.Command { if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() } - if err := run(ctx, opts); err != nil { - return err - } - return nil + return run(ctx, opts) }, } diff --git a/cmd/clusterlink/proxy/app/clusterlink-proxy.go b/cmd/clusterlink/proxy/app/clusterlink-proxy.go index 654c2e46e..85433b9c6 100644 --- a/cmd/clusterlink/proxy/app/clusterlink-proxy.go +++ b/cmd/clusterlink/proxy/app/clusterlink-proxy.go @@ -26,10 +26,7 @@ func NewClusterLinkProxyCommand(ctx context.Context) *cobra.Command { return errs.ToAggregate() } */ - if err := run(ctx, opts); err != nil { - return err - } - return nil + return run(ctx, opts) }, Args: func(cmd *cobra.Command, args []string) error { for _, arg := range args { diff --git a/cmd/clusterlink/proxy/app/options/options.go b/cmd/clusterlink/proxy/app/options/options.go index ba13a74a2..6c63f6e16 100644 --- a/cmd/clusterlink/proxy/app/options/options.go +++ b/cmd/clusterlink/proxy/app/options/options.go @@ -166,9 +166,5 @@ func (o *Options) genericOptionsApplyTo(config *genericapiserver.RecommendedConf if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate); err != nil { - return err - } - - return nil + return o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate) } diff --git a/cmd/clustertree/cluster-manager/app/manager.go b/cmd/clustertree/cluster-manager/app/manager.go index 19435289a..0e7f4651f 100644 --- a/cmd/clustertree/cluster-manager/app/manager.go +++ b/cmd/clustertree/cluster-manager/app/manager.go @@ -45,10 +45,7 @@ func NewClusterManagerCommand(ctx context.Context) (*cobra.Command, error) { if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() } - if err := leaderElectionRun(ctx, opts); err != nil { - return err - } - return nil + return leaderElectionRun(ctx, opts) }, } diff --git a/cmd/kubenest/node-agent/app/client/client.go b/cmd/kubenest/node-agent/app/client/client.go index 9f3ec64f3..932832f19 100644 --- a/cmd/kubenest/node-agent/app/client/client.go +++ b/cmd/kubenest/node-agent/app/client/client.go @@ -67,7 +67,7 @@ var ( operation string // operation for client to execute ) -func cmdCheckRun(cmd *cobra.Command, args []string) error { +func cmdCheckRun(cmd *cobra.Command, _ []string) error { if len(params) != 1 { log.Errorf("port list is required and port list size must not be greater than 1") return fmt.Errorf("port list is required and port list size must not be greater than 1") @@ -143,7 +143,7 @@ func init() { ClientCmd.AddCommand(checkCmd) } -func cmdTtyRun(cmd *cobra.Command, args []string) error { +func cmdTtyRun(cmd *cobra.Command, _ []string) error { auth, err := getAuth(cmd) if err != nil { return err @@ -242,7 +242,7 @@ func connectTty(wsURL string, headers http.Header) error { } } -func cmdCmdRun(cmd *cobra.Command, args []string) error { +func cmdCmdRun(cmd *cobra.Command, _ []string) error { if len(operation) == 0 { log.Errorf("operation is required") return fmt.Errorf("operation is required") @@ -255,7 +255,7 @@ func cmdCmdRun(cmd *cobra.Command, args []string) error { return executeWebSocketCommand(auth) } -func cmdUploadRun(cmd *cobra.Command, args []string) error { +func cmdUploadRun(cmd *cobra.Command, _ []string) error { auth, err := getAuth(cmd) if err != nil { return err @@ -316,7 +316,7 @@ func uploadFile(filePath, fileName, auth string) error { defer wg.Done() wsURL := 
fmt.Sprintf("wss://%s/upload/?file_name=%s&file_path=%s", addr, url.QueryEscape(filepath.Base(fileName)), url.QueryEscape(filePath)) fmt.Println("Uploading file:", fileName, "from", filePath, "to", addr) - err := connectAndSendFile(wsURL, headers, filePath, fileName) + err := connectAndSendFile(wsURL, headers, fileName) if err != nil { log.Errorf("failed to upload file: %v on %s: %v\n", err, addr, fileName) } @@ -344,7 +344,7 @@ func connectAndHandleMessages(wsURL string, headers http.Header) error { return nil } -func connectAndSendFile(wsURL string, headers http.Header, filePath, fileName string) error { +func connectAndSendFile(wsURL string, headers http.Header, fileName string) error { ws, resp, err := dialer.Dial(wsURL, headers) if err != nil { return fmt.Errorf("WebSocket dial error: %v", err) diff --git a/cmd/kubenest/node-agent/app/client/client_test.go b/cmd/kubenest/node-agent/app/client/client_test.go index 737990443..ff83f7dd0 100644 --- a/cmd/kubenest/node-agent/app/client/client_test.go +++ b/cmd/kubenest/node-agent/app/client/client_test.go @@ -45,7 +45,7 @@ func init() { time.Sleep(10 * time.Second) } -func TestCmd(t *testing.T) { +func TestCmd(_ *testing.T) { fmt.Println("Command test") command := url.QueryEscape("ls -l") ws, resp, err := dialer.Dial("wss://"+testAddr+"/cmd/?command="+command, headers) @@ -59,7 +59,7 @@ func TestCmd(t *testing.T) { handleMessages(ws) } -func TestUpload(t *testing.T) { +func TestUpload(_ *testing.T) { fmt.Println("Upload file test") fileName := url.QueryEscape("app.go") filePath := url.QueryEscape("/tmp/websocket") @@ -76,7 +76,7 @@ func TestUpload(t *testing.T) { handleMessages(ws) } -func TestShellScript(t *testing.T) { +func TestShellScript(_ *testing.T) { fmt.Println("Shell script test") ws, resp, err := dialer.Dial("wss://"+testAddr+"/sh/?args=10&&args=10", headers) @@ -91,7 +91,7 @@ func TestShellScript(t *testing.T) { handleMessages(ws) } -func TestPyScript(t *testing.T) { +func TestPyScript(_ *testing.T) { fmt.Println("Python script test") ws, resp, err := dialer.Dial("wss://"+testAddr+"/py/?args=10&&args=10", headers) if err != nil { diff --git a/cmd/kubenest/node-agent/app/serve/serve.go b/cmd/kubenest/node-agent/app/serve/serve.go index c3047d5c2..b7b4045f0 100644 --- a/cmd/kubenest/node-agent/app/serve/serve.go +++ b/cmd/kubenest/node-agent/app/serve/serve.go @@ -54,7 +54,7 @@ func init() { ServeCmd.PersistentFlags().StringVarP(&keyFile, "key", "k", "key.pem", "SSL key file") } -func serveCmdRun(cmd *cobra.Command, args []string) error { +func serveCmdRun(_ *cobra.Command, _ []string) error { user := viper.GetString("WEB_USER") password := viper.GetString("WEB_PASS") if len(user) == 0 || len(password) == 0 { diff --git a/cmd/kubenest/operator/app/operator.go b/cmd/kubenest/operator/app/operator.go index 42a3ddab5..35d38067c 100644 --- a/cmd/kubenest/operator/app/operator.go +++ b/cmd/kubenest/operator/app/operator.go @@ -38,10 +38,7 @@ func NewVirtualClusterOperatorCommand(ctx context.Context) *cobra.Command { Use: "virtual-cluster-operator", Long: `create virtual kubernetes control plane with VirtualCluster`, RunE: func(cmd *cobra.Command, args []string) error { - if err := runCommand(ctx, opts); err != nil { - return err - } - return nil + return runCommand(ctx, opts) }, } @@ -89,7 +86,7 @@ func SetupConfig(opts *options.Options) (*config.Config, error) { ko.KubeInKubeConfig.ETCDStorageClass = opts.DeprecatedOptions.KubeInKubeConfig.ETCDStorageClass ko.KubeInKubeConfig.AdmissionPlugins = 
opts.DeprecatedOptions.KubeInKubeConfig.AdmissionPlugins ko.KubeInKubeConfig.AnpMode = opts.DeprecatedOptions.KubeInKubeConfig.AnpMode - ko.KubeInKubeConfig.ApiServerReplicas = opts.DeprecatedOptions.KubeInKubeConfig.ApiServerReplicas + ko.KubeInKubeConfig.APIServerReplicas = opts.DeprecatedOptions.KubeInKubeConfig.APIServerReplicas ko.KubeInKubeConfig.ClusterCIDR = opts.DeprecatedOptions.KubeInKubeConfig.ClusterCIDR koc = *ko @@ -125,12 +122,12 @@ func SetupConfig(opts *options.Options) (*config.Config, error) { } // TODO -func printKubeNestConfiguration(koc v1alpha1.KubeNestConfiguration) { +func printKubeNestConfiguration(_ v1alpha1.KubeNestConfiguration) { } // TODO -func fillInForDefault(c *config.Config, koc v1alpha1.KubeNestConfiguration) { +func fillInForDefault(_ *config.Config, _ v1alpha1.KubeNestConfiguration) { } @@ -201,13 +198,13 @@ func startEndPointsControllers(mgr manager.Manager) error { return fmt.Errorf("error starting %s: %v", endpointscontroller.KonnectivitySyncControllerName, err) } - ApiServerExternalSyncController := endpointscontroller.ApiServerExternalSyncController{ + APIServerExternalSyncController := endpointscontroller.APIServerExternalSyncController{ Client: mgr.GetClient(), EventRecorder: mgr.GetEventRecorderFor(constants.GlobalNodeControllerName), } - if err := ApiServerExternalSyncController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting %s: %v", endpointscontroller.ApiServerExternalSyncControllerName, err) + if err := APIServerExternalSyncController.SetupWithManager(mgr); err != nil { + return fmt.Errorf("error starting %s: %v", endpointscontroller.APIServerExternalSyncControllerName, err) } return nil diff --git a/cmd/kubenest/operator/app/options/options.go b/cmd/kubenest/operator/app/options/options.go index 89603c161..83fe89bec 100644 --- a/cmd/kubenest/operator/app/options/options.go +++ b/cmd/kubenest/operator/app/options/options.go @@ -31,7 +31,7 @@ type KubeNestOptions struct { ForceDestroy bool AnpMode string AdmissionPlugins bool - ApiServerReplicas int + APIServerReplicas int ClusterCIDR string ETCDStorageClass string ETCDUnitSize string @@ -65,7 +65,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) { flags.BoolVar(&o.DeprecatedOptions.KubeInKubeConfig.ForceDestroy, "kube-nest-force-destroy", false, "Force destroy the node.If it set true.If set to true, Kubernetes will not evict the existing nodes on the node when joining nodes to the tenant's control plane, but will instead force destroy.") flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.AnpMode, "kube-nest-anp-mode", "tcp", "kube-apiserver network proxy mode, must be set to tcp or uds. uds mode the replicas for apiserver should be one, and tcp for multi apiserver replicas.") flags.BoolVar(&o.DeprecatedOptions.KubeInKubeConfig.AdmissionPlugins, "kube-nest-admission-plugins", false, "kube-apiserver network disable-admission-plugins, false for - --disable-admission-plugins=License, true for remove the --disable-admission-plugins=License flag .") - flags.IntVar(&o.DeprecatedOptions.KubeInKubeConfig.ApiServerReplicas, "kube-nest-apiserver-replicas", 1, "virtual-cluster kube-apiserver replicas. default is 2.") + flags.IntVar(&o.DeprecatedOptions.KubeInKubeConfig.APIServerReplicas, "kube-nest-apiserver-replicas", 1, "virtual-cluster kube-apiserver replicas. 
default is 2.") flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.ClusterCIDR, "cluster-cidr", "10.244.0.0/16", "Used to set the cluster-cidr of kube-controller-manager and kube-proxy (configmap)") flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.ETCDStorageClass, "etcd-storage-class", "openebs-hostpath", "Used to set the etcd storage class.") flags.StringVar(&o.DeprecatedOptions.KubeInKubeConfig.ETCDUnitSize, "etcd-unit-size", "1Gi", "Used to set the etcd unit size, each node is allocated storage of etcd-unit-size.") diff --git a/deploy/crds/kosmos.io_kubenestconfigurations.yaml b/deploy/crds/kosmos.io_kubenestconfigurations.yaml index a5b93063e..40c180383 100644 --- a/deploy/crds/kosmos.io_kubenestconfigurations.yaml +++ b/deploy/crds/kosmos.io_kubenestconfigurations.yaml @@ -77,6 +77,9 @@ spec: type: string type: array type: object + useTenantDns: + default: false + type: boolean type: object kubeNestType: type: string diff --git a/deploy/crds/kosmos.io_virtualclusters.yaml b/deploy/crds/kosmos.io_virtualclusters.yaml index aa7ff57c9..0d53fad43 100644 --- a/deploy/crds/kosmos.io_virtualclusters.yaml +++ b/deploy/crds/kosmos.io_virtualclusters.yaml @@ -95,6 +95,9 @@ spec: type: string type: array type: object + useTenantDns: + default: false + type: boolean type: object kubeconfig: description: Kubeconfig is the kubeconfig of the virtual kubernetes's diff --git a/deploy/virtual-cluster-components-manifest-cm.yaml b/deploy/virtual-cluster-components-manifest-cm.yaml index 60f69a752..857390457 100644 --- a/deploy/virtual-cluster-components-manifest-cm.yaml +++ b/deploy/virtual-cluster-components-manifest-cm.yaml @@ -5,6 +5,7 @@ data: {"name": "kube-proxy", "path": "/kosmos/manifest/kube-proxy/*.yaml"}, {"name": "calico", "path": "/kosmos/manifest/calico/*.yaml"}, {"name": "keepalived", "path": "/kosmos/manifest/keepalived/*.yaml"}, + {"name": "core-dns-tenant", "path": "/kosmos/manifest/core-dns/tenant/*.yaml"}, ] host-core-dns-components: | [ diff --git a/hack/k8s-in-k8s/g.env.sh b/hack/k8s-in-k8s/g.env.sh index 8431347b2..fd1f0d64e 100644 --- a/hack/k8s-in-k8s/g.env.sh +++ b/hack/k8s-in-k8s/g.env.sh @@ -25,7 +25,7 @@ PATH_KUBELET_CONF=. KUBELET_CONFIG_NAME= HOST_CORE_DNS=10.96.0.10 # kubeadm switch -USE_KUBEADM=true +USE_KUBEADM=false # Generate kubelet.conf TIMEOUT KUBELET_CONF_TIMEOUT=30 diff --git a/hack/verify-staticcheck.sh b/hack/verify-staticcheck.sh index 04bb30d76..077546165 100755 --- a/hack/verify-staticcheck.sh +++ b/hack/verify-staticcheck.sh @@ -5,21 +5,11 @@ set -o nounset set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -GOLANGCI_LINT_VER="v1.52.2" cd "${REPO_ROOT}" -source "hack/util.sh" +make golangci-lint -if util::cmd_exist golangci-lint ; then - echo "Using golangci-lint version:" - golangci-lint version -else - echo "Installing golangci-lint ${GOLANGCI_LINT_VER}" - # https://golangci-lint.run/usage/install/#other-ci - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VER} -fi - -if golangci-lint run; then +if make lint; then echo 'Congratulations! All Go source files have passed staticcheck.' else echo # print one empty line, separate from warning messages. 
diff --git a/pkg/apis/kosmos/v1alpha1/defaults.go b/pkg/apis/kosmos/v1alpha1/defaults.go index 3c7a7c70e..4f116b0f8 100644 --- a/pkg/apis/kosmos/v1alpha1/defaults.go +++ b/pkg/apis/kosmos/v1alpha1/defaults.go @@ -5,6 +5,8 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +// nolint:revive +// SetDefaults_ShadowDaemonSet sets the default values for the ShadowDaemonSet. func SetDefaults_ShadowDaemonSet(obj *ShadowDaemonSet) { updateStrategy := &obj.DaemonSetSpec.UpdateStrategy if updateStrategy.Type == "" { diff --git a/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go b/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go index 0ba3d5f5b..d6b39fd9d 100644 --- a/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go +++ b/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go @@ -11,11 +11,11 @@ const ( KosmosKube KubeNestType = "Kosmos in kube" ) -type ApiServerServiceType string +type APIServerServiceType string const ( - HostNetwork ApiServerServiceType = "hostNetwork" - NodePort ApiServerServiceType = "nodePort" + HostNetwork APIServerServiceType = "hostNetwork" + NodePort APIServerServiceType = "nodePort" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -51,7 +51,7 @@ type KubeInKubeConfig struct { // +optional AdmissionPlugins bool `yaml:"admissionPlugins" json:"admissionPlugins,omitempty"` // +optional - ApiServerReplicas int `yaml:"apiServerReplicas" json:"apiServerReplicas,omitempty"` + APIServerReplicas int `yaml:"apiServerReplicas" json:"apiServerReplicas,omitempty"` // +optional ClusterCIDR string `yaml:"clusterCIDR" json:"clusterCIDR,omitempty"` // +optional @@ -84,7 +84,11 @@ type KubeInKubeConfig struct { // +kubebuilder:validation:Enum=nodePort;hostNetwork // +kubebuilder:default=hostNetwork // +optional - ApiServerServiceType ApiServerServiceType `yaml:"apiServerServiceType" json:"apiServerServiceType,omitempty"` + APIServerServiceType APIServerServiceType `yaml:"apiServerServiceType" json:"apiServerServiceType,omitempty"` + + // +kubebuilder:default=false + // +optional + UseTenantDNS bool `yaml:"useTenantDNS" json:"useTenantDNS,omitempty"` } // TenantEntrypoint contains the configuration for the tenant entrypoint. 
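Several hunks below (agent_controller.go and cluster/helper.go) normalize CIDRs through a new utils.FormatCIDR helper before handing them on. The helper's implementation is not part of this diff, but the accompanying tests pin its behavior: "10.237.6.0/18" becomes "10.237.0.0/18" and "2409:8c2f:3800:0011::0a18:0000/114" becomes "2409:8c2f:3800:11::a18:0/114". A minimal sketch consistent with those expectations, assuming the helper simply canonicalizes via the standard library (hypothetical, not the repository's actual code):

// Hypothetical sketch of utils.FormatCIDR, inferred from the test
// expectations in this diff: parse the CIDR and return the canonical,
// masked network form (e.g. "10.237.6.0/18" -> "10.237.0.0/18").
package utils

import (
	"fmt"
	"net"
)

func FormatCIDR(cidr string) (string, error) {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return "", fmt.Errorf("invalid CIDR %q: %v", cidr, err)
	}
	// net.IPNet.String() applies the mask and compresses IPv6 zeros,
	// matching both expected outputs above.
	return ipNet.String(), nil
}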
diff --git a/pkg/clusterlink/agent-manager/agent_controller.go b/pkg/clusterlink/agent-manager/agent_controller.go index 835bcf0c1..0362ffcdd 100644 --- a/pkg/clusterlink/agent-manager/agent_controller.go +++ b/pkg/clusterlink/agent-manager/agent_controller.go @@ -2,6 +2,7 @@ package agent import ( "context" + "fmt" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -21,6 +22,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/clusterlink/controllers/node" "github.com/kosmos.io/kosmos/pkg/clusterlink/network" kosmosv1alpha1lister "github.com/kosmos.io/kosmos/pkg/generated/listers/kosmos/v1alpha1" + "github.com/kosmos.io/kosmos/pkg/utils" ) const ( @@ -93,6 +95,20 @@ func (r *Reconciler) logResult(nodeConfigSyncStatus networkmanager.NodeConfigSyn } } +func formatNodeConfig(nodeConfig *kosmosv1alpha1.NodeConfig) (*kosmosv1alpha1.NodeConfig, error) { + nodeConfigCopy := nodeConfig.DeepCopy() + + for i, route := range nodeConfigCopy.Spec.Routes { + ipNetStr, err := utils.FormatCIDR(route.CIDR) + if err != nil { + return nil, fmt.Errorf("failed to format nodeconfig route cidr, err: %s", err.Error()) + } + nodeConfigCopy.Spec.Routes[i].CIDR = ipNetStr + } + + return nodeConfigCopy, nil +} + func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { klog.Infof("============ agent starts to reconcile %s ============", request.NamespacedName) @@ -109,6 +125,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{RequeueAfter: RequeueTime}, nil } + reconcileNodeCopy, err := formatNodeConfig(&reconcileNode) + if err != nil { + klog.Errorf("format nodeconfig %s error: %v", request.NamespacedName, err) + return reconcile.Result{RequeueAfter: RequeueTime}, nil + } + localCluster, err := r.ClusterLister.Get(r.ClusterName) if err != nil { klog.Errorf("could not get local cluster, clusterNode: %s, err: %v", r.NodeName, err) @@ -118,7 +140,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( r.NetworkManager.UpdateConfig(localCluster) r.DebounceFunc(func() { - nodeConfigSyncStatus := r.NetworkManager.UpdateFromCRD(&reconcileNode) + nodeConfigSyncStatus := r.NetworkManager.UpdateFromCRD(reconcileNodeCopy) r.logResult(nodeConfigSyncStatus) }) diff --git a/pkg/clusterlink/agent-manager/agent_controller_test.go b/pkg/clusterlink/agent-manager/agent_controller_test.go new file mode 100644 index 000000000..ee38489d5 --- /dev/null +++ b/pkg/clusterlink/agent-manager/agent_controller_test.go @@ -0,0 +1,104 @@ +package agent + +import ( + "testing" + + kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" +) + +func TestFormatNodeConfig(t *testing.T) { + tests := []struct { + name string + input *kosmosv1alpha1.NodeConfig + want *kosmosv1alpha1.NodeConfig + }{ + { + name: "test ipv4 and ipv6", + input: &kosmosv1alpha1.NodeConfig{ + Spec: kosmosv1alpha1.NodeConfigSpec{ + Routes: []kosmosv1alpha1.Route{ + { + CIDR: "2409:8c2f:3800:0011::0a18:0000/114", + }, + { + CIDR: "10.237.6.0/18", + }, + }, + }, + }, + want: &kosmosv1alpha1.NodeConfig{ + Spec: kosmosv1alpha1.NodeConfigSpec{ + Routes: []kosmosv1alpha1.Route{ + { + CIDR: "2409:8c2f:3800:11::a18:0/114", + }, + { + CIDR: "10.237.0.0/18", + }, + }, + }, + }, + }, + { + name: "test ipv6", + input: &kosmosv1alpha1.NodeConfig{ + Spec: kosmosv1alpha1.NodeConfigSpec{ + Routes: []kosmosv1alpha1.Route{ + { + CIDR: "2409:8c2f:3800:0011::0a18:0000/114", + }, + }, + }, + }, + want: &kosmosv1alpha1.NodeConfig{ + Spec: 
kosmosv1alpha1.NodeConfigSpec{ + Routes: []kosmosv1alpha1.Route{ + { + CIDR: "2409:8c2f:3800:11::a18:0/114", + }, + }, + }, + }, + }, + { + name: "test ipv4", + input: &kosmosv1alpha1.NodeConfig{ + Spec: kosmosv1alpha1.NodeConfigSpec{ + Routes: []kosmosv1alpha1.Route{ + { + CIDR: "10.237.6.0/18", + }, + }, + }, + }, + want: &kosmosv1alpha1.NodeConfig{ + Spec: kosmosv1alpha1.NodeConfigSpec{ + Routes: []kosmosv1alpha1.Route{ + { + CIDR: "10.237.0.0/18", + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nodeconfig, err := formatNodeConfig(tt.input) + + if err != nil { + t.Errorf("formatNodeConfig() error = %v", err) + } + + if len(nodeconfig.Spec.Routes) != len(tt.want.Spec.Routes) { + t.Errorf("formatNodeConfig() = %v, want %v", nodeconfig.Spec.Routes, tt.want.Spec.Routes) + } + + for i := range nodeconfig.Spec.Routes { + if nodeconfig.Spec.Routes[i].CIDR != tt.want.Spec.Routes[i].CIDR { + t.Errorf("formatNodeConfig() = %v, want %v", nodeconfig.Spec.Routes[i].CIDR, tt.want.Spec.Routes[i].CIDR) + } + } + }) + } +} diff --git a/pkg/clusterlink/agent-manager/auto_detect_controller.go b/pkg/clusterlink/agent-manager/auto_detect_controller.go index 40c292506..20b3ec5a3 100644 --- a/pkg/clusterlink/agent-manager/auto_detect_controller.go +++ b/pkg/clusterlink/agent-manager/auto_detect_controller.go @@ -34,6 +34,7 @@ const ( ) const ( + // nolint:revive AUTODETECTION_METHOD_CAN_REACH = "can-reach=" ) @@ -97,6 +98,7 @@ func (r *AutoDetectReconciler) newClusterMapFunc() handler.MapFunc { } } +// nolint:revive func (r *AutoDetectReconciler) detectInterfaceName(ctx context.Context) (string, error) { var Cluster kosmosv1alpha1.Cluster @@ -221,9 +223,8 @@ func (r *AutoDetectReconciler) Reconcile(ctx context.Context, request reconcile. 
 if err := r.Update(ctx, newClusterNode); err != nil { klog.Errorf("update clusternode %s error: %v", request.NamespacedName, err) return reconcile.Result{RequeueAfter: AutoDetectRequeueTime}, nil - } else { - klog.V(4).Infof("update clusternode interface: %s, ipv4: %s, ipv6:%s, successed!", newClusterNode.Spec.InterfaceName, newClusterNode.Spec.IP, newClusterNode.Spec.IP6) } + klog.V(4).Infof("update clusternode interface: %s, ipv4: %s, ipv6:%s, succeeded!", newClusterNode.Spec.InterfaceName, newClusterNode.Spec.IP, newClusterNode.Spec.IP6) } else { klog.V(4).Info("clusternode is not need to update") } diff --git a/pkg/clusterlink/clusterlink-operator/agent/agent.go b/pkg/clusterlink/clusterlink-operator/agent/agent.go index 8199abfd6..2d5e55b60 100644 --- a/pkg/clusterlink/clusterlink-operator/agent/agent.go +++ b/pkg/clusterlink/clusterlink-operator/agent/agent.go @@ -24,6 +24,7 @@ const ( ResourceName = "clusterlink-agent" ) +// nolint:revive type AgentInstaller struct { } diff --git a/pkg/clusterlink/clusterlink-operator/elector/elector.go b/pkg/clusterlink/clusterlink-operator/elector/elector.go index d22365896..5873aa5f6 100644 --- a/pkg/clusterlink/clusterlink-operator/elector/elector.go +++ b/pkg/clusterlink/clusterlink-operator/elector/elector.go @@ -22,6 +22,7 @@ const ( ResourceName = "clusterlink-elector" ) +// nolint:revive type ElectorInstaller struct { } diff --git a/pkg/clusterlink/clusterlink-operator/global/global.go b/pkg/clusterlink/clusterlink-operator/global/global.go index de2691684..021ef8411 100644 --- a/pkg/clusterlink/clusterlink-operator/global/global.go +++ b/pkg/clusterlink/clusterlink-operator/global/global.go @@ -48,7 +48,7 @@ func (i *Installer) Install(opt *option.AddonOption) error { } // Uninstall resources related to CR:cluster -func (i *Installer) Uninstall(opt *option.AddonOption) error { +func (i *Installer) Uninstall(_ *option.AddonOption) error { klog.Infof("Don't remove clusterlink namespace on cluster for test") // nsClient := opt.KubeClientSet.CoreV1().Namespaces() // if err := nsClient.Delete(context.TODO(), opt.GetSpecNamespace(), metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { diff --git a/pkg/clusterlink/clusterlink-operator/install.go b/pkg/clusterlink/clusterlink-operator/install.go index e5003b334..cb9d0535d 100644 --- a/pkg/clusterlink/clusterlink-operator/install.go +++ b/pkg/clusterlink/clusterlink-operator/install.go @@ -1,3 +1,4 @@ +// nolint:revive package clusterlink_operator import ( diff --git a/pkg/clusterlink/clusterlink-operator/manager/manager.go b/pkg/clusterlink/clusterlink-operator/manager/manager.go index bd701b250..273b8f7db 100644 --- a/pkg/clusterlink/clusterlink-operator/manager/manager.go +++ b/pkg/clusterlink/clusterlink-operator/manager/manager.go @@ -18,6 +18,7 @@ import ( kosmosutils "github.com/kosmos.io/kosmos/pkg/utils" ) +// nolint:revive type ManagerInstaller struct { } @@ -157,10 +158,7 @@ func (i *ManagerInstaller) Install(opt *option.AddonOption) error { return err } - if err := applyClusterRoleBinding(opt); err != nil { - return err - } - return nil + return applyClusterRoleBinding(opt) } // Uninstall resources related to CR:cluster diff --git a/pkg/clusterlink/clusterlink-operator/operator_controller.go b/pkg/clusterlink/clusterlink-operator/operator_controller.go index 43d05fa40..e17f3295c 100644 --- a/pkg/clusterlink/clusterlink-operator/operator_controller.go +++ b/pkg/clusterlink/clusterlink-operator/operator_controller.go @@ -1,3 +1,4 @@ +//nolint:revive package 
clusterlink_operator import ( diff --git a/pkg/clusterlink/clusterlink-operator/proxy/proxy.go b/pkg/clusterlink/clusterlink-operator/proxy/proxy.go index 4d7af4b8b..18eaf2817 100644 --- a/pkg/clusterlink/clusterlink-operator/proxy/proxy.go +++ b/pkg/clusterlink/clusterlink-operator/proxy/proxy.go @@ -19,6 +19,7 @@ import ( kosmosutils "github.com/kosmos.io/kosmos/pkg/utils" ) +// nolint:revive type ProxyInstaller struct { } @@ -130,11 +131,7 @@ func (i *ProxyInstaller) Install(opt *option.AddonOption) error { return err } - if err := applyService(opt); err != nil { - return err - } - - return nil + return applyService(opt) } // Uninstall resources related to CR:cluster diff --git a/pkg/clusterlink/controllers/calicoippool/calicoippool_controller.go b/pkg/clusterlink/controllers/calicoippool/calicoippool_controller.go index f3edf14da..8a11856f4 100644 --- a/pkg/clusterlink/controllers/calicoippool/calicoippool_controller.go +++ b/pkg/clusterlink/controllers/calicoippool/calicoippool_controller.go @@ -280,7 +280,7 @@ func (c *Controller) OnAdd(obj interface{}) { } // OnUpdate handles object update event and push the object to queue. -func (c *Controller) OnUpdate(oldObj, newObj interface{}) { +func (c *Controller) OnUpdate(_, newObj interface{}) { c.OnAdd(newObj) } @@ -351,9 +351,8 @@ func (c *Controller) Reconcile(key lifted.QueueKey) error { getCIDR := func(cidr string, cidrMap map[string]string) string { if c, exist := cidrMap[cidr]; exist { return c - } else { - return cidr } + return cidr } cidrMap := cluster.Spec.ClusterLinkOptions.GlobalCIDRsMap podCIDRS := cluster.Status.ClusterLinkStatus.PodCIDRs diff --git a/pkg/clusterlink/controllers/calicoippool/calicoippool_controller_test.go b/pkg/clusterlink/controllers/calicoippool/calicoippool_controller_test.go index afe75624b..40d2a079c 100644 --- a/pkg/clusterlink/controllers/calicoippool/calicoippool_controller_test.go +++ b/pkg/clusterlink/controllers/calicoippool/calicoippool_controller_test.go @@ -42,7 +42,7 @@ func (f *fakeIPPoolClient) DeleteIPPool(ipPools []*ExternalClusterIPPool) error return nil } func (f *fakeIPPoolClient) ListIPPools() ([]*ExternalClusterIPPool, []IPPool, error) { - extClusterIpPools := make([]*ExternalClusterIPPool, 0, 5) + extClusterIPPools := make([]*ExternalClusterIPPool, 0, 5) var ippools []IPPool for _, pool := range f.ippools { if strings.HasPrefix(pool.Name, utils.ExternalIPPoolNamePrefix) { @@ -52,12 +52,12 @@ func (f *fakeIPPoolClient) ListIPPools() ([]*ExternalClusterIPPool, []IPPool, er ipPool: ipType, ipType: pool.Spec.CIDR, } - extClusterIpPools = append(extClusterIpPools, extPool) + extClusterIPPools = append(extClusterIPPools, extPool) } else { ippools = append(ippools, IPPool(pool.Spec.CIDR)) } } - return extClusterIpPools, ippools, nil + return extClusterIPPools, ippools, nil } func TestSyncIPPool(t *testing.T) { diff --git a/pkg/clusterlink/controllers/cluster/cluster_controller.go b/pkg/clusterlink/controllers/cluster/cluster_controller.go index 120e454f1..77f6b34b9 100644 --- a/pkg/clusterlink/controllers/cluster/cluster_controller.go +++ b/pkg/clusterlink/controllers/cluster/cluster_controller.go @@ -189,7 +189,7 @@ func (c *Controller) Reconcile(key lifted.QueueKey) error { var serviceCIDRS []string for i := range pods { pod := pods[i] - if isApiServer(pod) { + if isAPIServer(pod) { serviceCIDRS, err = ResolveServiceCIDRs(pod) if err != nil { klog.Errorf("get %s service cidr error: %v", pod.Name, err) @@ -511,6 +511,6 @@ func validIPPool(ippool *calicov3.IPPool) bool { return 
!ippool.Spec.Disabled && !strings.HasPrefix(utils.ExternalIPPoolNamePrefix, ippool.Name) } -func isApiServer(pod *corev1.Pod) bool { +func isAPIServer(pod *corev1.Pod) bool { return pod.Namespace == "kube-system" && strings.HasPrefix(pod.Name, "kube-apiserver") } diff --git a/pkg/clusterlink/controllers/cluster/helper.go b/pkg/clusterlink/controllers/cluster/helper.go index 42c395225..fa188212c 100644 --- a/pkg/clusterlink/controllers/cluster/helper.go +++ b/pkg/clusterlink/controllers/cluster/helper.go @@ -23,7 +23,7 @@ const ( DataStoreType = "datastoreType" EtcdV3 = "etcdv3" - ServiceClusterIpRange = "--service-cluster-ip-range" + ServiceClusterIPRange = "--service-cluster-ip-range" ) type CalicoConfig struct { @@ -67,12 +67,12 @@ func GetCalicoClient(cluster *clusterlinkv1alpha1.Cluster) (clientv3.Interface, calicoAPIConfig := apiconfig.NewCalicoAPIConfig() calicoData := clusterConfigMap.Data - calicoJsonStr, err := json.Marshal(calicoData) + calicoJSONStr, err := json.Marshal(calicoData) if err != nil { klog.Errorf("failed to marshal cluster configmap %s to json string.", cluster.Name) return nil, err } - err = json.Unmarshal(calicoJsonStr, &calicoConfig) + err = json.Unmarshal(calicoJSONStr, &calicoConfig) if err != nil { klog.Errorf("failed to unmarshal json string to calico config, cluster configmap is %s.", cluster.Name) return nil, err @@ -121,12 +121,21 @@ func ResolveServiceCIDRs(pod *corev1.Pod) ([]string, error) { command := container.Command for j := range command { line := command[j] - if strings.HasPrefix(line, ServiceClusterIpRange) { + if strings.HasPrefix(line, ServiceClusterIPRange) { idx := strings.Index(line, "=") - serviceIpRange := line[idx+1:] - serviceCIDRS = strings.Split(serviceIpRange, ",") + serviceIPRange := line[idx+1:] + serviceCIDRS = strings.Split(serviceIPRange, ",") } } } + + for i, cidr := range serviceCIDRS { + ipNetStr, err := utils.FormatCIDR(cidr) + if err != nil { + return nil, fmt.Errorf("failed to format service cidr %s, pod name is %s, err: %s", cidr, pod.Name, err.Error()) + } + serviceCIDRS[i] = ipNetStr + } + return serviceCIDRS, nil } diff --git a/pkg/clusterlink/controllers/cluster/helper_test.go b/pkg/clusterlink/controllers/cluster/helper_test.go new file mode 100644 index 000000000..490e9d540 --- /dev/null +++ b/pkg/clusterlink/controllers/cluster/helper_test.go @@ -0,0 +1,79 @@ +package cluster + +import ( + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func prepareData(crds string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test-app", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "test-image", + Command: []string{ + "kube-apiserver", + fmt.Sprintf("--service-cluster-ip-range=%s", crds), + "--profiling=false", + }, + }, + }, + }, + } +} + +func TestResolveServiceCIDRs(t *testing.T) { + tests := []struct { + name string + input *corev1.Pod + want []string + }{ + { + name: "test ipv4 and ipv6", + input: prepareData("2409:8c2f:3800:0011::0a18:0000/114,10.237.6.0/18"), + want: []string{ + "2409:8c2f:3800:11::a18:0/114", + "10.237.0.0/18", + }, + }, + { + name: "test ipv4", + input: prepareData("10.237.6.0/18"), + want: []string{ + "10.237.0.0/18", + }, + }, + { + name: "test ipv6", + input: prepareData("2409:8c2f:3800:0011::0a18:0000/114"), + want: []string{ + "2409:8c2f:3800:11::a18:0/114", + }, 
+ }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ret, err := ResolveServiceCIDRs(tt.input) + if err != nil { + t.Fatalf("ResolveServiceCIDRs err: %s", err.Error()) + } + + if strings.Join(ret, ",") != strings.Join(tt.want, ",") { + t.Fatalf("value is incorrect!") + } + }) + } +} diff --git a/pkg/clusterlink/controllers/node/node_controller.go b/pkg/clusterlink/controllers/node/node_controller.go index 68a230aed..409451795 100644 --- a/pkg/clusterlink/controllers/node/node_controller.go +++ b/pkg/clusterlink/controllers/node/node_controller.go @@ -135,9 +135,8 @@ func CreateOrUpdateClusterNode(client versioned.Interface, node *clusterlinkv1al _, err := client.KosmosV1alpha1().ClusterNodes().Create(context.Background(), node, metav1.CreateOptions{}) if err != nil { return err - } else { - return nil } + return nil } if err := f(clusterNode); err != nil { return err diff --git a/pkg/clusterlink/controllers/nodecidr/adapter.go b/pkg/clusterlink/controllers/nodecidr/adapter.go index fff0744cb..8f8105d70 100644 --- a/pkg/clusterlink/controllers/nodecidr/adapter.go +++ b/pkg/clusterlink/controllers/nodecidr/adapter.go @@ -38,6 +38,7 @@ type commonAdapter struct { processor lifted.AsyncWorker } +// nolint:revive func NewCommonAdapter(config *rest.Config, clusterNodeLister clusterlister.ClusterNodeLister, processor lifted.AsyncWorker) *commonAdapter { @@ -97,7 +98,7 @@ func (c *commonAdapter) OnAdd(obj interface{}) { } // OnUpdate handles object update event and push the object to queue. -func (c *commonAdapter) OnUpdate(oldObj, newObj interface{}) { +func (c *commonAdapter) OnUpdate(_, newObj interface{}) { runtimeObj, ok := newObj.(*corev1.Node) if !ok { return @@ -122,6 +123,7 @@ type calicoAdapter struct { processor lifted.AsyncWorker } +// nolint:revive func NewCalicoAdapter(config *rest.Config, clusterNodeLister clusterlister.ClusterNodeLister, processor lifted.AsyncWorker) *calicoAdapter { @@ -217,7 +219,7 @@ func (c *calicoAdapter) OnAdd(obj interface{}) { } // OnUpdate handles object update event and push the object to queue. -func (c *calicoAdapter) OnUpdate(oldObj, newObj interface{}) { +func (c *calicoAdapter) OnUpdate(_, newObj interface{}) { klog.V(7).Info("update event") runtimeObj, ok := newObj.(*unstructured.Unstructured) if !ok { return diff --git a/pkg/clusterlink/controllers/nodecidr/nodecidr_controller.go b/pkg/clusterlink/controllers/nodecidr/nodecidr_controller.go index 87195187d..4e42761e3 100644 --- a/pkg/clusterlink/controllers/nodecidr/nodecidr_controller.go +++ b/pkg/clusterlink/controllers/nodecidr/nodecidr_controller.go @@ -52,7 +52,7 @@ type Controller struct { ctx context.Context } -func NewNodeCIDRController(config *rest.Config, clusterName string, clusterLinkClient versioned.Interface, RateLimiterOptions lifted.RateLimitOptions, context context.Context) *Controller { +func NewNodeCIDRController(context context.Context, config *rest.Config, clusterName string, clusterLinkClient versioned.Interface, RateLimiterOptions lifted.RateLimitOptions) *Controller { return &Controller{ clusterLinkClient: clusterLinkClient, config: config, @@ -232,7 +232,7 @@ func (c *Controller) OnAdd(obj interface{}) { } // OnUpdate handles object update event and push the object to queue. 
-func (c *Controller) OnUpdate(oldObj, newObj interface{}) { +func (c *Controller) OnUpdate(_, newObj interface{}) { c.OnAdd(newObj) } @@ -241,7 +241,7 @@ func (c *Controller) OnDelete(obj interface{}) { c.OnAdd(obj) } -func (c *Controller) EventFilter(obj interface{}) bool { +func (c *Controller) EventFilter(_ interface{}) bool { //todo return true } diff --git a/pkg/clusterlink/network-manager/controller.go b/pkg/clusterlink/network-manager/controller.go index 50de9a4c9..8f7bfd283 100644 --- a/pkg/clusterlink/network-manager/controller.go +++ b/pkg/clusterlink/network-manager/controller.go @@ -1,3 +1,4 @@ +// nolint:revive package network_manager import ( diff --git a/pkg/clusterlink/network-manager/handlers/nodeconfig.go b/pkg/clusterlink/network-manager/handlers/nodeconfig.go index e352c6671..00509dda6 100644 --- a/pkg/clusterlink/network-manager/handlers/nodeconfig.go +++ b/pkg/clusterlink/network-manager/handlers/nodeconfig.go @@ -27,7 +27,7 @@ func (c *NodeConfig) ToString() string { return string(b) } -func (c *NodeConfig) ToJson() ([]byte, error) { +func (c *NodeConfig) ToJSON() ([]byte, error) { return json.Marshal(c) } diff --git a/pkg/clusterlink/network-manager/handlers/root_handler.go b/pkg/clusterlink/network-manager/handlers/root_handler.go index 9edafd693..9c03e620e 100644 --- a/pkg/clusterlink/network-manager/handlers/root_handler.go +++ b/pkg/clusterlink/network-manager/handlers/root_handler.go @@ -4,7 +4,7 @@ type RootHandler struct { Next } -func (h *RootHandler) Do(c *Context) (err error) { +func (h *RootHandler) Do(_ *Context) (err error) { return } diff --git a/pkg/clusterlink/network-manager/network_manager.go b/pkg/clusterlink/network-manager/network_manager.go index e3d2c2e89..054311bec 100644 --- a/pkg/clusterlink/network-manager/network_manager.go +++ b/pkg/clusterlink/network-manager/network_manager.go @@ -1,3 +1,4 @@ +// nolint:revive package network_manager import ( @@ -109,7 +110,7 @@ func (n *Manager) GetConfigsByNodeName(nodeName string) *handlers.NodeConfig { return n.NodeConfigs[nodeName] } -func (n *Manager) Apply(nodeName string) error { +func (n *Manager) Apply(_ string) error { return nil } diff --git a/pkg/clusterlink/network/adapter.go b/pkg/clusterlink/network/adapter.go index c8ce08b80..bcc9cd0ce 100644 --- a/pkg/clusterlink/network/adapter.go +++ b/pkg/clusterlink/network/adapter.go @@ -108,7 +108,7 @@ func (n *DefaultNetWork) UpdateArps([]clusterlinkv1alpha1.Arp) error { return ErrNotImplemented } -func (n *DefaultNetWork) UpdateFdbs(fdbs []clusterlinkv1alpha1.Fdb) error { +func (n *DefaultNetWork) UpdateFdbs(_ []clusterlinkv1alpha1.Fdb) error { return ErrNotImplemented } diff --git a/pkg/clusterlink/network/constant.go b/pkg/clusterlink/network/constant.go index baa65ad14..015ae6170 100644 --- a/pkg/clusterlink/network/constant.go +++ b/pkg/clusterlink/network/constant.go @@ -1,3 +1,4 @@ +// nolint:revive package network import ( @@ -13,13 +14,6 @@ const AutoSelectInterfaceFlag = "*" type VxlanType int -const ( - BRIDGE VxlanType = 0 - LOCAL VxlanType = 1 -) - -// type IPFamilySupport string - // const ( // IPFamilyTypeALL IPFamilySupport = "0" // IPFamilyTypeIPV4 IPFamilySupport = "1" diff --git a/pkg/clusterlink/network/device.go b/pkg/clusterlink/network/device.go index e4ae3e1f8..8c4704eb8 100644 --- a/pkg/clusterlink/network/device.go +++ b/pkg/clusterlink/network/device.go @@ -1,3 +1,4 @@ +// nolint:typecheck package network import ( @@ -53,7 +54,7 @@ func getIfaceIPByName(name string) (*IfaceInfo, error) { return devIface, nil } -func 
createNewVxlanIface(name string, addrIPWithMask *netlink.Addr, vxlanId int, vxlanPort int, hardwareAddr net.HardwareAddr, rIface *IfaceInfo, deviceIP string, vtepDevIndex int) error { +func createNewVxlanIface(name string, addrIPWithMask *netlink.Addr, vxlanID int, vxlanPort int, hardwareAddr net.HardwareAddr, rIface *IfaceInfo, deviceIP string, vtepDevIndex int) error { // srcAddr := rIface.ip klog.Infof("name %v ------------------------- %v", name, deviceIP) @@ -65,7 +66,7 @@ func createNewVxlanIface(name string, addrIPWithMask *netlink.Addr, vxlanId int, HardwareAddr: hardwareAddr, }, SrcAddr: net.ParseIP(deviceIP), - VxlanId: vxlanId, + VxlanId: vxlanID, Port: vxlanPort, Learning: false, VtepDevIndex: vtepDevIndex, @@ -104,6 +105,7 @@ func createNewVxlanIface(name string, addrIPWithMask *netlink.Addr, vxlanId int, } // load device info from environment +// nolint:revive func loadDevices() ([]clusterlinkv1alpha1.Device, error) { ret := []clusterlinkv1alpha1.Device{} @@ -112,9 +114,8 @@ func loadDevices() ([]clusterlinkv1alpha1.Device, error) { if err != nil { if errors.As(err, &netlink.LinkNotFoundError{}) { continue - } else { - return nil, err } + return nil, err } if vxlanIface.Type() != (&netlink.Vxlan{}).Type() { @@ -229,11 +230,7 @@ func addDevice(d clusterlinkv1alpha1.Device) error { return err } - if err := updateDeviceConfig(d.Name, family); err != nil { - return err - } - - return nil + return updateDeviceConfig(d.Name, family) } func deleteDevice(d clusterlinkv1alpha1.Device) error { @@ -257,14 +254,14 @@ func deleteDevice(d clusterlinkv1alpha1.Device) error { func updateDeviceConfig(name string, ipFamily int) error { if ipFamily == netlink.FAMILY_V6 { - if err := UpdateDefaultIp6tablesBehavior(name); err != nil { + if err := UpdateDefaultIP6tablesBehavior(name); err != nil { return err } if err := EnableDisableIPV6ByIFaceNmae(name); err != nil { return err } } else { - if err := UpdateDefaultIp4tablesBehavior(name); err != nil { + if err := UpdateDefaultIP4tablesBehavior(name); err != nil { return err } @@ -280,7 +277,7 @@ func UpdateDefaultIptablesAndKernalConfig(name string, ipFamily int) error { // ipv6 if ipFamily == netlink.FAMILY_V6 { - if err := UpdateDefaultIp6tablesBehavior(name); err != nil { + if err := UpdateDefaultIP6tablesBehavior(name); err != nil { return err } if err := EnableDisableIPV6ByIFaceNmae(name); err != nil { @@ -289,7 +286,7 @@ func UpdateDefaultIptablesAndKernalConfig(name string, ipFamily int) error { } if ipFamily == netlink.FAMILY_V4 { - if err := UpdateDefaultIp4tablesBehavior(name); err != nil { + if err := UpdateDefaultIP4tablesBehavior(name); err != nil { return err } if err := EnableLooseModeByIFaceNmae(name); err != nil { @@ -307,7 +304,7 @@ func UpdateDefaultIptablesAndKernalConfig(name string, ipFamily int) error { if len(nicName) == 0 { continue } - if err := UpdateDefaultIp4tablesBehavior(nicName); err != nil { + if err := UpdateDefaultIP4tablesBehavior(nicName); err != nil { klog.Errorf("Try to add iptables rule for %s: %v", nicName, err) } diff --git a/pkg/clusterlink/network/env.go b/pkg/clusterlink/network/env.go index d47f5fde0..9f2824eda 100644 --- a/pkg/clusterlink/network/env.go +++ b/pkg/clusterlink/network/env.go @@ -38,7 +38,7 @@ func (w *WatchDog) AddTask(path string, contents []byte) { w.WatchTasks = append(w.WatchTasks, WatchTask{Path: path, Contents: contents}) } -func (w *WatchDog) Watch(ctx context.Context) { +func (w *WatchDog) Watch(_ context.Context) { w.lock.Lock() defer w.lock.Unlock() @@ -60,7 +60,7 @@ func 
init() { go wait.UntilWithContext(context.Background(), watchDog.Watch, 30*time.Second) } -func UpdateDefaultIp6tablesBehavior(ifaceName string) error { +func UpdateDefaultIP6tablesBehavior(ifaceName string) error { iptableHandler, err := iptables.New(ipt.ProtocolIPv6) if err != nil { return err //nolint:wrapcheck // Let the caller wrap it @@ -75,7 +75,7 @@ func UpdateDefaultIp6tablesBehavior(ifaceName string) error { return nil } -func UpdateDefaultIp4tablesBehavior(ifaceName string) error { +func UpdateDefaultIP4tablesBehavior(ifaceName string) error { iptableHandler, err := iptables.New(ipt.ProtocolIPv4) if err != nil { return err //nolint:wrapcheck // Let the caller wrap it diff --git a/pkg/clusterlink/network/iptables.go b/pkg/clusterlink/network/iptables.go index 0d34b2b0b..99ed669e0 100644 --- a/pkg/clusterlink/network/iptables.go +++ b/pkg/clusterlink/network/iptables.go @@ -135,13 +135,11 @@ func translateChainName(key string, f bool) string { } if f { return chainMap[key] - } else { - if chainMap["PREROUTING"] == key { - return "PREROUTING" - } else { - return "POSTROUTING" - } } + if chainMap["PREROUTING"] == key { + return "PREROUTING" + } + return "POSTROUTING" } func groupByTableChain(records []IptablesRecord) map[string][]IptablesRecord { diff --git a/pkg/clusterlink/network/neigh.go b/pkg/clusterlink/network/neigh.go index bddab28de..06a96d8c1 100644 --- a/pkg/clusterlink/network/neigh.go +++ b/pkg/clusterlink/network/neigh.go @@ -12,11 +12,14 @@ import ( type NeighType int const ( + // nolint:revive NEIGH_FDB NeighType = iota + // nolint:revive NEIGH_ARP ) -var NEIGH_TYPE_MAP map[NeighType]string = map[NeighType]string{ +// nolint:revive +var NEIGH_TYPE_MAP = map[NeighType]string{ NEIGH_ARP: "arp", NEIGH_FDB: "fbd", } diff --git a/pkg/clusterlink/network/route.go b/pkg/clusterlink/network/route.go index 0ac83e772..34679c3e6 100644 --- a/pkg/clusterlink/network/route.go +++ b/pkg/clusterlink/network/route.go @@ -73,9 +73,8 @@ func loadRoutes() ([]clusterlinkv1alpha1.Route, error) { if err != nil { if errors.As(err, &netlink.LinkNotFoundError{}) { continue - } else { - return nil, err } + return nil, err } for _, r := range routes { ret = append(ret, clusterlinkv1alpha1.Route{ diff --git a/pkg/clusterlink/proxy/store/cluster_cache.go b/pkg/clusterlink/proxy/store/cluster_cache.go index 504e13193..fb3ee9993 100644 --- a/pkg/clusterlink/proxy/store/cluster_cache.go +++ b/pkg/clusterlink/proxy/store/cluster_cache.go @@ -104,7 +104,7 @@ func (c *Cache) Stop() { } } -func (c *Cache) GetResourceFromCache(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) { +func (c *Cache) GetResourceFromCache(_ context.Context, _ schema.GroupVersionResource, _, _ string) (runtime.Object, string, error) { return nil, "", nil } diff --git a/pkg/clusterlink/proxy/store/store.go b/pkg/clusterlink/proxy/store/store.go index 815de370b..f36d96c5b 100644 --- a/pkg/clusterlink/proxy/store/store.go +++ b/pkg/clusterlink/proxy/store/store.go @@ -122,7 +122,7 @@ func (s *store) Delete(context.Context, string, runtime.Object, *storage.Precond } // GuaranteedUpdate implements storage.Interface. 
-func (s *store) GuaranteedUpdate(ctx context.Context, key string, destination runtime.Object, ignoreNotFound bool, preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error { +func (s *store) GuaranteedUpdate(_ context.Context, _ string, _ runtime.Object, _ bool, _ *storage.Preconditions, _ storage.UpdateFunc, _ runtime.Object) error { return fmt.Errorf("update is not suppported in proxy store") } diff --git a/pkg/clustertree/cluster-manager/cluster_controller.go b/pkg/clustertree/cluster-manager/cluster_controller.go index 5b883b315..01e17faf5 100644 --- a/pkg/clustertree/cluster-manager/cluster_controller.go +++ b/pkg/clustertree/cluster-manager/cluster_controller.go @@ -1,3 +1,4 @@ +// nolint:revive package clusterManager import ( diff --git a/pkg/clustertree/cluster-manager/controllers/common_controller.go b/pkg/clustertree/cluster-manager/controllers/common_controller.go index adaa8c651..c95979a4f 100644 --- a/pkg/clustertree/cluster-manager/controllers/common_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/common_controller.go @@ -27,10 +27,16 @@ import ( const SyncResourcesRequeueTime = 10 * time.Second +// nolint:revive var SYNC_GVRS = []schema.GroupVersionResource{utils.GVR_CONFIGMAP, utils.GVR_SECRET} + +// nolint:revive var SYNC_OBJS = []client.Object{&corev1.ConfigMap{}, &corev1.Secret{}} +// nolint:revive const SYNC_KIND_CONFIGMAP = "ConfigMap" + +// nolint:revive const SYNC_KIND_SECRET = "Secret" type SyncResourcesReconciler struct { @@ -83,7 +89,7 @@ func (r *SyncResourcesReconciler) Reconcile(ctx context.Context, request reconci return reconcile.Result{}, nil } -func (r *SyncResourcesReconciler) SetupWithManager(mgr manager.Manager, gvr schema.GroupVersionResource) error { +func (r *SyncResourcesReconciler) SetupWithManager(mgr manager.Manager, _ schema.GroupVersionResource) error { if r.Client == nil { r.Client = mgr.GetClient() } @@ -99,7 +105,7 @@ func (r *SyncResourcesReconciler) SetupWithManager(mgr manager.Manager, gvr sche return true } - if err := ctrl.NewControllerManagedBy(mgr). + return ctrl.NewControllerManagedBy(mgr). Named(r.ControllerName). WithOptions(controller.Options{}). For(r.Object, builder.WithPredicates(predicate.Funcs{ @@ -116,10 +122,7 @@ func (r *SyncResourcesReconciler) SetupWithManager(mgr manager.Manager, gvr sche return false }, })). 
- Complete(r); err != nil { - return err - } - return nil + Complete(r) } func (r *SyncResourcesReconciler) SyncResource(ctx context.Context, request reconcile.Request, lr *leafUtils.LeafClientResource) error { diff --git a/pkg/clustertree/cluster-manager/controllers/mcs/serviceexport_controller.go b/pkg/clustertree/cluster-manager/controllers/mcs/serviceexport_controller.go index e0c4bbb43..98771d275 100644 --- a/pkg/clustertree/cluster-manager/controllers/mcs/serviceexport_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/mcs/serviceexport_controller.go @@ -192,9 +192,8 @@ func (c *ServiceExportController) updateEndpointSlice(eps *discoveryv1.EndpointS } else { if apierrors.IsNotFound(getErr) { return nil - } else { - klog.Errorf("Failed to get updated endpointSlice %s/%s: %v", eps.Namespace, eps.Name, getErr) } + klog.Errorf("Failed to get updated endpointSlice %s/%s: %v", eps.Namespace, eps.Name, getErr) } return updateErr @@ -221,9 +220,8 @@ func (c *ServiceExportController) updateServiceExport(export *mcsv1alpha1.Servic } else { if apierrors.IsNotFound(getErr) { return nil - } else { - klog.Errorf("Failed to get serviceExport %s/%s: %v", export.Namespace, export.Name, getErr) } + klog.Errorf("Failed to get serviceExport %s/%s: %v", export.Namespace, export.Name, getErr) } return updateErr diff --git a/pkg/clustertree/cluster-manager/controllers/mcs/serviceimport_controller.go b/pkg/clustertree/cluster-manager/controllers/mcs/serviceimport_controller.go index 7a8e1bf63..e7d5f036e 100644 --- a/pkg/clustertree/cluster-manager/controllers/mcs/serviceimport_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/mcs/serviceimport_controller.go @@ -294,9 +294,8 @@ func (c *ServiceImportController) updateEndpointSlice(eps *discoveryv1.EndpointS } else { if apierrors.IsNotFound(getErr) { return nil - } else { - klog.Errorf("Failed to get updated endpointSlice %s/%s: %v", eps.Namespace, eps.Name, getErr) } + klog.Errorf("Failed to get updated endpointSlice %s/%s: %v", eps.Namespace, eps.Name, getErr) } return updateErr @@ -360,9 +359,8 @@ func (c *ServiceImportController) createOrUpdateServiceInClient(service *corev1. 
 		if err = c.LeafClient.Create(context.TODO(), service); err != nil {
 			klog.Errorf("Create serviceImport service(%s/%s) in client cluster %s failed, Error: %v", service.Namespace, service.Name, c.LeafNodeName, err)
 			return err
-		} else {
-			return nil
 		}
+		return nil
 	}
 	klog.Errorf("Get service(%s/%s) from in cluster %s failed, Error: %v", service.Namespace, service.Name, c.LeafNodeName, err)
 	return err
@@ -387,7 +385,7 @@ func (c *ServiceImportController) OnAdd(obj interface{}) {
 	c.processor.Enqueue(runtimeObj)
 }
 
-func (c *ServiceImportController) OnUpdate(old interface{}, new interface{}) {
+func (c *ServiceImportController) OnUpdate(_ interface{}, new interface{}) {
 	runtimeObj, ok := new.(runtime.Object)
 	if !ok {
 		return
@@ -561,9 +559,8 @@ func (c *ServiceImportController) updateServiceImport(serviceImport *mcsv1alpha1
 		} else {
 			if apierrors.IsNotFound(getErr) {
 				return nil
-			} else {
-				klog.Errorf("Failed to get updated serviceImport %s/%s in cluster %s: %v", serviceImport.Namespace, serviceImport.Name, c.LeafNodeName, getErr)
 			}
+			klog.Errorf("Failed to get updated serviceImport %s/%s in cluster %s: %v", serviceImport.Namespace, serviceImport.Name, c.LeafNodeName, getErr)
 		}
 
 		return updateErr
diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go
index 5144e691d..9281c10e7 100644
--- a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go
@@ -73,7 +73,7 @@ func (dopt *rootDeleteOption) ApplyToDelete(opt *client.DeleteOptions) {
 	opt.GracePeriodSeconds = dopt.GracePeriodSeconds
 }
 
-func NewRootDeleteOption(pod *corev1.Pod) client.DeleteOption {
+func NewRootDeleteOption(_ *corev1.Pod) client.DeleteOption {
 	// TODO
 	//gracePeriodSeconds := pod.DeletionGracePeriodSeconds
 	//
@@ -123,9 +123,8 @@ func DeletePodInRootCluster(ctx context.Context, rootnamespacedname types.Namesp
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil
-		} else {
-			return err
 		}
+		return err
 	}
 
 	rPodCopy := rPod.DeepCopy()
diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go b/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go
index 32f603c9f..62abc3873 100644
--- a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go
@@ -212,13 +212,11 @@ func (r *RootPodReconciler) Reconcile(ctx context.Context, request reconcile.Req
 			if err := r.CreatePodInLeafCluster(ctx, lr, &rootpod, r.GlobalLeafManager.GetClusterNode(rootpod.Spec.NodeName).LeafNodeSelector); err != nil {
-				klog.Errorf("create pod inleaf error, err: %s", err)
+				klog.Errorf("create pod in leaf error, err: %s", err)
 				return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
-			} else {
-				return reconcile.Result{}, nil
 			}
-		} else {
-			klog.Errorf("get pod in leaf error[3]: %v, %s", err, request.NamespacedName)
-			return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
+			return reconcile.Result{}, nil
 		}
+		klog.Errorf("get pod in leaf error[3]: %v, %s", err, request.NamespacedName)
+		return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil
 	}
 
 	// update pod in leaf
@@ -410,9 +408,8 @@ func (r *RootPodReconciler) createSATokenInLeafCluster(ctx context.Context, lr *
 		if err := r.createStorageInLeafCluster(ctx, lr, utils.GVR_SECRET, []string{rootSecretName}, pod, clusterNodeInfo); err == nil {
 			klog.Info("create secret rootSecretName in leaf cluster success")
 			return true, nil
-		} else {
-			return false, err
 		}
+		return false, err
 	}); err != nil {
 		ch <- fmt.Sprintf("could not create secret token %s in leaf cluster: %v", rootSecretName, err)
 	}
@@ -499,9 +496,8 @@ func (r *RootPodReconciler) createConfigMapInLeafCluster(ctx context.Context, lr
 	if err = wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) {
 		if err = r.createStorageInLeafCluster(ctx, lr, utils.GVR_CONFIGMAP, []string{configMapName}, pod, clusterNodeInfo); err == nil {
 			return true, nil
-		} else {
-			return false, err
 		}
+		return false, err
 	}); err != nil {
 		ch <- fmt.Sprintf("could not create configmap %s in member cluster: %v", configMapName, err)
 	}
@@ -544,9 +540,8 @@ func (r *RootPodReconciler) createSecretInLeafCluster(ctx context.Context, lr *l
 	if err = wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) {
 		if err = r.createStorageInLeafCluster(ctx, lr, utils.GVR_SECRET, []string{secretName}, pod, clusterNodeInfo); err == nil {
 			return true, nil
-		} else {
-			return false, err
 		}
+		return false, err
 	}); err != nil {
 		ch <- fmt.Sprintf("could not create secret %s in member cluster: %v", secretName, err)
 	}
@@ -965,19 +960,17 @@ func (r *RootPodReconciler) CreatePodInLeafCluster(ctx context.Context, lr *leaf
 			if !errors.IsAlreadyExists(createErr) {
 				klog.V(4).Infof("Namespace %s create failed error: %v", basicPod.Namespace, createErr)
 				return err
-			} else {
-				// namespace already existed, skip create
-				klog.V(4).Info("Namespace %s already existed: %v", basicPod.Namespace, createErr)
 			}
+			// namespace already existed, skip create
+			klog.V(4).Infof("Namespace %s already existed: %v", basicPod.Namespace, createErr)
 		}
 	}
 
 	if err := r.createVolumes(ctx, lr, basicPod, clusterNodeInfo); err != nil {
 		klog.Errorf("Creating Volumes error %+v", basicPod)
 		return err
-	} else {
-		klog.V(4).Infof("Creating Volumes successed %+v", basicPod)
 	}
+	klog.V(4).Infof("Creating Volumes succeeded %+v", basicPod)
 
 	r.projectedHandler(ctx, lr, basicPod)
diff --git a/pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go b/pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go
index 5a61aa19e..a03a17f43 100644
--- a/pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go
+++ b/pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go
@@ -35,7 +35,7 @@ type ConfigMapHandler struct {
 }
 
 // BeforeGetInLeaf The name of the host cluster kube-root-ca.crt in the leaf group is master-root-ca.crt
-func (c *ConfigMapHandler) BeforeGetInLeaf(ctx context.Context, r *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, _ *leafUtils.ClusterNode) error {
+func (c *ConfigMapHandler) BeforeGetInLeaf(_ context.Context, _ *RootPodReconciler, _ *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, _ *corev1.Pod, _ *leafUtils.ClusterNode) error {
 	if unstructuredObj.GetName() == utils.RooTCAConfigMapName {
 		unstructuredObj.SetName(utils.MasterRooTCAName)
 		klog.V(4).Infof("Modify the name of the configmap for the CA: %s", utils.MasterRooTCAName)
@@ -43,18 +43,18 @@ func (c *ConfigMapHandler) BeforeGetInLeaf(ctx context.Context, r *RootPodReconc
 	return nil
 }
 
-func (c *ConfigMapHandler) BeforeCreateInLeaf(ctx context.Context, r *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, _ *leafUtils.ClusterNode) error {
+func (c *ConfigMapHandler) BeforeCreateInLeaf(_ context.Context, _ *RootPodReconciler, _ *leafUtils.LeafResource, _ *unstructured.Unstructured, _ *corev1.Pod, _ *leafUtils.ClusterNode) error {
 	return nil
 }
 
 type SecretHandler struct {
 }
 
-func (s *SecretHandler) BeforeGetInLeaf(ctx context.Context, r *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, _ *leafUtils.ClusterNode) error {
+func (s *SecretHandler) BeforeGetInLeaf(_ context.Context, _ *RootPodReconciler, _ *leafUtils.LeafResource, _ *unstructured.Unstructured, _ *corev1.Pod, _ *leafUtils.ClusterNode) error {
 	return nil
 }
 
-func (s *SecretHandler) BeforeCreateInLeaf(ctx context.Context, r *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, _ *leafUtils.ClusterNode) error {
+func (s *SecretHandler) BeforeCreateInLeaf(ctx context.Context, r *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, _ *corev1.Pod, _ *leafUtils.ClusterNode) error {
 	secretObj := &corev1.Secret{}
 	err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.Object, secretObj)
 	if err != nil {
@@ -72,11 +72,11 @@ func (s *SecretHandler) BeforeCreateInLeaf(ctx context.Context, r *RootPodReconc
 type PVCHandler struct {
 }
 
-func (v *PVCHandler) BeforeGetInLeaf(_ context.Context, _ *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error {
+func (v *PVCHandler) BeforeGetInLeaf(_ context.Context, _ *RootPodReconciler, _ *leafUtils.LeafResource, _ *unstructured.Unstructured, _ *corev1.Pod, _ *leafUtils.ClusterNode) error {
 	return nil
 }
 
-func (v *PVCHandler) BeforeCreateInLeaf(_ context.Context, _ *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error {
+func (v *PVCHandler) BeforeCreateInLeaf(_ context.Context, _ *RootPodReconciler, _ *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error {
 	if rootpod == nil || len(rootpod.Spec.NodeName) == 0 {
 		return nil
 	}
diff --git a/pkg/clustertree/cluster-manager/controllers/pvc/leaf_pvc_controller.go b/pkg/clustertree/cluster-manager/controllers/pvc/leaf_pvc_controller.go
index 9224b133d..ff821d3e3 100644
--- a/pkg/clustertree/cluster-manager/controllers/pvc/leaf_pvc_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/pvc/leaf_pvc_controller.go
@@ -146,7 +146,7 @@ func (l *LeafPVCController) SetupWithManager(mgr manager.Manager) error {
 		Complete(l)
 }
 
-func filterPVC(leafPVC *v1.PersistentVolumeClaim, nodeName string) error {
+func filterPVC(leafPVC *v1.PersistentVolumeClaim, _ string) error {
 	labelSelector := leafPVC.Spec.Selector.DeepCopy()
 	leafPVC.Spec.Selector = nil
 	leafPVC.ObjectMeta.UID = ""
diff --git a/pkg/clustertree/cluster-manager/controllers/pvc/oneway_pvc_controller.go b/pkg/clustertree/cluster-manager/controllers/pvc/oneway_pvc_controller.go
index 519903ba2..b8c3ebf49 100644
--- a/pkg/clustertree/cluster-manager/controllers/pvc/oneway_pvc_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/pvc/oneway_pvc_controller.go
@@ -134,7 +134,7 @@ func (c *OnewayPVCController) Reconcile(ctx context.Context, request reconcile.R
 	return c.ensureLeafPVC(ctx, leaf, lcr, rootPVC)
 }
 
-func (c *OnewayPVCController) clearLeafPVC(ctx context.Context, leaf *leafUtils.LeafResource, leafClient *leafUtils.LeafClientResource, pvc *corev1.PersistentVolumeClaim) (reconcile.Result, error) {
+func (c *OnewayPVCController) clearLeafPVC(_ context.Context, _ *leafUtils.LeafResource, _ *leafUtils.LeafClientResource, _ *corev1.PersistentVolumeClaim) (reconcile.Result, error) {
 	return reconcile.Result{}, nil
 }
diff --git a/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_endpointslice_controller.go b/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_endpointslice_controller.go
index fc7fe6768..3e42d9453 100644
--- a/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_endpointslice_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_endpointslice_controller.go
@@ -77,10 +77,9 @@ func (c *SimpleSyncEPSController) Reconcile(ctx context.Context, request reconci
 		if apierrors.IsNotFound(err) {
-			klog.Errorf("Service %s/%s not found,ignore it, err: %v", request.Namespace, serviceName, err)
+			klog.Errorf("Service %s/%s not found, ignore it, err: %v", request.Namespace, serviceName, err)
 			return controllerruntime.Result{}, nil
-		} else {
-			klog.Errorf("Get service %s/%s failed, err: %v", request.Namespace, serviceName, err)
-			return controllerruntime.Result{Requeue: true}, err
 		}
+		klog.Errorf("Get service %s/%s failed, err: %v", request.Namespace, serviceName, err)
+		return controllerruntime.Result{Requeue: true}, err
 	}
 	if !hasAutoMCSAnnotation(service) && !shouldEnqueueEps(eps, c.AutoCreateMCSPrefix, c.ReservedNamespaces) {
 		klog.V(4).Infof("Service %s/%s does not have auto mcs annotation and should not be enqueued, ignore it", request.Namespace, serviceName)
@@ -293,9 +292,8 @@ func (c *SimpleSyncEPSController) updateEndpointSlice(slice *discoveryv1.Endpoin
 		} else {
 			if apierrors.IsNotFound(getErr) {
 				return nil
-			} else {
-				klog.Errorf("Failed to get updated endpointSlice %s/%s: %v", eps.Namespace, eps.Name, getErr)
 			}
+			klog.Errorf("Failed to get updated endpointSlice %s/%s: %v", eps.Namespace, eps.Name, getErr)
 		}
 
 		return updateErr
diff --git a/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_service_controller.go b/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_service_controller.go
index 17fad2f5f..9f8863b78 100644
--- a/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_service_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/svc/simple_sync_service_controller.go
@@ -299,9 +299,8 @@ func (c *SimpleSyncServiceController) createOrUpdateServiceInClient(service *cor
 		if err = leafClient.Client.Create(context.TODO(), service); err != nil {
 			klog.Errorf("Create serviceImport service(%s/%s) in client cluster %s failed, Error: %v", service.Namespace, service.Name, leafManger.Cluster.Name, err)
 			return err
-		} else {
-			return nil
 		}
+		return nil
 	}
 	klog.Errorf("Get service(%s/%s) from in cluster %s failed, Error: %v", service.Namespace, service.Name, leafManger.Cluster.Name, err)
 	return err
diff --git a/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_controller.go b/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_controller.go
index cde09087d..5d6fab6fd 100644
--- a/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_controller.go
+++ b/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_controller.go
@@ -147,7 +147,7 @@ func (dsc *DaemonSetsController) addDaemonSet(obj interface{}) {
 	dsc.processor.Enqueue(ds)
 }
 
-func (dsc *DaemonSetsController) updateDaemonSet(oldObj, newObj interface{}) {
+func (dsc *DaemonSetsController) updateDaemonSet(_, newObj interface{}) {
 	newDS := newObj.(*kosmosv1alpha1.DaemonSet)
 	klog.V(4).Infof("Updating daemon set %s", newDS.Name)
 	dsc.processor.Enqueue(newDS)
@@ -177,7 +177,7 @@ func (dsc *DaemonSetsController) addShadowDaemonSet(obj interface{}) {
 	dsc.processShadowDaemonSet(sds)
 }
 
-func (dsc *DaemonSetsController) updateShadowDaemonSet(oldObj, newObj interface{}) {
+func (dsc *DaemonSetsController) updateShadowDaemonSet(_, newObj interface{}) {
 	newSDS := newObj.(*kosmosv1alpha1.ShadowDaemonSet)
 	klog.V(4).Infof("updating shadow daemon set %s", newSDS.Name)
 	dsc.processShadowDaemonSet(newSDS)
@@ -189,7 +189,7 @@ func (dsc *DaemonSetsController) deleteShadowDaemonSet(obj interface{}) {
 	dsc.processShadowDaemonSet(sds)
 }
 
-func (dsc *DaemonSetsController) processCluster(cluster *kosmosv1alpha1.Cluster) {
+func (dsc *DaemonSetsController) processCluster() {
 	//TODO add should run on node logic
 	list, err := dsc.dsLister.List(labels.Everything())
 	if err != nil {
@@ -201,19 +201,16 @@ func (dsc *DaemonSetsController) processCluster(cluster *kosmosv1alpha1.Cluster)
 	}
 }
 
-func (dsc *DaemonSetsController) addCluster(obj interface{}) {
-	cluster := obj.(*kosmosv1alpha1.Cluster)
-	dsc.processCluster(cluster)
+func (dsc *DaemonSetsController) addCluster(_ interface{}) {
+	dsc.processCluster()
 }
 
-func (dsc *DaemonSetsController) updateCluster(old interface{}, new interface{}) {
-	cluster := new.(*kosmosv1alpha1.Cluster)
-	dsc.processCluster(cluster)
+func (dsc *DaemonSetsController) updateCluster(_ interface{}, _ interface{}) {
+	dsc.processCluster()
 }
 
-func (dsc *DaemonSetsController) deleteKNode(obj interface{}) {
-	cluster := obj.(*kosmosv1alpha1.Cluster)
-	dsc.processCluster(cluster)
+func (dsc *DaemonSetsController) deleteKNode(_ interface{}) {
+	dsc.processCluster()
 }
 
 func (dsc *DaemonSetsController) syncDaemonSet(key lifted.QueueKey) error {
diff --git a/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_mirror_controller.go b/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_mirror_controller.go
index a4c6b8505..757dad870 100644
--- a/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_mirror_controller.go
+++ b/pkg/clustertree/cluster-manager/extensions/daemonset/daemonset_mirror_controller.go
@@ -181,10 +181,9 @@ func (dmc *DaemonSetsMirrorController) syncDaemonSet(key lifted.QueueKey) error
 				return err
 			}
 			return nil
-		} else {
-			klog.Errorf("failed to get kosmos daemon set %v", err)
-			return err
 		}
+		klog.Errorf("failed to get kosmos daemon set %v", err)
+		return err
 	}
 	kds := kd.DeepCopy()
 	if !isOwnedBy(kds.OwnerReferences, ds) {
@@ -229,7 +228,7 @@ func (dmc *DaemonSetsMirrorController) AddDaemonSet(obj interface{}) {
 	dmc.processor.Enqueue(ds)
 }
 
-func (dmc *DaemonSetsMirrorController) UpdateDaemonSet(old interface{}, new interface{}) {
+func (dmc *DaemonSetsMirrorController) UpdateDaemonSet(_ interface{}, new interface{}) {
 	ds, ok := new.(*appsv1.DaemonSet)
 	if !ok {
 		return
@@ -256,6 +255,6 @@ func (dmc *DaemonSetsMirrorController) DeleteKosmosDaemonSet(obj interface{}) {
 	dmc.ProcessKosmosDaemonSet(obj)
 }
 
-func (dmc *DaemonSetsMirrorController) UpdateKosmosDaemonSet(old interface{}, new interface{}) {
+func (dmc *DaemonSetsMirrorController) UpdateKosmosDaemonSet(_ interface{}, new interface{}) {
 	dmc.ProcessKosmosDaemonSet(new)
 }
diff --git a/pkg/clustertree/cluster-manager/extensions/daemonset/distribute_controller.go b/pkg/clustertree/cluster-manager/extensions/daemonset/distribute_controller.go
index 7a492dfe9..4547eb9f0 100644
--- a/pkg/clustertree/cluster-manager/extensions/daemonset/distribute_controller.go
+++ b/pkg/clustertree/cluster-manager/extensions/daemonset/distribute_controller.go
@@ -47,7 +47,7 @@ type DistributeController struct {
 
 	shadowDaemonSetProcessor lifted.AsyncWorker
 
-	clusterDaemonSetManagerMap map[string]*clusterDaemonSetManager
+	clusterDaemonSetManagerMap map[string]*ClusterDaemonSetManager
 
 	rateLimiterOptions lifted.RateLimitOptions
@@ -66,7 +66,7 @@ func NewDistributeController(
 		clusterLister:              clusterInformer.Lister(),
 		shadowDaemonSetSynced:      sdsInformer.Informer().HasSynced,
 		clusterSynced:              clusterInformer.Informer().HasSynced,
-		clusterDaemonSetManagerMap: make(map[string]*clusterDaemonSetManager),
+		clusterDaemonSetManagerMap: make(map[string]*ClusterDaemonSetManager),
 		rateLimiterOptions:         rateLimiterOptions,
 	}
 
@@ -164,11 +164,11 @@ func (dc *DistributeController) syncCluster(key lifted.QueueKey) error {
 			// nolint:errcheck
 			dsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
 				FilterFunc: func(obj interface{}) bool {
-					ds, ok := obj.(*appsv1.DaemonSet)
-					if !ok {
+					ds, newOK := obj.(*appsv1.DaemonSet)
+					if !newOK {
 						return false
 					}
-					if _, ok := ds.Labels[ManagedLabel]; ok {
+					if _, ok = ds.Labels[ManagedLabel]; ok {
 						return true
 					}
 					return false
@@ -180,30 +180,23 @@ func (dc *DistributeController) syncCluster(key lifted.QueueKey) error {
 				},
 			})
 
-			daemonsetSynced := dsInformer.Informer().HasSynced()
-			manager = NewClusterDaemonSetManager(
-				name,
-				kubeClient,
-				dsInformer,
-				kubeFactory,
-				daemonsetSynced,
-			)
+			manager = NewClusterDaemonSetManager(name, kubeClient, dsInformer, kubeFactory)
 			dc.clusterDaemonSetManagerMap[cluster.Name] = manager
 			manager.Start()
 		}
 
 		if cluster.DeletionTimestamp != nil {
-			list, error := manager.dsLister.List(labels.Set{ManagedLabel: ""}.AsSelector())
-			if error != nil {
-				klog.V(2).Infof("failed to list daemonsets from cluster %s: %v", cluster.Name, error)
-				return error
+			list, newErr := manager.dsLister.List(labels.Set{ManagedLabel: ""}.AsSelector())
+			if newErr != nil {
+				klog.V(2).Infof("failed to list daemonsets from cluster %s: %v", cluster.Name, newErr)
+				return newErr
 			}
 			for i := range list {
 				ds := list[i]
-				error := manager.kubeClient.AppsV1().DaemonSets(ds.Namespace).Delete(context.Background(), ds.Name, metav1.DeleteOptions{})
-				if err != nil {
-					klog.V(2).Infof("failed to delete daemonset %s/%s from cluster %s: %v", ds.Namespace, ds.Name, cluster.Name, error)
-					return error
+				newErr = manager.kubeClient.AppsV1().DaemonSets(ds.Namespace).Delete(context.Background(), ds.Name, metav1.DeleteOptions{})
+				if newErr != nil {
+					klog.V(2).Infof("failed to delete daemonset %s/%s from cluster %s: %v", ds.Namespace, ds.Name, cluster.Name, newErr)
+					return newErr
 				}
 			}
 			err = dc.removeClusterFinalizer(cluster)
@@ -267,7 +260,7 @@ func (dc *DistributeController) syncShadowDaemonSet(key lifted.QueueKey) error {
 		klog.Errorf("failed to ensure finalizer for shadow daemonset %s/%s: %v", namespace, name, err)
 		return err
 	}
-	copy := sds.DeepCopy()
+	deepCopy := sds.DeepCopy()
 
 	err = manager.tryCreateOrUpdateDaemonSet(sds)
 	if err != nil {
@@ -275,18 +268,18 @@ func (dc *DistributeController) syncShadowDaemonSet(key lifted.QueueKey) error {
 		return err
 	}
 
-	ds, error := manager.dsLister.DaemonSets(sds.Namespace).Get(sds.Name)
+	ds, newErr := manager.dsLister.DaemonSets(sds.Namespace).Get(sds.Name)
 
-	if error != nil {
-		klog.Errorf("failed to get daemonset %s/%s: %v", namespace, name, error)
-		return error
+	if newErr != nil {
+		klog.Errorf("failed to get daemonset %s/%s: %v", namespace, name, newErr)
+		return newErr
 	}
 
-	error = dc.updateStatus(copy, ds)
+	newErr = dc.updateStatus(deepCopy, ds)
 
-	if error != nil {
-		klog.Errorf("failed to update status for shadow daemonset %s/%s: %v", namespace, name, error)
-		return error
+	if newErr != nil {
klog.Errorf("failed to update status for shadow daemonset %s/%s: %v", namespace, name, newErr) + return newErr } return nil } @@ -367,7 +360,7 @@ func (dc *DistributeController) addCluster(obj interface{}) { dc.clusterProcessor.Enqueue(ds) } -func (dc *DistributeController) updateCluster(oldObj, newObj interface{}) { +func (dc *DistributeController) updateCluster(_, newObj interface{}) { newDS := newObj.(*v1alpha1.Cluster) dc.clusterProcessor.Enqueue(newDS) } @@ -420,7 +413,7 @@ func (dc *DistributeController) deleteShadowDaemonSet(obj interface{}) { dc.shadowDaemonSetProcessor.Enqueue(ds) } -type clusterDaemonSetManager struct { +type ClusterDaemonSetManager struct { name string kubeClient clientset.Interface @@ -436,7 +429,7 @@ type clusterDaemonSetManager struct { daemonSetSynced cache.InformerSynced } -func (km *clusterDaemonSetManager) Start() { +func (km *ClusterDaemonSetManager) Start() { km.factory.Start(km.ctx.Done()) if !cache.WaitForNamedCacheSync("distribute controller", km.ctx.Done(), km.daemonSetSynced) { klog.Errorf("failed to wait for daemonSet caches to sync") @@ -444,13 +437,13 @@ func (km *clusterDaemonSetManager) Start() { } } -func (km *clusterDaemonSetManager) Stop() { +func (km *ClusterDaemonSetManager) Stop() { if km.cancelFun != nil { km.cancelFun() } } -func (km *clusterDaemonSetManager) tryCreateOrUpdateDaemonSet(sds *v1alpha1.ShadowDaemonSet) error { +func (km *ClusterDaemonSetManager) tryCreateOrUpdateDaemonSet(sds *v1alpha1.ShadowDaemonSet) error { err := km.ensureNameSpace(sds.Namespace) if err != nil { klog.V(4).Infof("ensure namespace %s failed: %v", sds.Namespace, err) @@ -483,10 +476,9 @@ func (km *clusterDaemonSetManager) tryCreateOrUpdateDaemonSet(sds *v1alpha1.Shad return err } return nil - } else { - klog.Errorf("failed to get daemonset %s/%s: %v", sds.Namespace, sds.Name, err) - return err } + klog.Errorf("failed to get daemonset %s/%s: %v", sds.Namespace, sds.Name, err) + return err } ds.Spec.Template = sds.DaemonSetSpec.Template @@ -519,7 +511,7 @@ func (km *clusterDaemonSetManager) tryCreateOrUpdateDaemonSet(sds *v1alpha1.Shad return nil } -func (km *clusterDaemonSetManager) ensureNameSpace(namespace string) error { +func (km *ClusterDaemonSetManager) ensureNameSpace(namespace string) error { ns := &corev1.Namespace{} ns.Name = namespace _, err := km.kubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) @@ -534,7 +526,7 @@ func (km *clusterDaemonSetManager) ensureNameSpace(namespace string) error { return nil } -func (km *clusterDaemonSetManager) getDaemonSet(namespace, name string) (*appsv1.DaemonSet, error) { +func (km *ClusterDaemonSetManager) getDaemonSet(namespace, name string) (*appsv1.DaemonSet, error) { ds, err := km.dsLister.DaemonSets(namespace).Get(name) if err != nil { ds, err = km.kubeClient.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) @@ -546,10 +538,10 @@ func (km *clusterDaemonSetManager) getDaemonSet(namespace, name string) (*appsv1 return ds.DeepCopy(), nil } -func NewClusterDaemonSetManager(name string, client *clientset.Clientset, dsInformer appsinformers.DaemonSetInformer, factory informer.SharedInformerFactory, synced bool) *clusterDaemonSetManager { +func NewClusterDaemonSetManager(name string, client *clientset.Clientset, dsInformer appsinformers.DaemonSetInformer, factory informer.SharedInformerFactory) *ClusterDaemonSetManager { ctx := context.TODO() ctx, cancel := context.WithCancel(ctx) - return &clusterDaemonSetManager{ + return 
&ClusterDaemonSetManager{ name: name, kubeClient: client, dsLister: dsInformer.Lister(), diff --git a/pkg/clustertree/cluster-manager/extensions/daemonset/host_daemon_controller.go b/pkg/clustertree/cluster-manager/extensions/daemonset/host_daemon_controller.go index 7e602b33a..affdeb0d3 100644 --- a/pkg/clustertree/cluster-manager/extensions/daemonset/host_daemon_controller.go +++ b/pkg/clustertree/cluster-manager/extensions/daemonset/host_daemon_controller.go @@ -300,6 +300,7 @@ func (dsc *HostDaemonSetsController) Run(ctx context.Context, workers int) { <-ctx.Done() } +// nolint:revive func (dsc *HostDaemonSetsController) runWorker(ctx context.Context) { for dsc.processNextWorkItem(ctx) { } @@ -971,11 +972,7 @@ func (dsc *HostDaemonSetsController) manage(ctx context.Context, ds *kosmosv1alp podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...) // Label new pods using the hash label value of the current history when creating them - if err = dsc.syncNodes(ctx, ds, podsToDelete, nodesNeedingDaemonPods, hash); err != nil { - return err - } - - return nil + return dsc.syncNodes(ctx, ds, podsToDelete, nodesNeedingDaemonPods, hash) } // syncNodes deletes given pods and creates new daemon set pods on the given nodes diff --git a/pkg/clustertree/cluster-manager/extensions/daemonset/pod_reflect_controller.go b/pkg/clustertree/cluster-manager/extensions/daemonset/pod_reflect_controller.go index 213d313b4..1b28dc2fe 100644 --- a/pkg/clustertree/cluster-manager/extensions/daemonset/pod_reflect_controller.go +++ b/pkg/clustertree/cluster-manager/extensions/daemonset/pod_reflect_controller.go @@ -107,7 +107,7 @@ func NewPodReflectorController(kubeClient clientset.Interface, return pc } -func (pc *PodReflectorController) Run(ctx context.Context, workers int) { +func (pc *PodReflectorController) Run(ctx context.Context, _ int) { defer utilruntime.HandleCrash() klog.Infof("Starting pod reflector controller") @@ -258,7 +258,7 @@ func (pc *PodReflectorController) addPod(obj interface{}) { pc.podProcessor.Enqueue(pod) } -func (pc *PodReflectorController) updatePod(old interface{}, new interface{}) { +func (pc *PodReflectorController) updatePod(_ interface{}, new interface{}) { pod := new.(*corev1.Pod) pc.podProcessor.Enqueue(pod) } @@ -294,14 +294,14 @@ func (pc *PodReflectorController) tryUpdateOrCreate(pod *corev1.Pod) error { return err } } - copy := shadowPod.DeepCopy() - copy.SetAnnotations(pod.Annotations) - copy.SetLabels(pod.Labels) - copy.Spec = pod.Spec - copy.Spec.NodeName = clusterName - copy.Status = pod.Status - copy.UID = "" - updated, err := pc.kubeClient.CoreV1().Pods(copy.Namespace).Update(context.Background(), copy, metav1.UpdateOptions{}) + deepCopy := shadowPod.DeepCopy() + deepCopy.SetAnnotations(pod.Annotations) + deepCopy.SetLabels(pod.Labels) + deepCopy.Spec = pod.Spec + deepCopy.Spec.NodeName = clusterName + deepCopy.Status = pod.Status + deepCopy.UID = "" + updated, err := pc.kubeClient.CoreV1().Pods(deepCopy.Namespace).Update(context.Background(), deepCopy, metav1.UpdateOptions{}) if err != nil { klog.Errorf("failed to update pod %s/%s: %v", pod.Namespace, pod.Name, err) return err diff --git a/pkg/clustertree/cluster-manager/extensions/daemonset/update.go b/pkg/clustertree/cluster-manager/extensions/daemonset/update.go index 40e75c6e6..5aa2cde3a 100644 --- a/pkg/clustertree/cluster-manager/extensions/daemonset/update.go +++ b/pkg/clustertree/cluster-manager/extensions/daemonset/update.go @@ -365,11 +365,11 @@ func (dsc 
@@ -365,11 +365,11 @@ func (dsc *HostDaemonSetsController) dedupCurHistories(ctx context.Context, ds *
 				},
 			},
 		}
-		patchJson, err := json.Marshal(patchRaw)
+		patchJSON, err := json.Marshal(patchRaw)
 		if err != nil {
 			return nil, err
 		}
-		_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Patch(ctx, pod.Name, types.MergePatchType, patchJson, metav1.PatchOptions{})
+		_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Patch(ctx, pod.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
 		if err != nil {
 			return nil, err
 		}
diff --git a/pkg/clustertree/cluster-manager/node-server/api/errdefs.go b/pkg/clustertree/cluster-manager/node-server/api/errdefs.go
index 59c38885a..de3ce0ec7 100644
--- a/pkg/clustertree/cluster-manager/node-server/api/errdefs.go
+++ b/pkg/clustertree/cluster-manager/node-server/api/errdefs.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 )
 
+// nolint:revive
 const (
 	ERR_NOT_FOUND     = "ErrNotFound"
 	ERR_INVALID_INPUT = "ErrInvalidInput"
diff --git a/pkg/clustertree/cluster-manager/node-server/api/exec.go b/pkg/clustertree/cluster-manager/node-server/api/exec.go
index c5bfdfd42..11606709f 100644
--- a/pkg/clustertree/cluster-manager/node-server/api/exec.go
+++ b/pkg/clustertree/cluster-manager/node-server/api/exec.go
@@ -58,7 +58,7 @@ type containerExecutor struct {
 	getClient getClientFunc
 }
 
-func (c *containerExecutor) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteutils.TerminalSize, timeout time.Duration) error {
+func (c *containerExecutor) ExecInContainer(_ string, _ types.UID, _ string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteutils.TerminalSize, _ time.Duration) error {
 	eio := &execIO{
 		tty:   tty,
 		stdin: in,
diff --git a/pkg/clustertree/cluster-manager/node-server/api/helper.go b/pkg/clustertree/cluster-manager/node-server/api/helper.go
index fe465f1e9..a5c3c55e1 100644
--- a/pkg/clustertree/cluster-manager/node-server/api/helper.go
+++ b/pkg/clustertree/cluster-manager/node-server/api/helper.go
@@ -83,11 +83,6 @@ func httpStatusCode(err error) int {
 	}
 }
 
-func NotImplemented(w http.ResponseWriter, r *http.Request) {
-	klog.Warning("501 not implemented")
-	http.Error(w, "501 not implemented", http.StatusNotImplemented)
-}
-
 func NotFound(w http.ResponseWriter, r *http.Request) {
 	klog.Warningf("404 request not found, url: %s", r.URL)
 	http.Error(w, "404 request not found", http.StatusNotFound)
diff --git a/pkg/clustertree/cluster-manager/node-server/server.go b/pkg/clustertree/cluster-manager/node-server/server.go
index fc56b590e..3805c69f6 100644
--- a/pkg/clustertree/cluster-manager/node-server/server.go
+++ b/pkg/clustertree/cluster-manager/node-server/server.go
@@ -44,7 +44,7 @@ type NodeServer struct {
 	GlobalLeafClientManager leafUtils.LeafClientResourceManager
 }
 
-type HttpConfig struct {
+type HTTPConfig struct {
 	listenAddr string
 	handler    http.Handler
 	tlsConfig  *tls.Config
@@ -77,7 +77,7 @@ func (s *NodeServer) getClient(ctx context.Context, namespace string, podName st
 	return lcr.Clientset, lcr.RestConfig, nil
 }
 
-func (s *NodeServer) RunHTTP(ctx context.Context, httpConfig HttpConfig) (func(), error) {
+func (s *NodeServer) RunHTTP(_ context.Context, httpConfig HTTPConfig) (func(), error) {
 	if httpConfig.tlsConfig == nil {
 		klog.Warning("TLS config not provided, not starting up http service")
 		return func() {}, nil
@@ -187,7 +187,7 @@ func (s *NodeServer) Start(ctx context.Context, opts *options.Options) error {
 	handler := http.NewServeMux()
 	s.AttachRoutes(handler)
 
-	cancelHTTP, err := s.RunHTTP(ctx, HttpConfig{
+	cancelHTTP, err := s.RunHTTP(ctx, HTTPConfig{
 		listenAddr: fmt.Sprintf(":%d", opts.ListenPort),
 		tlsConfig:  tlsConfig,
 		handler:    handler,
diff --git a/pkg/clustertree/cluster-manager/utils/leaf_client_resource_manager.go b/pkg/clustertree/cluster-manager/utils/leaf_client_resource_manager.go
index 16da61706..bca1d073c 100644
--- a/pkg/clustertree/cluster-manager/utils/leaf_client_resource_manager.go
+++ b/pkg/clustertree/cluster-manager/utils/leaf_client_resource_manager.go
@@ -47,9 +47,8 @@ func (cr *leafClientResourceManager) GetLeafResource(actualClusterName string) (
 	defer cr.leafClientResourceManagersLock.Unlock()
 	if m, ok := cr.clientResourceMap[actualClusterName]; ok {
 		return m, nil
-	} else {
-		return nil, fmt.Errorf("cannot get leaf client resource, actualClusterName: %s", actualClusterName)
 	}
+	return nil, fmt.Errorf("cannot get leaf client resource, actualClusterName: %s", actualClusterName)
 }
 
 func (cr *leafClientResourceManager) AddLeafClientResource(lcr *LeafClientResource, cluster *kosmosv1alpha1.Cluster) {
diff --git a/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go b/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go
index 71bb883e4..158d94d2c 100644
--- a/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go
+++ b/pkg/clustertree/cluster-manager/utils/leaf_model_handler.go
@@ -194,11 +194,11 @@ func updateTaints(client kubernetes.Interface, taints []corev1.Taint, nodeName s
 			Taints: taints,
 		},
 	}
-	patchJson, err := json.Marshal(node)
+	patchJSON, err := json.Marshal(node)
 	if err != nil {
 		return err
 	}
-	_, err = client.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, patchJson, metav1.PatchOptions{})
+	_, err = client.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, patchJSON, metav1.PatchOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go b/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go
index c47b22664..6e4e8ab45 100644
--- a/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go
+++ b/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go
@@ -130,9 +130,8 @@ func (l *leafResourceManager) GetLeafResource(clusterName string) (*LeafResource
 	defer l.leafResourceManagersLock.Unlock()
 	if m, ok := l.resourceMap[clusterName]; ok {
 		return m, nil
-	} else {
-		return nil, fmt.Errorf("cannot get leaf resource, clusterName: %s", clusterName)
 	}
+	return nil, fmt.Errorf("cannot get leaf resource, clusterName: %s", clusterName)
 }
 
 func (l *leafResourceManager) GetLeafResourceByNodeName(nodeName string) (*LeafResource, error) {
diff --git a/pkg/clustertree/cluster-manager/utils/rootcluster.go b/pkg/clustertree/cluster-manager/utils/rootcluster.go
index 0e65ce661..c193f3c52 100644
--- a/pkg/clustertree/cluster-manager/utils/rootcluster.go
+++ b/pkg/clustertree/cluster-manager/utils/rootcluster.go
@@ -91,15 +91,14 @@ func SortAddress(ctx context.Context, rootClient kubernetes.Interface, originAdd
 				return false
 			}
 			return true
-		} else {
-			if !utils.IsIPv6(address[i].Address) && utils.IsIPv6(address[j].Address) {
-				return false
-			}
-			if utils.IsIPv6(address[i].Address) && !utils.IsIPv6(address[j].Address) {
-				return true
-			}
+		}
+		if !utils.IsIPv6(address[i].Address) && utils.IsIPv6(address[j].Address) {
+			return false
+		}
+		if utils.IsIPv6(address[i].Address) && !utils.IsIPv6(address[j].Address) {
 			return true
 		}
+		return true
 	})
 
 	return append(address, otherAddress...), nil
diff --git a/pkg/constants/network.go b/pkg/constants/network.go
index b4c666404..9d1aacfa6 100644
--- a/pkg/constants/network.go
+++ b/pkg/constants/network.go
@@ -2,6 +2,7 @@ package constants
 
 type VxlanType int
 
+// nolint:revive
 const (
 	VXLAN_BRIDGE_NAME = "vx-bridge"
 	VXLAN_LOCAL_NAME  = "vx-local"
diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go
index c1163d60d..ccedfea20 100644
--- a/pkg/generated/openapi/zz_generated.openapi.go
+++ b/pkg/generated/openapi/zz_generated.openapi.go
@@ -1983,6 +1983,12 @@ func schema_pkg_apis_kosmos_v1alpha1_KubeInKubeConfig(ref common.ReferenceCallba
 							Format: "",
 						},
 					},
+					"useTenantDns": {
+						SchemaProps: spec.SchemaProps{
+							Type:   []string{"boolean"},
+							Format: "",
+						},
+					},
 				},
 			},
 		},
diff --git a/pkg/kosmosctl/floater/analysis.go b/pkg/kosmosctl/floater/analysis.go
index 7c6b97e48..e187328a8 100644
--- a/pkg/kosmosctl/floater/analysis.go
+++ b/pkg/kosmosctl/floater/analysis.go
@@ -167,11 +167,7 @@ func (o *CommandAnalysisOptions) runCluster() error {
 
 	o.PrintResult(o.AnalysisResult)
 
-	if err = o.Floater.RemoveFloater(); err != nil {
-		return err
-	}
-
-	return nil
+	return o.Floater.RemoveFloater()
 }
 
 func (o *CommandAnalysisOptions) analysisNodeConfig(nodeName string, nc1 v1alpha1.NodeConfigSpec, nc2 v1alpha1.NodeConfigSpec) {
diff --git a/pkg/kosmosctl/floater/floater.go b/pkg/kosmosctl/floater/floater.go
index 26b9138a1..f5d790a8e 100644
--- a/pkg/kosmosctl/floater/floater.go
+++ b/pkg/kosmosctl/floater/floater.go
@@ -139,11 +139,7 @@ func (f *Floater) CreateFloater() error {
 	}
 
 	klog.Infof("create Clusterlink floater, version: %s", f.Version)
-	if err = f.applyDaemonSet(); err != nil {
-		return err
-	}
-
-	return nil
+	return f.applyDaemonSet()
 }
 
 func (f *Floater) applyServiceAccount() error {
diff --git a/pkg/kosmosctl/install/install.go b/pkg/kosmosctl/install/install.go
index bdb8e91d5..d67e4f77e 100644
--- a/pkg/kosmosctl/install/install.go
+++ b/pkg/kosmosctl/install/install.go
@@ -63,7 +63,7 @@ type CommandInstallOptions struct {
 	CNI            string
 	DefaultNICName string
 	NetworkType    string
-	IpFamily       string
+	IPFamily       string
 	UseProxy       string
 
 	KosmosClient versioned.Interface
@@ -104,7 +104,7 @@ func NewCmdInstall() *cobra.Command {
 	flags.StringVar(&o.CNI, "cni", "", "The cluster is configured using cni and currently supports calico and flannel.")
 	flags.StringVar(&o.DefaultNICName, "default-nic", "", "Set default network interface card.")
 	flags.StringVar(&o.NetworkType, "network-type", utils.NetworkTypeGateway, "Set the cluster network connection mode, which supports gateway and p2p modes, gateway is used by default.")
-	flags.StringVar(&o.IpFamily, "ip-family", string(v1alpha1.IPFamilyTypeIPV4), "Specify the IP protocol version used by network devices, common IP families include IPv4 and IPv6.")
+	flags.StringVar(&o.IPFamily, "ip-family", string(v1alpha1.IPFamilyTypeIPV4), "Specify the IP protocol version used by network devices, common IP families include IPv4 and IPv6.")
 	flags.StringVar(&o.UseProxy, "use-proxy", "false", "Set whether to enable proxy.")
 	flags.IntVarP(&o.WaitTime, "wait-time", "", utils.DefaultWaitTime, "Wait the specified time for the Kosmos install ready.")
 
@@ -307,9 +307,8 @@ func (o *CommandInstallOptions) runClusterlink() error {
 			if apierrors.IsAlreadyExists(err) {
-				klog.Warningf("CRD %v is existed, creation process will skip", &crds.Items[i].Name)
+				klog.Warningf("CRD %v already exists, skip creating it", crds.Items[i].Name)
 				continue
-			} else {
-				return fmt.Errorf("kosmosctl install clusterlink run error, crd options failed: %v", err)
 			}
+			return fmt.Errorf("kosmosctl install clusterlink run error, crd options failed: %v", err)
 		}
 		klog.Info("Create CRD " + crds.Items[i].Name + " successful.")
 	}
@@ -332,9 +331,8 @@ func (o *CommandInstallOptions) runClusterlink() error {
 	networkManagerLabel := map[string]string{"app": networkManagerDeploy.Labels["app"]}
 	if err = util.WaitPodReady(o.K8sClient, networkManagerDeploy.Namespace, util.MapToString(networkManagerLabel), o.WaitTime); err != nil {
 		return fmt.Errorf("kosmosctl install clusterlink run error, network-manager deployment options failed: %v", err)
-	} else {
-		klog.Info("Deployment " + networkManagerDeploy.Name + " has been created.")
 	}
+	klog.Info("Deployment " + networkManagerDeploy.Name + " has been created.")
 
 	operatorDeploy, err := util.GenerateDeployment(manifest.KosmosOperatorDeployment, manifest.DeploymentReplace{
 		Namespace: o.Namespace,
@@ -488,9 +486,8 @@ func (o *CommandInstallOptions) runClustertree() error {
 	label := map[string]string{"app": clustertreeDeploy.Labels["app"]}
 	if err = util.WaitPodReady(o.K8sClient, clustertreeDeploy.Namespace, util.MapToString(label), o.WaitTime); err != nil {
 		return fmt.Errorf("kosmosctl install clustertree run error, deployment options failed: %v", err)
-	} else {
-		klog.Info("Deployment clustertree-cluster-manager has been created.")
 	}
+	klog.Info("Deployment clustertree-cluster-manager has been created.")
 
 	operatorDeploy, err := util.GenerateDeployment(manifest.KosmosOperatorDeployment, manifest.DeploymentReplace{
 		Namespace: o.Namespace,
@@ -626,9 +623,9 @@ func (o *CommandInstallOptions) runScheduler() error {
 	label := map[string]string{"app": schedulerDeploy.Labels["app"]}
 	if err = util.WaitPodReady(o.K8sClient, schedulerDeploy.Namespace, util.MapToString(label), o.WaitTime); err != nil {
 		return fmt.Errorf("kosmosctl install scheduler run error, deployment options failed: %v", err)
-	} else {
-		klog.Info("Deployment kosmos-scheduler has been created.")
 	}
+	klog.Info("Deployment kosmos-scheduler has been created.")
+
 	return nil
 }
 
@@ -702,9 +699,8 @@ func (o *CommandInstallOptions) createOperator() error {
 	operatorLabel := map[string]string{"app": operatorDeploy.Labels["app"]}
 	if err = util.WaitPodReady(o.K8sClient, operatorDeploy.Namespace, util.MapToString(operatorLabel), o.WaitTime); err != nil {
 		return fmt.Errorf("kosmosctl install operator run error, operator options deployment failed: %s", err)
-	} else {
-		klog.Info("Operator " + operatorDeploy.Name + " has been created.")
 	}
+	klog.Info("Operator " + operatorDeploy.Name + " has been created.")
 	return nil
 }
 
@@ -731,7 +727,7 @@ func (o *CommandInstallOptions) createControlCluster() error {
 	joinOptions.CNI = o.CNI
 	joinOptions.DefaultNICName = o.DefaultNICName
 	joinOptions.NetworkType = o.NetworkType
-	joinOptions.IpFamily = o.IpFamily
+	joinOptions.IPFamily = o.IPFamily
 	joinOptions.UseProxy = o.UseProxy
 	controlCluster, err := o.KosmosClient.KosmosV1alpha1().Clusters().Get(context.TODO(), utils.DefaultClusterName, metav1.GetOptions{})
 	if err != nil {
@@ -757,7 +753,7 @@ func (o *CommandInstallOptions) createControlCluster() error {
 		controlCluster.Spec.ClusterLinkOptions.NetworkType = v1alpha1.NetworkTypeP2P
 	}
 
-	switch o.IpFamily {
+	switch o.IPFamily {
 	case utils.DefaultIPv4:
 		controlCluster.Spec.ClusterLinkOptions.IPFamily = v1alpha1.IPFamilyTypeIPV4
 	case utils.DefaultIPv6:
@@ -806,7 +802,7 @@ func (o *CommandInstallOptions) createControlCluster() error {
 	joinOptions.CNI = o.CNI
 	joinOptions.DefaultNICName = o.DefaultNICName
 	joinOptions.NetworkType = o.NetworkType
-	joinOptions.IpFamily = o.IpFamily
+	joinOptions.IPFamily = o.IPFamily
 	joinOptions.UseProxy = o.UseProxy
 
 	controlCluster, err := o.KosmosClient.KosmosV1alpha1().Clusters().Get(context.TODO(), utils.DefaultClusterName, metav1.GetOptions{})
@@ -834,7 +830,7 @@ func (o *CommandInstallOptions) createControlCluster() error {
 		controlCluster.Spec.ClusterLinkOptions.NetworkType = v1alpha1.NetworkTypeP2P
 	}
 
-	switch o.IpFamily {
+	switch o.IPFamily {
 	case utils.DefaultIPv4:
 		controlCluster.Spec.ClusterLinkOptions.IPFamily = v1alpha1.IPFamilyTypeIPV4
 	case utils.DefaultIPv6:
@@ -969,9 +965,8 @@ func (o *CommandInstallOptions) runCoreDNS() error {
 	}
 	if err = util.WaitDeploymentReady(o.K8sClient, deploy, o.WaitTime); err != nil {
 		return fmt.Errorf("kosmosctl install coredns run error, deployment options failed: %v", err)
-	} else {
-		klog.Info("Deployment coredns has been created.")
 	}
+	klog.Info("Deployment coredns has been created.")
 
 	klog.Info("Attempting to create coredns service...")
 	svc, err := util.GenerateService(manifest.CorednsService, manifest.ServiceReplace{
diff --git a/pkg/kosmosctl/join/join.go b/pkg/kosmosctl/join/join.go
index f732ef076..fd7edc540 100644
--- a/pkg/kosmosctl/join/join.go
+++ b/pkg/kosmosctl/join/join.go
@@ -59,7 +59,7 @@ type CommandJoinOptions struct {
 	CNI            string
 	DefaultNICName string
 	NetworkType    string
-	IpFamily       string
+	IPFamily       string
 	UseProxy       string
 
 	EnableTree bool
@@ -105,7 +105,7 @@ func NewCmdJoin(f ctlutil.Factory) *cobra.Command {
 	flags.StringVar(&o.CNI, "cni", "", "The cluster is configured using cni and currently supports calico and flannel.")
 	flags.StringVar(&o.DefaultNICName, "default-nic", "", "Set default network interface card.")
 	flags.StringVar(&o.NetworkType, "network-type", "", "Set the cluster network connection mode, which supports gateway and p2p modes, gateway is used by default.")
-	flags.StringVar(&o.IpFamily, "ip-family", "", "Specify the IP protocol version used by network devices, common IP families include IPv4 and IPv6.")
+	flags.StringVar(&o.IPFamily, "ip-family", "", "Specify the IP protocol version used by network devices, common IP families include IPv4 and IPv6.")
 	flags.StringVar(&o.UseProxy, "use-proxy", "false", "Set whether to enable proxy.")
 	flags.BoolVar(&o.EnableTree, "enable-tree", false, "Turn on clustertree.")
 	flags.StringVar(&o.LeafModel, "leaf-model", "", "Set leaf cluster model, which supports one-to-one model.")
@@ -114,7 +114,7 @@ func NewCmdJoin(f ctlutil.Factory) *cobra.Command {
 	return cmd
 }
 
-func (o *CommandJoinOptions) Complete(f ctlutil.Factory) error {
+func (o *CommandJoinOptions) Complete(_ ctlutil.Factory) error {
 	hostConfig, err := utils.RestConfig(o.HostKubeConfig, o.HostContext)
 	if err != nil {
 		return fmt.Errorf("kosmosctl join complete error, generate host rest config failed: %s", err)
@@ -172,9 +172,9 @@ func (o *CommandJoinOptions) Complete(f ctlutil.Factory) error {
 
 	// no enable-all,enable-tree,enable-link found, make 'EnableAll' with other config
 	if !o.EnableAll && !o.EnableTree && !o.EnableLink {
-		// due to NetworkType or IpFamily is not empty, make EnableLink true
-		if o.NetworkType != "" || o.IpFamily != "" {
-			klog.Warning("due to NetworkType or IpFamily is not empty, make EnableLink ture.")
+		// due to NetworkType or IPFamily is not empty, make EnableLink true
+		if o.NetworkType != "" || o.IPFamily != "" {
+			klog.Warning("due to NetworkType or IPFamily is not empty, make EnableLink true.")
 			o.EnableLink = true
 		}
 
@@ -196,8 +196,8 @@ func (o *CommandJoinOptions) Complete(f ctlutil.Factory) error {
 		o.EnableTree = true
 	}
 
-	if o.IpFamily == "" {
-		o.IpFamily = utils.DefaultIPv4
+	if o.IPFamily == "" {
+		o.IPFamily = utils.DefaultIPv4
 	}
 
 	if o.NetworkType == "" {
@@ -343,7 +343,7 @@ func (o *CommandJoinOptions) runCluster() error {
 		cluster.Spec.ClusterLinkOptions.NetworkType = v1alpha1.NetWorkTypeGateWay
 	}
 
-	switch o.IpFamily {
+	switch o.IPFamily {
 	case utils.DefaultIPv4:
 		cluster.Spec.ClusterLinkOptions.IPFamily = v1alpha1.IPFamilyTypeIPV4
 	case utils.DefaultIPv6:
diff --git a/pkg/kosmosctl/kosmosctl.go b/pkg/kosmosctl/kosmosctl.go
index 6fdce81d3..fc03fc13e 100644
--- a/pkg/kosmosctl/kosmosctl.go
+++ b/pkg/kosmosctl/kosmosctl.go
@@ -96,6 +96,6 @@ func NewKosmosCtlCommand() *cobra.Command {
 	return cmds
 }
 
-func runHelp(cmd *cobra.Command, args []string) error {
+func runHelp(cmd *cobra.Command, _ []string) error {
 	return cmd.Help()
 }
diff --git a/pkg/kosmosctl/rsmigrate/serviceexport.go b/pkg/kosmosctl/rsmigrate/serviceexport.go
index b5cd15311..11cf37dbb 100644
--- a/pkg/kosmosctl/rsmigrate/serviceexport.go
+++ b/pkg/kosmosctl/rsmigrate/serviceexport.go
@@ -17,7 +17,7 @@ var exportExample = templates.Examples(i18n.T(`
 kosmosctl export service foo -n namespacefoo --kubeconfig=[control plane kubeconfig]
 `))
 
-var exportErr string = "kosmosctl export error"
+var exportErr = "kosmosctl export error"
 
 type CommandExportOptions struct {
 	*CommandOptions
@@ -56,7 +56,7 @@ func (o *CommandExportOptions) Complete(cmd *cobra.Command) error {
 	return nil
 }
 
-func (o *CommandExportOptions) Run(cmd *cobra.Command, args []string) error {
+func (o *CommandExportOptions) Run(_ *cobra.Command, args []string) error {
 	if len(args) == 0 {
 		return fmt.Errorf("args is null, resource type should be specified")
 	}
diff --git a/pkg/kosmosctl/rsmigrate/serviceimport.go b/pkg/kosmosctl/rsmigrate/serviceimport.go
index b73ddbb77..9c153179b 100644
--- a/pkg/kosmosctl/rsmigrate/serviceimport.go
+++ b/pkg/kosmosctl/rsmigrate/serviceimport.go
@@ -18,7 +18,7 @@ var importExample = templates.Examples(i18n.T(`
-kosmosctl import service foo -n namespacefoo --kubecnfig=[control plane kubeconfig] --to-leafcluster leafclusterfoo
+kosmosctl import service foo -n namespacefoo --kubeconfig=[control plane kubeconfig] --to-leafcluster leafclusterfoo
 `))
 
-var importErr string = "kosmosctl import error"
+var importErr = "kosmosctl import error"
 
 type CommandImportOptions struct {
 	*CommandOptions
@@ -78,7 +78,7 @@ func (o *CommandImportOptions) Validate(cmd *cobra.Command) error {
 	return nil
 }
 
-func (o *CommandImportOptions) Run(f ctlutil.Factory, cmd *cobra.Command, args []string) error {
+func (o *CommandImportOptions) Run(_ ctlutil.Factory, _ *cobra.Command, args []string) error {
 	if len(args) == 0 {
 		return fmt.Errorf("args is null, resource should be specified")
 	}
diff --git a/pkg/kosmosctl/unjoin/unjoin.go b/pkg/kosmosctl/unjoin/unjoin.go
index 19a7943c9..b859f386c 100644
--- a/pkg/kosmosctl/unjoin/unjoin.go
+++ b/pkg/kosmosctl/unjoin/unjoin.go
@@ -72,7 +72,7 @@ func NewCmdUnJoin(f ctlutil.Factory) *cobra.Command {
 	return cmd
 }
 
-func (o *CommandUnJoinOptions) Complete(f ctlutil.Factory) error {
+func (o *CommandUnJoinOptions) Complete(_ ctlutil.Factory) error {
 	hostConfig, err := utils.RestConfig(o.HostKubeConfig, o.HostContext)
 	if err != nil {
 		return fmt.Errorf("kosmosctl unjoin complete error, generate host config failed: %s", err)
@@ -115,7 +115,7 @@ func (o *CommandUnJoinOptions) Complete(f ctlutil.Factory) error {
 	return nil
 }
 
-func (o *CommandUnJoinOptions) Validate(args []string) error {
+func (o *CommandUnJoinOptions) Validate(_ []string) error {
 	if len(o.Name) == 0 {
 		return fmt.Errorf("kosmosctl unjoin validate error, name is not valid")
 	}
diff --git a/pkg/kubenest/constants/constant.go b/pkg/kubenest/constants/constant.go
index efe52302e..0613b33e6 100644
--- a/pkg/kubenest/constants/constant.go
+++ b/pkg/kubenest/constants/constant.go
@@ -16,7 +16,7 @@ const (
 	DefaultNs                        = "default"
 	DefaultImageRepositoryEnv        = "IMAGE_REPOSITIRY"
 	DefaultImageVersionEnv           = "IMAGE_VERSION"
-	DefaultCoreDnsImageTagEnv        = "COREDNS_IMAGE_TAG"
+	DefaultCoreDNSImageTagEnv        = "COREDNS_IMAGE_TAG"
 	DefaultVirtualControllerLabelEnv = "VIRTUAL_CONTROLLER_LABEL"
 	VirtualClusterFinalizerName      = "kosmos.io/virtual-cluster-finalizer"
 	ServiceType                      = "NodePort"
@@ -46,12 +46,12 @@ const (
 	ProxyServerCertAndKeyName = "proxy-server"
 
 	//controlplane apiserver
-	ApiServer                     = "apiserver"
-	ApiServerAnp                  = "apiserver-anp"
-	ApiServerEtcdListenClientPort = 2379
-	ApiServerServiceType          = "NodePort"
-	// APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation
-	ApiServerCallRetryInterval = 100 * time.Millisecond
+	APIServer                     = "apiserver"
+	APIServerAnp                  = "apiserver-anp"
+	APIServerEtcdListenClientPort = 2379
+	APIServerServiceType          = "NodePort"
+	// APIServerCallRetryInterval defines how long kubeadm should wait before retrying a failed API operation
+	APIServerCallRetryInterval = 100 * time.Millisecond
 	APIServerSVCPortName = "client"
 
 	//install kube-proxy in virtualCluster
@@ -98,11 +98,11 @@ const (
 	//host_port_manager
 	HostPortsCMName     = "kosmos-hostports"
 	HostPortsCMDataName = "config.yaml"
-	ApiServerPortKey                   = "apiserver-port"
-	ApiServerNetworkProxyAgentPortKey  = "apiserver-network-proxy-agent-port"
-	ApiServerNetworkProxyServerPortKey = "apiserver-network-proxy-server-port"
-	ApiServerNetworkProxyHealthPortKey = "apiserver-network-proxy-health-port"
-	ApiServerNetworkProxyAdminPortKey  = "apiserver-network-proxy-admin-port"
+	APIServerPortKey                   = "apiserver-port"
+	APIServerNetworkProxyAgentPortKey  = "apiserver-network-proxy-agent-port"
+	APIServerNetworkProxyServerPortKey = "apiserver-network-proxy-server-port"
+	APIServerNetworkProxyHealthPortKey = "apiserver-network-proxy-health-port"
+	APIServerNetworkProxyAdminPortKey  = "apiserver-network-proxy-admin-port"
 	VirtualClusterPortNum = 5
 
 	// vip
@@ -124,25 +124,26 @@ const (
 	// core-dns
 	KubeDNSSVCName = "kube-dns"
 	// nolint
-	HostCoreDnsComponents    = "host-core-dns-components"
-	VirtualCoreDnsComponents = "virtual-core-dns-components"
-	PrometheusRuleManifest   = "prometheus-rules"
+	HostCoreDnsComponents      = "host-core-dns-components"
+	VirtualCoreDNSComponents   = "virtual-core-dns-components"
+	PrometheusRuleManifest     = "prometheus-rules"
+	TenantCoreDNSComponentName = "core-dns-tenant"
 
 	StateLabelKey = "kosmos-io/state"
 
 	KonnectivityServerSuffix = "konnectivity-server"
 
 	//in virtual cluster
-	ApiServerExternalService = "api-server-external-service"
+	APIServerExternalService = "api-server-external-service"
 )
 
 type Action string
 
-var ApiServerServiceSubnet string
+var APIServerServiceSubnet string
 var KubeControllerManagerPodSubnet string
 
 func init() {
-	ApiServerServiceSubnet = utils.GetEnvWithDefaultValue("SERVICE_SUBNET", "10.237.6.0/18")
+	APIServerServiceSubnet = utils.GetEnvWithDefaultValue("SERVICE_SUBNET", "10.237.6.0/18")
 	// fd11:1122:1111::/48,
 	KubeControllerManagerPodSubnet = utils.GetEnvWithDefaultValue("POD_SUBNET", "10.244.0.0/16")
 }
diff --git a/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go b/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go
index b67075ae7..ad065ab57 100644
--- a/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go
+++ b/pkg/kubenest/controller/endpoints.sync.controller/apiserver_external_sync_controller.go
@@ -8,6 +8,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/retry"
@@ -17,9 +18,11 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
 	"github.com/kosmos.io/kosmos/pkg/kubenest/constants"
@@ -27,20 +30,20 @@ import (
 	"github.com/kosmos.io/kosmos/pkg/utils"
 )
 
-type ApiServerExternalSyncController struct {
+type APIServerExternalSyncController struct {
 	client.Client
 	EventRecorder record.EventRecorder
 }
 
-const ApiServerExternalSyncControllerName string = "api-server-external-service-sync-controller"
+const APIServerExternalSyncControllerName string = "api-server-external-service-sync-controller"
 
-func (e *ApiServerExternalSyncController) SetupWithManager(mgr manager.Manager) error {
+func (e *APIServerExternalSyncController) SetupWithManager(mgr manager.Manager) error {
 	skipEvent := func(obj client.Object) bool {
 		return strings.Contains(obj.GetName(), "apiserver") && obj.GetNamespace() != ""
 	}
 
 	return controllerruntime.NewControllerManagedBy(mgr).
-		Named(ApiServerExternalSyncControllerName).
+		Named(APIServerExternalSyncControllerName).
 		WithOptions(controller.Options{MaxConcurrentReconciles: 5}).
 		For(&v1.Endpoints{}, builder.WithPredicates(predicate.Funcs{
@@ -50,20 +53,40 @@ func (e *ApiServerExternalSyncController) SetupWithManager(mgr manager.Manager)
 			UpdateFunc: func(updateEvent event.UpdateEvent) bool { return skipEvent(updateEvent.ObjectNew) },
 			DeleteFunc: func(deleteEvent event.DeleteEvent) bool { return false },
 		})).
+		Watches(&source.Kind{Type: &v1alpha1.VirtualCluster{}}, handler.EnqueueRequestsFromMapFunc(e.newVirtualClusterMapFunc())).
Complete(e) } -func (e *ApiServerExternalSyncController) SyncApiServerExternalEPS(ctx context.Context, k8sClient kubernetes.Interface) error { +func (e *APIServerExternalSyncController) newVirtualClusterMapFunc() handler.MapFunc { + return func(a client.Object) []reconcile.Request { + var requests []reconcile.Request + vcluster := a.(*v1alpha1.VirtualCluster) + + // Join the Reconcile queue only if the status of the vcluster is Completed + if vcluster.Status.Phase == v1alpha1.Completed { + klog.V(4).Infof("api-server-external-sync-controller: virtualcluster change to completed: %s", vcluster.Name) + // Add the vcluster to the Reconcile queue + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: vcluster.Name, + Namespace: vcluster.Namespace, + }, + }) + } + return requests + } +} + +func (e *APIServerExternalSyncController) SyncAPIServerExternalEPS(ctx context.Context, k8sClient kubernetes.Interface) error { kubeEndpoints, err := k8sClient.CoreV1().Endpoints(constants.DefaultNs).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { klog.Errorf("Error getting endpoints: %v", err) return err - } else { - klog.V(4).Infof("Endpoints for service 'kubernetes': %v", kubeEndpoints) - for _, subset := range kubeEndpoints.Subsets { - for _, address := range subset.Addresses { - klog.V(4).Infof("IP: %s", address.IP) - } + } + klog.V(4).Infof("Endpoints for service 'kubernetes': %v", kubeEndpoints) + for _, subset := range kubeEndpoints.Subsets { + for _, address := range subset.Addresses { + klog.V(4).Infof("IP: %s", address.IP) } } @@ -76,9 +99,9 @@ func (e *ApiServerExternalSyncController) SyncApiServerExternalEPS(ctx context.C return err } - apiServerExternalEndpoints, err := k8sClient.CoreV1().Endpoints(constants.DefaultNs).Get(ctx, constants.ApiServerExternalService, metav1.GetOptions{}) + apiServerExternalEndpoints, err := k8sClient.CoreV1().Endpoints(constants.DefaultNs).Get(ctx, constants.APIServerExternalService, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { - klog.Errorf("failed to get endpoints for %s : %v", constants.ApiServerExternalService, err) + klog.Errorf("failed to get endpoints for %s : %v", constants.APIServerExternalService, err) return err } @@ -113,9 +136,9 @@ func (e *ApiServerExternalSyncController) SyncApiServerExternalEPS(ctx context.C return nil } -func (e *ApiServerExternalSyncController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - klog.V(4).Infof("============ %s start to reconcile %s ============", ApiServerExternalSyncControllerName, request.NamespacedName) - defer klog.V(4).Infof("============ %s finish to reconcile %s ============", ApiServerExternalSyncControllerName, request.NamespacedName) +func (e *APIServerExternalSyncController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + klog.V(4).Infof("============ %s start to reconcile %s ============", APIServerExternalSyncControllerName, request.NamespacedName) + defer klog.V(4).Infof("============ %s finish to reconcile %s ============", APIServerExternalSyncControllerName, request.NamespacedName) var virtualClusterList v1alpha1.VirtualClusterList if err := e.List(ctx, &virtualClusterList); err != nil { @@ -140,8 +163,8 @@ func (e *ApiServerExternalSyncController) Reconcile(ctx context.Context, request return reconcile.Result{}, nil } - if targetVirtualCluster.Status.Phase != v1alpha1.Initialized { - return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, 
diff --git a/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go b/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go index 7995a6963..23822357c 100644 --- a/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go +++ b/pkg/kubenest/controller/endpoints.sync.controller/coredns_sync_controller.go @@ -151,6 +151,10 @@ func (e *CoreDNSController) Reconcile(ctx context.Context, request reconcile.Req return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil } + if targetVirtualCluster.Spec.KubeInKubeConfig != nil && targetVirtualCluster.Spec.KubeInKubeConfig.UseTenantDNS { + return reconcile.Result{}, nil + } + // Get the corresponding svc var kubesvc v1.Service if err := e.Get(ctx, request.NamespacedName, &kubesvc); err != nil { diff --git a/pkg/kubenest/controller/global.node.controller/global_node_controller.go b/pkg/kubenest/controller/global.node.controller/global_node_controller.go index 5932ac752..9f98ce92f 100644 --- a/pkg/kubenest/controller/global.node.controller/global_node_controller.go +++ b/pkg/kubenest/controller/global.node.controller/global_node_controller.go @@ -140,16 +140,12 @@ func (r *GlobalNodeController) SyncTaint(ctx context.Context, globalNode *v1alph return nil } - if err := util.DrainNode(ctx, targetNode.Name, r.RootClientSet, &targetNode, env.GetDrainWaitSeconds(), true); err != nil { - return err - } - return nil + return util.DrainNode(ctx, targetNode.Name, r.RootClientSet, &targetNode, env.GetDrainWaitSeconds(), true) }) return err - } else { - klog.V(4).Infof("global-node-controller: SyncTaints: node status is %s, skip", globalNode.Spec.State, globalNode.Name) - return nil } + klog.V(4).Infof("global-node-controller: SyncTaints: node %s status is %s, skip", globalNode.Name, globalNode.Spec.State) + return nil } func (r *GlobalNodeController) SyncState(ctx context.Context, globalNode *v1alpha1.GlobalNode) error { @@ -231,12 +227,11 @@ func (r *GlobalNodeController) Reconcile(ctx context.Context, request reconcile. } globalNode.Name = request.Name globalNode.Spec.State = v1alpha1.NodeReserved - for _, a := range rootNode.Status.Addresses { - if a.Type == v1.NodeInternalIP { - globalNode.Spec.NodeIP = a.Address - break - } + firstNodeIP, err := utils.FindFirstNodeIPAddress(*rootNode, v1.NodeInternalIP) + if err != nil { + klog.Errorf("get first node ip address err: %s %s", v1.NodeInternalIP, err.Error()) + } + globalNode.Spec.NodeIP = firstNodeIP if _, err = r.KosmosClient.KosmosV1alpha1().GlobalNodes().Create(ctx, &globalNode, metav1.CreateOptions{}); err != nil { klog.Errorf("global-node-controller: can not create global node: %s", globalNode.Name) return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil }
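Editorial note: the hunk above swaps the inline loop over rootNode.Status.Addresses for utils.FindFirstNodeIPAddress, and a later hunk in this patch deletes the equivalent local findAddress helper from virtualcluster_init_controller.go. A sketch of the shared helper's likely shape, reconstructed from that removed loop; the actual implementation in pkg/utils may differ in detail.

package utils // sketch of the assumed shared helper, not the repository source

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// FindFirstNodeIPAddress returns the first address of the requested type
// (for example v1.NodeInternalIP or v1.NodeExternalIP) from node.Status.Addresses.
func FindFirstNodeIPAddress(node v1.Node, addrType v1.NodeAddressType) (string, error) {
	for _, addr := range node.Status.Addresses {
		if addr.Type == addrType {
			return addr.Address, nil
		}
	}
	return "", fmt.Errorf("cannot find %s address in node addresses, node name: %s", addrType, node.GetName())
}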
@@ -268,18 +263,17 @@ func (r *GlobalNodeController) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil } - if err := r.SyncLabel(ctx, &globalNode); err != nil { + if err = r.SyncLabel(ctx, &globalNode); err != nil { klog.Warningf("sync label %s error: %v", request.NamespacedName, err) return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } else { - klog.V(4).Infof("sync label successed, %s", request.NamespacedName) } + klog.V(4).Infof("sync label succeeded, %s", request.NamespacedName) - if err := r.SyncTaint(ctx, &globalNode); err != nil { + if err = r.SyncTaint(ctx, &globalNode); err != nil { klog.Errorf("sync taint %s error: %v", request.NamespacedName, err) return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil - } else { - klog.V(4).Infof("sync taint successed, %s", request.NamespacedName) } + klog.V(4).Infof("sync taint succeeded, %s", request.NamespacedName) + return reconcile.Result{}, nil } diff --git a/pkg/kubenest/controller/kosmos/kosmos_join_controller.go b/pkg/kubenest/controller/kosmos/kosmos_join_controller.go index becfca6c3..1fe9c2e91 100644 --- a/pkg/kubenest/controller/kosmos/kosmos_join_controller.go +++ b/pkg/kubenest/controller/kosmos/kosmos_join_controller.go @@ -310,7 +310,7 @@ func (c *KosmosJoinController) ClearSomeNodeOwner(nodeNames *[]string) { } } -func (c *KosmosJoinController) CreateClusterObject(ctx context.Context, request reconcile.Request, +func (c *KosmosJoinController) CreateClusterObject(_ context.Context, _ reconcile.Request, vc *v1alpha1.VirtualCluster, hostK8sClient kubernetes.Interface, cluster *v1alpha1.Cluster) (*[]string, *map[string]struct{}, error) { var leafModels []v1alpha1.LeafModel // recored new nodes' name, if error happen before create or update, need clear newNodeNames @@ -369,7 +369,7 @@ func (c *KosmosJoinController) CreateClusterObject(ctx context.Context, request return &newNodeNames, &allNodeNamesMap, nil } -func (c *KosmosJoinController) CreateOrUpdateCluster(ctx context.Context, request reconcile.Request, +func (c *KosmosJoinController) CreateOrUpdateCluster(_ context.Context, request reconcile.Request, kosmosClient versioned.Interface, k8sClient kubernetes.Interface, newNodeNames *[]string, allNodeNamesMap *map[string]struct{}, cluster *v1alpha1.Cluster) error { old, err := kosmosClient.KosmosV1alpha1().Clusters().Get(context.TODO(), cluster.Name, metav1.GetOptions{}) @@ -473,7 +473,7 @@ func (c *KosmosJoinController) CreateCluster(ctx context.Context, request reconc return fmt.Errorf("crd kubernetes client failed: %v", err) } - newNodeNames, allNodeNamesMap, nil := c.CreateClusterObject(ctx, request, vc, hostK8sClient, &cluster) + newNodeNames, allNodeNamesMap, err := c.CreateClusterObject(ctx, request, vc, hostK8sClient, &cluster) if err != nil { return err } diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go index ed3032732..3df3767e8 100644 --- a/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go +++ b/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go @@ -25,6 +25,7 @@ const ( NotFoundText = "127" ) +// nolint:revive type ExectorReturn struct { Status Status Reason string @@ -37,11 +38,13 @@ func (r *ExectorReturn) String() string { return fmt.Sprintf("%d, %s, %s, %d", r.Status, r.Reason, r.LastLog, r.Code) } +// nolint:revive type Exector interface { GetWebSocketOption() WebSocketOption SendHandler(conn *websocket.Conn, done <-chan struct{}, interrupt chan struct{}, 
result *ExectorReturn) } +// nolint:revive type ExectorHelper struct { Token string Addr string diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go b/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go index 68ab0b65e..66ab8d65f 100644 --- a/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go +++ b/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go @@ -293,10 +293,7 @@ func (r *NodeController) DoNodeClean(ctx context.Context, virtualCluster v1alpha cleanNodeInfos = append(cleanNodeInfos, *globalNode) } - if err := r.cleanGlobalNode(ctx, cleanNodeInfos, virtualCluster, nil); err != nil { - return err - } - return nil + return r.cleanGlobalNode(ctx, cleanNodeInfos, virtualCluster, nil) } func (r *NodeController) cleanGlobalNode(ctx context.Context, nodeInfos []v1alpha1.GlobalNode, virtualCluster v1alpha1.VirtualCluster, _ kubernetes.Interface) error { @@ -320,9 +317,8 @@ func (r *NodeController) joinNode(ctx context.Context, nodeInfos []v1alpha1.Glob dnssvc, err := k8sClient.CoreV1().Services(constants.SystemNs).Get(ctx, constants.KubeDNSSVCName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("get kube-dns service failed: %s", err) - } else { - clusterDNS = dnssvc.Spec.ClusterIP } + clusterDNS = dnssvc.Spec.ClusterIP return r.BatchProcessNodes(nodeInfos, func(nodeInfo v1alpha1.GlobalNode) error { return workflow.NewJoinWorkFlow().RunTask(ctx, task.TaskOpt{ diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go index e37fb13fe..d7a297367 100644 --- a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go +++ b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/task/task.go @@ -24,6 +24,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/util" ) +// nolint:revive type TaskOpt struct { NodeInfo v1alpha1.GlobalNode VirtualCluster v1alpha1.VirtualCluster diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go index e78bde136..a6e715f47 100644 --- a/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go +++ b/pkg/kubenest/controller/virtualcluster.node.controller/workflow/workflow.go @@ -13,6 +13,7 @@ const ( maxRetries = 5 ) +// nolint:revive type WorkflowData struct { Tasks []task.Task } @@ -44,6 +45,7 @@ func RunWithRetry(ctx context.Context, task task.Task, opt task.TaskOpt, preArgs return args, nil } +// nolint:revive func (w WorkflowData) RunTask(ctx context.Context, opt task.TaskOpt) error { var args interface{} for i, t := range w.Tasks { diff --git a/pkg/kubenest/controller/virtualcluster_init_controller.go b/pkg/kubenest/controller/virtualcluster_init_controller.go index aa3abde63..7463ba51d 100644 --- a/pkg/kubenest/controller/virtualcluster_init_controller.go +++ b/pkg/kubenest/controller/virtualcluster_init_controller.go @@ -122,13 +122,12 @@ func (c *VirtualClusterInitController) Reconcile(ctx context.Context, request re } else if updatedCluster.Status.Phase == v1alpha1.Deleting { klog.V(2).InfoS("Virtual Cluster is deleting, wait for event 'AllNodeDeleted'", "Virtual Cluster", request) return reconcile.Result{}, nil - } else { - return c.removeFinalizer(updatedCluster) } + return c.removeFinalizer(updatedCluster) } switch originalCluster.Status.Phase { - case "", v1alpha1.Pending: + case "": 
//create request updatedCluster.Status.Phase = v1alpha1.Preparing err := c.Update(updatedCluster) @@ -197,18 +196,18 @@ func (c *VirtualClusterInitController) Reconcile(ctx context.Context, request re } if !policyChanged { return reconcile.Result{}, nil - } else { - err := c.assignWorkNodes(updatedCluster) - if err != nil { - return reconcile.Result{RequeueAfter: RequeueTime}, errors.Wrapf(err, "Error update virtualcluster %s", updatedCluster.Name) - } - updatedCluster.Status.Phase = v1alpha1.Updating - err = c.Update(updatedCluster) - if err != nil { - klog.Errorf("Error update virtualcluster %s status to %s", updatedCluster.Name, updatedCluster.Status.Phase) - return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) - } } + err = c.assignWorkNodes(updatedCluster) + if err != nil { + return reconcile.Result{RequeueAfter: RequeueTime}, errors.Wrapf(err, "Error update virtualcluster %s", updatedCluster.Name) + } + updatedCluster.Status.Phase = v1alpha1.Updating + err = c.Update(updatedCluster) + if err != nil { + klog.Errorf("Error update virtualcluster %s status to %s", updatedCluster.Name, updatedCluster.Status.Phase) + return reconcile.Result{}, errors.Wrapf(err, "Error update virtualcluster %s status", updatedCluster.Name) + } + default: klog.Warningf("Skip virtualcluster %s reconcile status: %s", originalCluster.Name, originalCluster.Status.Phase) } @@ -232,7 +231,7 @@ func (c *VirtualClusterInitController) SetupWithManager(mgr manager.Manager) err } func (c *VirtualClusterInitController) Update(updated *v1alpha1.VirtualCluster) error { - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { current := &v1alpha1.VirtualCluster{} if err := c.Client.Get(context.TODO(), types.NamespacedName{ Namespace: updated.Namespace, @@ -245,10 +244,7 @@ func (c *VirtualClusterInitController) Update(updated *v1alpha1.VirtualCluster) updated.Status.UpdateTime = &now updated.ResourceVersion = current.ResourceVersion return c.Client.Patch(context.TODO(), updated, client.MergeFrom(current)) - }); err != nil { - return err - } - return nil + }) } func (c *VirtualClusterInitController) ensureFinalizer(virtualCluster *v1alpha1.VirtualCluster) (reconcile.Result, error) { @@ -301,6 +297,7 @@ func (c *VirtualClusterInitController) removeFinalizer(virtualCluster *v1alpha1. 
return reconcile.Result{}, nil } +// nolint:revive // createVirtualCluster assign work nodes, create control plane and create compoennts from manifests func (c *VirtualClusterInitController) createVirtualCluster(virtualCluster *v1alpha1.VirtualCluster, kubeNestOptions *v1alpha1.KubeNestConfiguration) error { klog.V(2).Infof("Reconciling virtual cluster", "name", virtualCluster.Name) @@ -584,10 +581,7 @@ func (c *VirtualClusterInitController) setGlobalNodeUsageStatus(virtualCluster * return nil } - if err := retry.RetryOnConflict(retry.DefaultRetry, updateStatusFunc); err != nil { - return err - } - return nil + return retry.RetryOnConflict(retry.DefaultRetry, updateStatusFunc) } func (c *VirtualClusterInitController) ensureAllPodsRunning(virtualCluster *v1alpha1.VirtualCluster, timeout time.Duration) error { @@ -746,15 +740,6 @@ func checkPortOnHostWithAddresses(port int32, hostAddress []string) (bool, error return false, nil } -func findAddress(node corev1.Node) (string, error) { - for _, addr := range node.Status.Addresses { - if addr.Type == corev1.NodeInternalIP { - return addr.Address, nil - } - } - return "", fmt.Errorf("cannot find internal IP address in node addresses, node name: %s", node.GetName()) -} - // Return false to indicate that the port is not occupied func CheckPortOnHost(addr string, port int32) (bool, error) { hostExectorHelper := exector.NewExectorHelper(addr, "") @@ -778,9 +763,8 @@ func CheckPortOnHost(addr string, port int32) (bool, error) { if ret.Status != exector.SUCCESS { return true, fmt.Errorf("pod[%d] is occupied", port) - } else { - return false, nil } + return false, nil } func (c *VirtualClusterInitController) findHostAddresses() ([]string, error) { @@ -794,7 +778,7 @@ func (c *VirtualClusterInitController) findHostAddresses() ([]string, error) { ret := []string{} for _, node := range nodes.Items { - addr, err := findAddress(node) + addr, err := utils.FindFirstNodeIPAddress(node, corev1.NodeExternalIP) if err != nil { return nil, err } @@ -804,7 +788,7 @@ func (c *VirtualClusterInitController) findHostAddresses() ([]string, error) { return ret, nil } -func (c *VirtualClusterInitController) GetHostPortNextFunc(virtualCluster *v1alpha1.VirtualCluster) (func() (int32, error), error) { +func (c *VirtualClusterInitController) GetHostPortNextFunc(_ *v1alpha1.VirtualCluster) (func() (int32, error), error) { var hostPool *HostPortPool var err error type nextfunc func() (int32, error) @@ -829,10 +813,10 @@ func (c *VirtualClusterInitController) GetHostPortNextFunc(virtualCluster *v1alp return next, nil } -func createApiAnpAgentSvc(name, namespace string, nameMap map[string]int) *corev1.Service { +func createAPIAnpAgentSvc(name, namespace string, nameMap map[string]int) *corev1.Service { apiAnpAgentSvc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: util.GetKonnectivityApiServerName(name), + Name: util.GetKonnectivityAPIServerName(name), Namespace: namespace, }, Spec: corev1.ServiceSpec{ @@ -858,12 +842,12 @@ func createApiAnpAgentSvc(name, namespace string, nameMap map[string]int) *corev func (c *VirtualClusterInitController) GetNodePorts(client kubernetes.Interface, virtualCluster *v1alpha1.VirtualCluster) ([]int32, error) { ports := make([]int32, 5) - ipFamilies := utils.IPFamilyGenerator(constants.ApiServerServiceSubnet) + ipFamilies := utils.IPFamilyGenerator(constants.APIServerServiceSubnet) name := virtualCluster.GetName() namespace := virtualCluster.GetNamespace() apiSvc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: 
util.GetApiServerName(name), + Name: util.GetAPIServerName(name), Namespace: namespace, }, Spec: corev1.ServiceSpec{ @@ -886,14 +870,14 @@ func (c *VirtualClusterInitController) GetNodePorts(client kubernetes.Interface, return nil, fmt.Errorf("can not create api svc for allocate port, error: %s", err) } - createdApiSvc, err := client.CoreV1().Services(namespace).Get(context.TODO(), apiSvc.GetName(), metav1.GetOptions{}) + createdAPISvc, err := client.CoreV1().Services(namespace).Get(context.TODO(), apiSvc.GetName(), metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("can not get api svc for allocate port, error: %s", err) } - nodePort := createdApiSvc.Spec.Ports[0].NodePort + nodePort := createdAPISvc.Spec.Ports[0].NodePort ports[0] = nodePort - apiAnpAgentSvc := createApiAnpAgentSvc(name, namespace, nameMap) + apiAnpAgentSvc := createAPIAnpAgentSvc(name, namespace, nameMap) err = util.CreateOrUpdateService(client, apiAnpAgentSvc) if err != nil { return nil, fmt.Errorf("can not create anp svc for allocate port, error: %s", err) @@ -945,7 +929,7 @@ func (c *VirtualClusterInitController) GetHostNetworkPorts(virtualCluster *v1alp // AllocateHostPort allocate host port for virtual cluster // #nosec G602 -func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1.VirtualCluster, kubeNestOptions *v1alpha1.KubeNestConfiguration) (int32, error) { +func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1.VirtualCluster, _ *v1alpha1.KubeNestConfiguration) (int32, error) { c.lock.Lock() defer c.lock.Unlock() if len(virtualCluster.Status.PortMap) > 0 || virtualCluster.Status.Port != 0 { @@ -955,7 +939,7 @@ func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1 var ports []int32 var err error - if virtualCluster.Spec.KubeInKubeConfig != nil && virtualCluster.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort { + if virtualCluster.Spec.KubeInKubeConfig != nil && virtualCluster.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort { ports, err = c.GetNodePorts(c.RootClientSet, virtualCluster) } else { ports, err = c.GetHostNetworkPorts(virtualCluster) @@ -970,11 +954,11 @@ func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1 return 0, fmt.Errorf("no available ports to allocate") } virtualCluster.Status.PortMap = make(map[string]int32) - virtualCluster.Status.PortMap[constants.ApiServerPortKey] = ports[0] - virtualCluster.Status.PortMap[constants.ApiServerNetworkProxyAgentPortKey] = ports[1] - virtualCluster.Status.PortMap[constants.ApiServerNetworkProxyServerPortKey] = ports[2] - virtualCluster.Status.PortMap[constants.ApiServerNetworkProxyHealthPortKey] = ports[3] - virtualCluster.Status.PortMap[constants.ApiServerNetworkProxyAdminPortKey] = ports[4] + virtualCluster.Status.PortMap[constants.APIServerPortKey] = ports[0] + virtualCluster.Status.PortMap[constants.APIServerNetworkProxyAgentPortKey] = ports[1] + virtualCluster.Status.PortMap[constants.APIServerNetworkProxyServerPortKey] = ports[2] + virtualCluster.Status.PortMap[constants.APIServerNetworkProxyHealthPortKey] = ports[3] + virtualCluster.Status.PortMap[constants.APIServerNetworkProxyAdminPortKey] = ports[4] klog.V(4).InfoS("Success allocate virtual cluster ports", "allocate ports", ports, "vc ports", ports[:2]) @@ -982,6 +966,7 @@ func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1 } // AllocateVip allocate vip for virtual cluster +// nolint:revive // #nosec G602 func (c 
*VirtualClusterInitController) AllocateVip(virtualCluster *v1alpha1.VirtualCluster, vipPool *VipPool) error { c.lock.Lock() diff --git a/pkg/kubenest/controller/virtualcluster_init_controller_test.go b/pkg/kubenest/controller/virtualcluster_init_controller_test.go index 2310bd661..0e3a3b436 100644 --- a/pkg/kubenest/controller/virtualcluster_init_controller_test.go +++ b/pkg/kubenest/controller/virtualcluster_init_controller_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -func TestNetxFunc(t *testing.T) { +func TestNetxFunc(_ *testing.T) { portsPool := []int32{1, 2, 3, 4, 5} type nextfunc func() (int32, error) // var next nextfunc @@ -28,33 +28,34 @@ func TestNetxFunc(t *testing.T) { func TestCreateApiAnpServer(t *testing.T) { var name, namespace string - apiAnpAgentSvc := createApiAnpAgentSvc(name, namespace, nameMap) + apiAnpAgentSvc := createAPIAnpAgentSvc(name, namespace, nameMap) if len(apiAnpAgentSvc.Spec.Ports) != 4 { t.Fatalf("apiAnpAgentSvc.Spec.Ports len != 4") } - if apiAnpAgentSvc.Spec.Ports[0].Name != "agentport" { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[0].Name != agentport") + ports := make([]int32, 5) + for _, port := range apiAnpAgentSvc.Spec.Ports { + v, ok := nameMap[port.Name] + if ok { + ports[v] = port.Port + } else { + t.Fatalf("can not get node port for %s", port.Name) + } } - if apiAnpAgentSvc.Spec.Ports[0].Port != 8081 { + + if ports[1] != 8081 { t.Fatalf("apiAnpAgentSvc.Spec.Ports[0].Port != 8081") } - if apiAnpAgentSvc.Spec.Ports[1].Name != "serverport" { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[1].Name != serverport") - } - if apiAnpAgentSvc.Spec.Ports[1].Port != 8082 { + + if ports[2] != 8082 { t.Fatalf("apiAnpAgentSvc.Spec.Ports[1].Port != 8082") } - if apiAnpAgentSvc.Spec.Ports[2].Name != "healthport" { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[2].Name != healthport") - } - if apiAnpAgentSvc.Spec.Ports[2].Port != 8083 { + + if ports[3] != 8083 { t.Fatalf("apiAnpAgentSvc.Spec.Ports[2].Port != 8083") } - if apiAnpAgentSvc.Spec.Ports[3].Name != "adminport" { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[3].Name != adminport") - } - if apiAnpAgentSvc.Spec.Ports[3].Port != 8084 { - t.Fatalf("apiAnpAgentSvc.Spec.Ports[3].Port != 8084") + + if ports[4] != 8084 { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[3].Port != 8084") } } diff --git a/pkg/kubenest/controlplane/apiserver.go b/pkg/kubenest/controlplane/apiserver.go index a1affd1e3..ef763fa5a 100644 --- a/pkg/kubenest/controlplane/apiserver.go +++ b/pkg/kubenest/controlplane/apiserver.go @@ -22,7 +22,7 @@ func EnsureVirtualClusterAPIServer(client clientset.Interface, name, namespace s } func DeleteVirtualClusterAPIServer(client clientset.Interface, name, namespace string) error { - deployName := util.GetApiServerName(name) + deployName := util.GetAPIServerName(name) if err := util.DeleteDeployment(client, deployName, namespace); err != nil { return errors.Wrapf(err, "Failed to delete deployment %s/%s", deployName, namespace) } @@ -31,14 +31,14 @@ func DeleteVirtualClusterAPIServer(client clientset.Interface, name, namespace s func installAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { imageRepository, imageVersion := util.GetImageMessage() - clusterIp, err := util.GetEtcdServiceClusterIp(namespace, name+constants.EtcdSuffix, client) + clusterIP, err := util.GetEtcdServiceClusterIP(namespace, name+constants.EtcdSuffix, client) if err != nil { return nil } vclabel := util.GetVirtualControllerLabel() - 
IPV6FirstFlag, err := util.IPV6First(constants.ApiServerServiceSubnet) + IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet) if err != nil { return err } @@ -51,23 +51,23 @@ func installAPIServer(client clientset.Interface, name, namespace string, portMa ClusterPort int32 AdmissionPlugins bool IPV6First bool - UseApiServerNodePort bool + UseAPIServerNodePort bool }{ - DeploymentName: util.GetApiServerName(name), + DeploymentName: util.GetAPIServerName(name), Namespace: namespace, ImageRepository: imageRepository, Version: imageVersion, VirtualControllerLabel: vclabel, - EtcdClientService: clusterIp, - ServiceSubnet: constants.ApiServerServiceSubnet, + EtcdClientService: clusterIP, + ServiceSubnet: constants.APIServerServiceSubnet, VirtualClusterCertsSecret: util.GetCertName(name), EtcdCertsSecret: util.GetEtcdCertName(name), - Replicas: kubeNestConfiguration.KubeInKubeConfig.ApiServerReplicas, - EtcdListenClientPort: constants.ApiServerEtcdListenClientPort, - ClusterPort: portMap[constants.ApiServerPortKey], + Replicas: kubeNestConfiguration.KubeInKubeConfig.APIServerReplicas, + EtcdListenClientPort: constants.APIServerEtcdListenClientPort, + ClusterPort: portMap[constants.APIServerPortKey], IPV6First: IPV6FirstFlag, AdmissionPlugins: kubeNestConfiguration.KubeInKubeConfig.AdmissionPlugins, - UseApiServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort, + UseAPIServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort, }) if err != nil { return fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) diff --git a/pkg/kubenest/controlplane/component.go b/pkg/kubenest/controlplane/component.go index 71c248888..ab7fb33ae 100644 --- a/pkg/kubenest/controlplane/component.go +++ b/pkg/kubenest/controlplane/component.go @@ -116,7 +116,7 @@ func getKubeControllerManagerManifest(name, namespace, clusterCIDR string) (*app vclabel := util.GetVirtualControllerLabel() - IPV6FirstFlag, err := util.IPV6First(constants.ApiServerServiceSubnet) + IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet) if err != nil { return nil, err } @@ -139,7 +139,7 @@ func getKubeControllerManagerManifest(name, namespace, clusterCIDR string) (*app VirtualControllerLabel: vclabel, VirtualClusterCertsSecret: util.GetCertName(name), KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), - ServiceSubnet: constants.ApiServerServiceSubnet, + ServiceSubnet: constants.APIServerServiceSubnet, PodSubnet: podSubnet, Replicas: constants.KubeControllerReplicas, IPV6First: IPV6FirstFlag, diff --git a/pkg/kubenest/controlplane/coredns/coredns.go b/pkg/kubenest/controlplane/coredns/coredns.go deleted file mode 100644 index 1265f1cc4..000000000 --- a/pkg/kubenest/controlplane/coredns/coredns.go +++ /dev/null @@ -1,153 +0,0 @@ -package coredns - -import ( - "fmt" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/coredns/host" - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/coredns/virtualcluster" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureHostCoreDns(client clientset.Interface, name, namespace string) error { - err := 
installCoreDnsConfigMap(client, namespace) - if err != nil { - return err - } - - err = EnsureCoreDnsRBAC(client, namespace, name) - if err != nil { - return err - } - - err = installCoreDnsDeployment(client, name, namespace) - if err != nil { - return err - } - return nil -} - -func EnsureVirtualClusterCoreDns(dynamicClient dynamic.Interface, templateMapping map[string]interface{}) error { - err := installCoreDnsEndpointsInVirtualCluster(dynamicClient, templateMapping) - if err != nil { - return err - } - - err = installCoreDnsServiceInVirtualCluster(dynamicClient, templateMapping) - if err != nil { - return err - } - return nil -} - -func installCoreDnsDeployment(client clientset.Interface, name, namespace string) error { - imageRepository, _ := util.GetImageMessage() - imageTag := util.GetCoreDnsImageTag() - coreDnsDeploymentBytes, err := util.ParseTemplate(host.CoreDnsDeployment, struct { - Namespace, Name, ImageRepository, CoreDNSImageTag string - }{ - Namespace: namespace, - Name: name, - ImageRepository: imageRepository, - CoreDNSImageTag: imageTag, - }) - if err != nil { - return fmt.Errorf("error when parsing core-dns deployment template: %w", err) - } - coreDnsDeployment := &appsv1.Deployment{} - if err := yaml.Unmarshal([]byte(coreDnsDeploymentBytes), coreDnsDeployment); err != nil { - return fmt.Errorf("error when decoding core-dns deployment: %w", err) - } - - if err := util.CreateOrUpdateDeployment(client, coreDnsDeployment); err != nil { - return fmt.Errorf("error when creating deployment for %s, err: %w", coreDnsDeployment.Name, err) - } - return nil -} - -func getCoreDnsConfigMapManifest(namespace string) (*v1.ConfigMap, error) { - coreDnsConfigMapBytes, err := util.ParseTemplate(host.CoreDnsCM, struct { - Namespace string - }{ - Namespace: namespace, - }) - if err != nil { - return nil, fmt.Errorf("error when parsing core-dns configMap template: %w", err) - } - - config := &v1.ConfigMap{} - if err := yaml.Unmarshal([]byte(coreDnsConfigMapBytes), config); err != nil { - return nil, fmt.Errorf("err when decoding core-dns configMap: %w", err) - } - - return config, nil -} - -func installCoreDnsConfigMap(client clientset.Interface, namespace string) error { - config, err := getCoreDnsConfigMapManifest(namespace) - if err != nil { - return err - } - - if err := util.CreateOrUpdateConfigMap(client, config); err != nil { - return fmt.Errorf("error when creating configMap for %s, err: %w", config.Name, err) - } - return nil -} - -func installCoreDnsServiceInVirtualCluster(dynamicClient dynamic.Interface, templateMapping map[string]interface{}) error { - coreDnsServiceInVcBytes, err := util.ParseTemplate(virtualcluster.CoreDnsService, templateMapping) - if err != nil { - return fmt.Errorf("error when parsing core-dns service in virtual cluster template: %w", err) - } - var obj unstructured.Unstructured - if err := yaml.Unmarshal([]byte(coreDnsServiceInVcBytes), &obj); err != nil { - return fmt.Errorf("err when decoding core-dns service in virtual cluster: %w", err) - } - - err = util.CreateObject(dynamicClient, obj.GetNamespace(), obj.GetName(), &obj) - if err != nil { - return fmt.Errorf("error when creating core-dns service in virtual cluster err: %w", err) - } - return nil -} - -func installCoreDnsEndpointsInVirtualCluster(dynamicClient dynamic.Interface, templateMapping map[string]interface{}) error { - coreDnsEndpointsInVcBytes, err := util.ParseTemplate(virtualcluster.CoreDnsEndpoints, templateMapping) - if err != nil { - return fmt.Errorf("error when parsing core-dns 
service in virtual cluster template: %w", err) - } - var obj unstructured.Unstructured - if err := yaml.Unmarshal([]byte(coreDnsEndpointsInVcBytes), &obj); err != nil { - return fmt.Errorf("err when decoding core-dns service in virtual cluster: %w", err) - } - - err = util.CreateObject(dynamicClient, obj.GetNamespace(), obj.GetName(), &obj) - if err != nil { - return fmt.Errorf("error when creating core-dns service in virtual cluster err: %w", err) - } - return nil -} - -func DeleteCoreDnsDeployment(client clientset.Interface, name, namespace string) error { - // delete deployment - deployName := fmt.Sprintf("%s-%s", name, "coredns") - if err := util.DeleteDeployment(client, deployName, namespace); err != nil { - return errors.Wrapf(err, "Failed to delete deployment %s/%s", deployName, namespace) - } - - // delete configmap - cmName := "coredns" - if err := util.DeleteConfigmap(client, cmName, namespace); err != nil { - return errors.Wrapf(err, "Failed to delete configmap %s/%s", cmName, namespace) - } - - return nil -} diff --git a/pkg/kubenest/controlplane/coredns/rbac.go b/pkg/kubenest/controlplane/coredns/rbac.go deleted file mode 100644 index 8e4d34e3f..000000000 --- a/pkg/kubenest/controlplane/coredns/rbac.go +++ /dev/null @@ -1,77 +0,0 @@ -package coredns - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/coredns/host" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureCoreDnsRBAC(client clientset.Interface, namespace string, name string) error { - if err := grantCoreDnsClusterSA(client, namespace); err != nil { - return err - } - if err := grantCoreDnsClusterRoleBinding(client, namespace, name); err != nil { - return err - } - if err := grantCoreDnsClusterRole(client, name); err != nil { - return err - } - return nil -} - -func grantCoreDnsClusterSA(client clientset.Interface, namespace string) error { - coreDnsClusterSABytes, err := util.ParseTemplate(host.CoreDnsSA, struct { - Namespace string - }{ - Namespace: namespace, - }) - if err != nil { - return fmt.Errorf("error when parsing core-dns sa template: %w", err) - } - serviceAccount := &v1.ServiceAccount{} - if err := yaml.Unmarshal([]byte(coreDnsClusterSABytes), serviceAccount); err != nil { - return fmt.Errorf("err when decoding core-dns view Clusterrole: %w", err) - } - return util.CreateOrUpdateClusterSA(client, serviceAccount, namespace) -} - -func grantCoreDnsClusterRoleBinding(client clientset.Interface, namespace string, name string) error { - coreDnsClusterRoleBindingBytes, err := util.ParseTemplate(host.CoreDnsClusterRoleBinding, struct { - Name string - Namespace string - }{ - Name: name, - Namespace: namespace, - }) - if err != nil { - return fmt.Errorf("error when parsing core-dns role binding template: %w", err) - } - viewClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - - if err := yaml.Unmarshal([]byte(coreDnsClusterRoleBindingBytes), viewClusterRoleBinding); err != nil { - return fmt.Errorf("err when decoding core-dns Clusterrole Binding: %w", err) - } - return util.CreateOrUpdateClusterRoleBinding(client, viewClusterRoleBinding) -} - -func grantCoreDnsClusterRole(client clientset.Interface, name string) error { - viewClusterRole := &rbacv1.ClusterRole{} - coreDnsClusterRoleBytes, err := util.ParseTemplate(host.CoreDnsClusterRole, struct { - Name string - }{ - Name: name, - }) - if err != nil { - return 
fmt.Errorf("error when parsing core-dns cluster role template: %w", err) - } - if err := yaml.Unmarshal([]byte(coreDnsClusterRoleBytes), viewClusterRole); err != nil { - return fmt.Errorf("err when decoding core-dns Clusterrole: %w", err) - } - return util.CreateOrUpdateClusterRole(client, viewClusterRole) -} diff --git a/pkg/kubenest/controlplane/endpoint.go b/pkg/kubenest/controlplane/endpoint.go index 0c7589e6f..2a815781a 100644 --- a/pkg/kubenest/controlplane/endpoint.go +++ b/pkg/kubenest/controlplane/endpoint.go @@ -17,20 +17,20 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/util" ) -func EnsureApiServerExternalEndPoint(kubeClient kubernetes.Interface) error { - err := CreateOrUpdateApiServerExternalEndpoint(kubeClient) +func EnsureAPIServerExternalEndPoint(kubeClient kubernetes.Interface) error { + err := CreateOrUpdateAPIServerExternalEndpoint(kubeClient) if err != nil { return err } - err = CreateOrUpdateApiServerExternalService(kubeClient) + err = CreateOrUpdateAPIServerExternalService(kubeClient) if err != nil { return err } return nil } -func CreateOrUpdateApiServerExternalEndpoint(kubeClient kubernetes.Interface) error { +func CreateOrUpdateAPIServerExternalEndpoint(kubeClient kubernetes.Interface) error { klog.V(4).Info("begin to get kubernetes endpoint") kubeEndpoint, err := kubeClient.CoreV1().Endpoints(constants.DefaultNs).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { @@ -40,10 +40,20 @@ func CreateOrUpdateApiServerExternalEndpoint(kubeClient kubernetes.Interface) er klog.V(4).Info("the Kubernetes endpoint is:", kubeEndpoint) newEndpoint := kubeEndpoint.DeepCopy() - newEndpoint.Name = constants.ApiServerExternalService + newEndpoint.Name = constants.APIServerExternalService newEndpoint.Namespace = constants.DefaultNs newEndpoint.ResourceVersion = "" + // Reconstruct the Ports without the 'name' field + for i := range newEndpoint.Subsets { + for j := range newEndpoint.Subsets[i].Ports { + newEndpoint.Subsets[i].Ports[j] = corev1.EndpointPort{ + Port: newEndpoint.Subsets[i].Ports[j].Port, + Protocol: newEndpoint.Subsets[i].Ports[j].Protocol, + } + } + } + // Try to create the endpoint _, err = kubeClient.CoreV1().Endpoints(constants.DefaultNs).Create(context.TODO(), newEndpoint, metav1.CreateOptions{}) if err != nil { @@ -53,7 +63,7 @@ func CreateOrUpdateApiServerExternalEndpoint(kubeClient kubernetes.Interface) er } // Endpoint already exists, retrieve it - existingEndpoint, err := kubeClient.CoreV1().Endpoints(constants.DefaultNs).Get(context.TODO(), constants.ApiServerExternalService, metav1.GetOptions{}) + existingEndpoint, err := kubeClient.CoreV1().Endpoints(constants.DefaultNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) if err != nil { klog.Error("get existing api-server-external-service endpoint failed", err) return errors.Wrap(err, "failed to get existing api-server-external-service endpoint") @@ -66,9 +76,8 @@ func CreateOrUpdateApiServerExternalEndpoint(kubeClient kubernetes.Interface) er if err != nil { klog.Error("update api-server-external-service endpoint failed", err) return errors.Wrap(err, "failed to update api-server-external-service endpoint") - } else { - klog.V(4).Info("successfully updated api-server-external-service endpoint") } + klog.V(4).Info("successfully updated api-server-external-service endpoint") } else { klog.V(4).Info("successfully created api-server-external-service endpoint") } @@ -76,12 +85,12 @@ func CreateOrUpdateApiServerExternalEndpoint(kubeClient kubernetes.Interface) 
er return nil } -func CreateOrUpdateApiServerExternalService(kubeClient kubernetes.Interface) error { +func CreateOrUpdateAPIServerExternalService(kubeClient kubernetes.Interface) error { port, err := getEndPointPort(kubeClient) if err != nil { return fmt.Errorf("error when getEndPointPort: %w", err) } - apiServerExternalServiceBytes, err := util.ParseTemplate(virtualcluster.ApiServerExternalService, struct { + apiServerExternalServiceBytes, err := util.ParseTemplate(virtualcluster.APIServerExternalService, struct { ServicePort int32 }{ ServicePort: port, @@ -94,7 +103,7 @@ func CreateOrUpdateApiServerExternalService(kubeClient kubernetes.Interface) err if err := yaml.Unmarshal([]byte(apiServerExternalServiceBytes), &svc); err != nil { return fmt.Errorf("err when decoding api-server-external-service in virtual cluster: %w", err) } - _, err = kubeClient.CoreV1().Services(constants.DefaultNs).Get(context.TODO(), constants.ApiServerExternalService, metav1.GetOptions{}) + _, err = kubeClient.CoreV1().Services(constants.DefaultNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) if err != nil { if !apierrors.IsNotFound(err) { // Try to create the service @@ -110,7 +119,7 @@ func CreateOrUpdateApiServerExternalService(kubeClient kubernetes.Interface) err func getEndPointPort(kubeClient kubernetes.Interface) (int32, error) { klog.V(4).Info("begin to get Endpoints ports...") - endpoints, err := kubeClient.CoreV1().Endpoints(constants.DefaultNs).Get(context.TODO(), constants.ApiServerExternalService, metav1.GetOptions{}) + endpoints, err := kubeClient.CoreV1().Endpoints(constants.DefaultNs).Get(context.TODO(), constants.APIServerExternalService, metav1.GetOptions{}) if err != nil { klog.Errorf("get Endpoints failed: %v", err) return 0, err diff --git a/pkg/kubenest/controlplane/etcd.go b/pkg/kubenest/controlplane/etcd.go index 453365e5e..3ce484a30 100644 --- a/pkg/kubenest/controlplane/etcd.go +++ b/pkg/kubenest/controlplane/etcd.go @@ -19,10 +19,7 @@ import ( ) func EnsureVirtualClusterEtcd(client clientset.Interface, name, namespace string, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - if err := installEtcd(client, name, namespace, kubeNestConfiguration, vc); err != nil { - return err - } - return nil + return installEtcd(client, name, namespace, kubeNestConfiguration, vc) } func DeleteVirtualClusterEtcd(client clientset.Interface, name, namespace string) error { @@ -33,16 +30,34 @@ func DeleteVirtualClusterEtcd(client clientset.Interface, name, namespace string return nil } +// nolint:revive func installEtcd(client clientset.Interface, name, namespace string, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { imageRepository, imageVersion := util.GetImageMessage() - nodeCount := getNodeCountFromPromotePolicy(vc) - resourceQuantity, err := resource.ParseQuantity(kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize) - if err != nil { - klog.Errorf("Failed to parse quantity %s: %v", kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize, err) - return err + var resourceQuantity resource.Quantity + var err error + + if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ETCDUnitSize != "" { + resourceQuantity, err = resource.ParseQuantity(vc.Spec.KubeInKubeConfig.ETCDUnitSize) + if err != nil { + klog.Errorf("Failed to parse etcdSize %s: %v", vc.Spec.KubeInKubeConfig.ETCDUnitSize, err) + return err + } + if resourceQuantity.Value() <= 0 { + klog.Errorf("Invalid vc.Spec.KubeInKubeConfig.ETCDUnitSize: must be greater than zero") + return fmt.Errorf("invalid etcd unit size %s: must be greater than zero", vc.Spec.KubeInKubeConfig.ETCDUnitSize) + } + resourceQuantity.Set(resourceQuantity.Value()) + } else { + nodeCount := getNodeCountFromPromotePolicy(vc) + resourceQuantity, err = resource.ParseQuantity(kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize) + if err != nil { + klog.Errorf("Failed to parse quantity %s: %v", kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize, err) + return err + } + resourceQuantity.Set(resourceQuantity.Value() * int64(nodeCount)) + } - resourceQuantity.Set(resourceQuantity.Value() * int64(nodeCount)) initialClusters := make([]string, constants.EtcdReplicas) for index := range initialClusters { 
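Editorial note: the sizing branch above gives a per-VirtualCluster ETCDUnitSize precedence over the global KubeNest value, which is otherwise multiplied by the node count from the promote policy. A small worked example of that arithmetic with resource.Quantity; the "1Gi"/"5Gi" values and the 3-node count are illustrative, not project defaults.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Global path: a 1Gi unit size scaled by a 3-node promote policy -> 3Gi.
	unit := resource.MustParse("1Gi")
	unit.Set(unit.Value() * 3)
	fmt.Println(unit.Value()) // 3221225472 bytes

	// Per-VirtualCluster path: an explicit override such as 5Gi is used as-is.
	override := resource.MustParse("5Gi")
	fmt.Println(override.Value()) // 5368709120 bytes
}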
than zero") + return err + } + resourceQuantity.Set(resourceQuantity.Value()) + } else { + nodeCount := getNodeCountFromPromotePolicy(vc) + resourceQuantity, err = resource.ParseQuantity(kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize) + if err != nil { + klog.Errorf("Failed to parse quantity %s: %v", kubeNestConfiguration.KubeInKubeConfig.ETCDUnitSize, err) + return err + } + resourceQuantity.Set(resourceQuantity.Value() * int64(nodeCount)) + } - resourceQuantity.Set(resourceQuantity.Value() * int64(nodeCount)) initialClusters := make([]string, constants.EtcdReplicas) for index := range initialClusters { @@ -57,10 +72,8 @@ func installEtcd(client clientset.Interface, name, namespace string, kubeNestCon initialClusters[index] = fmt.Sprintf("%s=%s", memberName, memberPeerURL) } - vclabel := util.GetVirtualControllerLabel() - - IPV6FirstFlag, err := util.IPV6First(constants.ApiServerServiceSubnet) + IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet) if err != nil { return err } diff --git a/pkg/kubenest/controlplane/rbac.go b/pkg/kubenest/controlplane/rbac.go deleted file mode 100644 index 9d344ab3e..000000000 --- a/pkg/kubenest/controlplane/rbac.go +++ /dev/null @@ -1,67 +0,0 @@ -package controlplane - -import ( - "fmt" - - v1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/util/yaml" - clientset "k8s.io/client-go/kubernetes" - - "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/scheduler" - "github.com/kosmos.io/kosmos/pkg/kubenest/util" -) - -func EnsureVirtualSchedulerRBAC(client clientset.Interface, namespace string) error { - if err := grantVirtualClusterResourceClusterSA(client, namespace); err != nil { - return err - } - if err := grantVirtualClusterResourceClusterRoleBinding(client, namespace); err != nil { - return err - } - if err := grantVirtualClusterResourceClusterRole(client); err != nil { - return err - } - return nil -} - -func grantVirtualClusterResourceClusterSA(client clientset.Interface, namespace string) error { - virtualClusterResourceClusterSABytes, err := util.ParseTemplate(scheduler.VirtualSchedulerSA, struct { - Namespace string - }{ - Namespace: namespace, - }) - if err != nil { - return fmt.Errorf("error when parsing virtualCluster-scheduler sa template: %w", err) - } - serviceAccount := &v1.ServiceAccount{} - if err := yaml.Unmarshal([]byte(virtualClusterResourceClusterSABytes), serviceAccount); err != nil { - return fmt.Errorf("err when decoding Karmada view Clusterrole: %w", err) - } - return util.CreateOrUpdateClusterSA(client, serviceAccount, namespace) -} - -func grantVirtualClusterResourceClusterRoleBinding(client clientset.Interface, namespace string) error { - virtualClusterResourceClusterRoleBindingBytes, err := util.ParseTemplate(scheduler.VirtualSchedulerRoleBinding, struct { - Namespace string - }{ - Namespace: namespace, - }) - if err != nil { - return fmt.Errorf("error when parsing virtualCluster-scheduler role binding template: %w", err) - } - viewClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - - if err := yaml.Unmarshal([]byte(virtualClusterResourceClusterRoleBindingBytes), viewClusterRoleBinding); err != nil { - return fmt.Errorf("err when decoding virtualCluster scheduler Clusterrole Binding: %w", err) - } - return util.CreateOrUpdateClusterRoleBinding(client, viewClusterRoleBinding) -} - -func grantVirtualClusterResourceClusterRole(client clientset.Interface) error { - viewClusterrole := &rbacv1.ClusterRole{} - if err := 
yaml.Unmarshal([]byte(scheduler.VirtualSchedulerRole), viewClusterrole); err != nil { - return fmt.Errorf("err when decoding virtualCluster scheduler Clusterrole: %w", err) - } - return util.CreateOrUpdateClusterRole(client, viewClusterrole) -} diff --git a/pkg/kubenest/controlplane/service.go b/pkg/kubenest/controlplane/service.go index a252f6241..ce1efcccc 100644 --- a/pkg/kubenest/controlplane/service.go +++ b/pkg/kubenest/controlplane/service.go @@ -30,12 +30,12 @@ func EnsureVirtualClusterService(client clientset.Interface, name, namespace str func DeleteVirtualClusterService(client clientset.Interface, name, namespace string) error { services := []string{ - util.GetApiServerName(name), + util.GetAPIServerName(name), util.GetEtcdServerName(name), util.GetEtcdClientServerName(name), "kube-dns", util.GetKonnectivityServerName(name), - util.GetKonnectivityApiServerName(name), + util.GetKonnectivityAPIServerName(name), } for _, service := range services { err := client.CoreV1().Services(namespace).Delete(context.TODO(), service, metav1.DeleteOptions{}) @@ -53,19 +53,19 @@ func DeleteVirtualClusterService(client clientset.Interface, name, namespace str } func createServerService(client clientset.Interface, name, namespace string, portMap map[string]int32, _ *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { - ipFamilies := utils.IPFamilyGenerator(constants.ApiServerServiceSubnet) + ipFamilies := utils.IPFamilyGenerator(constants.APIServerServiceSubnet) apiserverServiceBytes, err := util.ParseTemplate(apiserver.ApiserverService, struct { ServiceName, Namespace, ServiceType string ServicePort int32 IPFamilies []corev1.IPFamily - UseApiServerNodePort bool + UseAPIServerNodePort bool }{ - ServiceName: util.GetApiServerName(name), + ServiceName: util.GetAPIServerName(name), Namespace: namespace, - ServiceType: constants.ApiServerServiceType, - ServicePort: portMap[constants.ApiServerPortKey], + ServiceType: constants.APIServerServiceType, + ServicePort: portMap[constants.APIServerPortKey], IPFamilies: ipFamilies, - UseApiServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort, + UseAPIServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort, }) if err != nil { return fmt.Errorf("error when parsing virtualClusterApiserver serive template: %w", err) @@ -76,7 +76,7 @@ func createServerService(client clientset.Interface, name, namespace string, por }{ ServiceName: util.GetKonnectivityServerName(name), Namespace: namespace, - ProxyServerPort: portMap[constants.ApiServerNetworkProxyServerPortKey], + ProxyServerPort: portMap[constants.APIServerNetworkProxyServerPortKey], }) if err != nil { return fmt.Errorf("error when parsing virtualClusterApiserver anp service template: %w", err) @@ -143,7 +143,7 @@ func createServerService(client clientset.Interface, name, namespace string, por } //core-dns service - coreDnsServiceBytes, err := util.ParseTemplate(host.CoreDnsService, struct { + coreDNSServiceBytes, err := util.ParseTemplate(host.CoreDNSService, struct { Namespace string }{ Namespace: namespace, @@ -152,12 +152,12 @@ func createServerService(client clientset.Interface, name, namespace string, por return fmt.Errorf("error when parsing core-dns serive template: %w", err) } - coreDnsService := &corev1.Service{} - if err := yaml.Unmarshal([]byte(coreDnsServiceBytes), coreDnsService); err != nil { + coreDNSService := &corev1.Service{} + if err := 
yaml.Unmarshal([]byte(coreDNSServiceBytes), coreDNSService); err != nil { return fmt.Errorf("err when decoding core-dns service: %w", err) } - if err := util.CreateOrUpdateService(client, coreDnsService); err != nil { + if err := util.CreateOrUpdateService(client, coreDNSService); err != nil { return fmt.Errorf("err when creating core-dns service, err: %w", err) } diff --git a/pkg/kubenest/init.go b/pkg/kubenest/init.go index 0d4446cd2..26f2a7129 100644 --- a/pkg/kubenest/init.go +++ b/pkg/kubenest/init.go @@ -170,9 +170,9 @@ func newRunData(opt *InitOptions) (*initData, error) { if err != nil { return nil, fmt.Errorf("failed to get a valid node IP for APIServer, err: %w", err) } - var clusterIps []string - err, clusterIp := util.GetAPIServiceClusterIp(opt.Namespace, remoteClient) - clusterIps = append(clusterIps, clusterIp) + var clusterIPs []string + clusterIP, err := util.GetAPIServiceClusterIP(opt.Namespace, remoteClient) + clusterIPs = append(clusterIPs, clusterIP) if err != nil { return nil, fmt.Errorf("failed to get APIServer Service-ClusterIp, err: %w", err) } @@ -181,7 +181,7 @@ func newRunData(opt *InitOptions) (*initData, error) { namespace: opt.Namespace, virtualClusterVersion: version, controlplaneAddr: address, - clusterIps: clusterIps, + clusterIps: clusterIPs, remoteClient: remoteClient, dynamicClient: dynamicClient, kosmosClient: kosmosClient, @@ -228,8 +228,8 @@ func (i initData) ControlplaneAddress() string { return i.controlplaneAddr } -func (i initData) ServiceClusterIp() []string { - err, clusterIps := util.GetServiceClusterIp(i.namespace, i.remoteClient) +func (i initData) ServiceClusterIP() []string { + clusterIps, err := util.GetServiceClusterIP(i.namespace, i.remoteClient) if err != nil { return nil } diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go index 3c3dfec2d..e0b8ab8c4 100644 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go +++ b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go @@ -21,7 +21,7 @@ spec: virtualCluster-app: apiserver spec: automountServiceAccountToken: false - {{ if not .UseApiServerNodePort }} + {{ if not .UseAPIServerNodePort }} hostNetwork: true {{ end }} dnsPolicy: ClusterFirstWithHostNet @@ -56,7 +56,7 @@ spec: image: {{ .ImageRepository }}/kube-apiserver:{{ .Version }} imagePullPolicy: IfNotPresent env: - {{ if .UseApiServerNodePort }} + {{ if .UseAPIServerNodePort }} - name: HOSTIP valueFrom: fieldRef: @@ -107,7 +107,7 @@ spec: - --max-requests-inflight=1500 - --max-mutating-requests-inflight=500 - --v=4 - {{ if .UseApiServerNodePort }} + {{ if .UseAPIServerNodePort }} - --advertise-address=$(HOSTIP) {{ else }} - --advertise-address=$(PODIP) @@ -177,7 +177,7 @@ spec: virtualCluster-anp: apiserver-anp spec: automountServiceAccountToken: false - {{ if not .UseApiServerNodePort }} + {{ if not .UseAPIServerNodePort }} hostNetwork: true {{ end }} dnsPolicy: ClusterFirstWithHostNet diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go index 8bb31367c..e2dd017f2 100644 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go +++ b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go @@ -20,7 +20,7 @@ spec: port: {{ .ServicePort }} protocol: TCP targetPort: {{ .ServicePort }} - {{ if .UseApiServerNodePort }} + {{ if .UseAPIServerNodePort }} nodePort: {{ .ServicePort }} {{ end }} 
   selector:
diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go
index 1290184fd..3b0b48969 100644
--- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go
+++ b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service_test.go
@@ -18,12 +18,14 @@ func ParseServerTemplate(apiServerServiceSubnet string) (*corev1.Service, error)
 		ServiceName, Namespace, ServiceType string
 		ServicePort                         int32
 		IPFamilies                          []corev1.IPFamily
+		UseAPIServerNodePort                bool
 	}{
-		ServiceName: fmt.Sprintf("%s-%s", "test", "apiserver"),
-		Namespace:   "test-namespace",
-		ServiceType: constants.ApiServerServiceType,
-		ServicePort: 40010,
-		IPFamilies:  ipFamilies,
+		ServiceName:          fmt.Sprintf("%s-%s", "test", "apiserver"),
+		Namespace:            "test-namespace",
+		ServiceType:          constants.APIServerServiceType,
+		ServicePort:          40010,
+		IPFamilies:           ipFamilies,
+		UseAPIServerNodePort: false,
 	})
 	if err != nil {
diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go
index ea733a7a9..7156773a6 100644
--- a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go
+++ b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_configmap.go
@@ -1,7 +1,7 @@
 package host
 
 const (
-	CoreDnsCM = `
+	CoreDNSCM = `
 apiVersion: v1
 data:
   Corefile: |
diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go
index 4843463ba..b0b7229f8 100644
--- a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go
+++ b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_deployment.go
@@ -1,7 +1,7 @@
 package host
 
 const (
-	CoreDnsDeployment = `
+	CoreDNSDeployment = `
 apiVersion: apps/v1
 kind: Deployment
 metadata:
diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go
index c704f5a2a..404f4010f 100644
--- a/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go
+++ b/pkg/kubenest/manifest/controlplane/coredns/host/manifest_service.go
@@ -1,7 +1,7 @@
 package host
 
 const (
-	CoreDnsService = `
+	CoreDNSService = `
 apiVersion: v1
 kind: Service
 metadata:
diff --git a/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go b/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go
index fbd3072d7..f04ed7da7 100644
--- a/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go
+++ b/pkg/kubenest/manifest/controlplane/coredns/host/manifests_rbac.go
@@ -1,7 +1,7 @@
 package host
 
 const (
-	CoreDnsSA = `
+	CoreDNSSA = `
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -9,7 +9,7 @@ metadata:
   namespace: {{ .Namespace }}
 `
 
-	CoreDnsClusterRoleBinding = `
+	CoreDNSClusterRoleBinding = `
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
@@ -24,7 +24,7 @@ subjects:
     namespace: {{ .Namespace }}
 `
 
-	CoreDnsClusterRole = `
+	CoreDNSClusterRole = `
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
diff --git a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go b/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go
index b9ac2de84..d11b89c5d 100644
--- a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go
+++ b/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_endpoints.go
@@ -1,7 +1,7 @@
 package virtualcluster
 
 const (
-	CoreDnsEndpoints = `
+	CoreDNSEndpoints = `
 apiVersion: v1
 kind: Endpoints
 metadata:
diff --git a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go b/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go
index 29fd8b482..3f5df06d4 100644
--- a/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go
+++ b/pkg/kubenest/manifest/controlplane/coredns/virtualcluster/manifest_service.go
@@ -1,7 +1,7 @@
 package virtualcluster
 
 const (
-	CoreDnsService = `
+	CoreDNSService = `
 apiVersion: v1
 kind: Service
 metadata:
diff --git a/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go b/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go
index 1ac24b81d..614fbd753 100644
--- a/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go
+++ b/pkg/kubenest/manifest/controlplane/virtualcluster/manifests_service.go
@@ -1,7 +1,7 @@
 package virtualcluster
 
 const (
-	ApiServerExternalService = `
+	APIServerExternalService = `
 apiVersion: v1
 kind: Service
 metadata:
diff --git a/pkg/kubenest/tasks/anp.go b/pkg/kubenest/tasks/anp.go
index eb4e675f4..66a95a935 100644
--- a/pkg/kubenest/tasks/anp.go
+++ b/pkg/kubenest/tasks/anp.go
@@ -79,7 +79,7 @@ func runAnpServer(r workflow.RunData) error {
 	}{
 		Namespace:       namespace,
 		Name:            name,
-		ProxyServerPort: portMap[constants.ApiServerNetworkProxyServerPortKey],
+		ProxyServerPort: portMap[constants.APIServerNetworkProxyServerPortKey],
 		SvcName:         fmt.Sprintf("%s-konnectivity-server.%s.svc.cluster.local", name, namespace),
 		AnpMode:         kubeNestOpt.KubeInKubeConfig.AnpMode,
 	})
@@ -145,12 +145,12 @@ func uninstallAnp(r workflow.RunData) error {
 }
 
 func installAnpServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error {
 	imageRepository, imageVersion := util.GetImageMessage()
-	clusterIp, err := util.GetEtcdServiceClusterIp(namespace, name+constants.EtcdSuffix, client)
+	clusterIP, err := util.GetEtcdServiceClusterIP(namespace, name+constants.EtcdSuffix, client)
 	if err != nil {
 		return nil
 	}
 
-	IPV6FirstFlag, err := util.IPV6First(constants.ApiServerServiceSubnet)
+	IPV6FirstFlag, err := util.IPV6First(constants.APIServerServiceSubnet)
 	if err != nil {
 		return err
 	}
@@ -172,30 +172,30 @@ func installAnpServer(client clientset.Interface, name, namespace string, portMa
 		AnpMode                   string
 		AdmissionPlugins          bool
 		IPV6First                 bool
-		UseApiServerNodePort      bool
+		UseAPIServerNodePort      bool
 	}{
 		DeploymentName:            util.GetApiServerAnpName(name),
 		Namespace:                 namespace,
 		ImageRepository:           imageRepository,
 		Version:                   imageVersion,
 		VirtualControllerLabel:    vclabel,
-		EtcdClientService:         clusterIp,
-		ServiceSubnet:             constants.ApiServerServiceSubnet,
+		EtcdClientService:         clusterIP,
+		ServiceSubnet:             constants.APIServerServiceSubnet,
 		VirtualClusterCertsSecret: util.GetCertName(name),
 		EtcdCertsSecret:           util.GetEtcdCertName(name),
-		Replicas:                  kubeNestConfiguration.KubeInKubeConfig.ApiServerReplicas,
-		EtcdListenClientPort:      constants.ApiServerEtcdListenClientPort,
-		ClusterPort:               portMap[constants.ApiServerPortKey],
-		AgentPort:                 portMap[constants.ApiServerNetworkProxyAgentPortKey],
-		ServerPort:                portMap[constants.ApiServerNetworkProxyServerPortKey],
-		HealthPort:                portMap[constants.ApiServerNetworkProxyHealthPortKey],
-		AdminPort:                 portMap[constants.ApiServerNetworkProxyAdminPortKey],
+		Replicas:                  kubeNestConfiguration.KubeInKubeConfig.APIServerReplicas,
+		EtcdListenClientPort:      constants.APIServerEtcdListenClientPort,
+		ClusterPort:               portMap[constants.APIServerPortKey],
+		AgentPort:                 portMap[constants.APIServerNetworkProxyAgentPortKey],
+		ServerPort:                portMap[constants.APIServerNetworkProxyServerPortKey],
+		HealthPort:                portMap[constants.APIServerNetworkProxyHealthPortKey],
+		AdminPort:                 portMap[constants.APIServerNetworkProxyAdminPortKey],
 		KubeconfigSecret:          util.GetAdminConfigClusterIPSecretName(name),
 		Name:                      name,
 		AnpMode:                   kubeNestConfiguration.KubeInKubeConfig.AnpMode,
 		AdmissionPlugins:          kubeNestConfiguration.KubeInKubeConfig.AdmissionPlugins,
 		IPV6First:                 IPV6FirstFlag,
-		UseApiServerNodePort:      vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort,
+		UseAPIServerNodePort:      vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort,
 	})
 	if err != nil {
 		return fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err)
@@ -211,7 +211,7 @@ func installAnpServer(client clientset.Interface, name, namespace string, portMa
 		return fmt.Errorf("error when creating deployment for %s, err: %w", apiserverDeployment.Name, err)
 	}
 
-	if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort {
+	if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort {
 		apiserverServiceBytes, err := util.ParseTemplate(apiserver.ApiserverAnpAgentService, struct {
 			SVCName, Namespace string
 			ClusterPort        int32
@@ -220,13 +220,13 @@ func installAnpServer(client clientset.Interface, name, namespace string, portMa
 			HealthPort         int32
 			AdminPort          int32
 		}{
-			SVCName:     util.GetKonnectivityApiServerName(name),
+			SVCName:     util.GetKonnectivityAPIServerName(name),
 			Namespace:   namespace,
-			ClusterPort: portMap[constants.ApiServerPortKey],
-			AgentPort:   portMap[constants.ApiServerNetworkProxyAgentPortKey],
-			ServerPort:  portMap[constants.ApiServerNetworkProxyServerPortKey],
-			HealthPort:  portMap[constants.ApiServerNetworkProxyHealthPortKey],
-			AdminPort:   portMap[constants.ApiServerNetworkProxyAdminPortKey],
+			ClusterPort: portMap[constants.APIServerPortKey],
+			AgentPort:   portMap[constants.APIServerNetworkProxyAgentPortKey],
+			ServerPort:  portMap[constants.APIServerNetworkProxyServerPortKey],
+			HealthPort:  portMap[constants.APIServerNetworkProxyHealthPortKey],
+			AdminPort:   portMap[constants.APIServerNetworkProxyAdminPortKey],
 		})
 		if err != nil {
 			return fmt.Errorf("error when parsing virtual cluster apiserver svc template: %w", err)
@@ -269,10 +269,10 @@ func getAnpAgentManifest(client clientset.Interface, name string, namespace stri
 	// get apiServer hostIp
 	var proxyServerHost []string
 	var err error
-	if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort {
-		proxyServerHost, err = getDeploymentHostIPs(client, namespace, util.GetApiServerName(name))
+	if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.APIServerServiceType == v1alpha1.NodePort {
+		proxyServerHost, err = getDeploymentHostIPs(client, namespace, util.GetAPIServerName(name))
 	} else {
-		proxyServerHost, err = getDeploymentPodIPs(client, namespace, util.GetApiServerName(name))
+		proxyServerHost, err = getDeploymentPodIPs(client, namespace, util.GetAPIServerName(name))
 	}
 
 	if err != nil {
@@ -291,7 +291,7 @@ func getAnpAgentManifest(client clientset.Interface, name string, namespace stri
 	}{
 		ImageRepository: imageRepository,
 		Version:         imageVersion,
-		AgentPort:       portMap[constants.ApiServerNetworkProxyAgentPortKey],
+		AgentPort:       portMap[constants.APIServerNetworkProxyAgentPortKey],
 		ProxyServerHost: proxyServerHost,
 		AnpMode:         kubeNestConfiguration.KubeInKubeConfig.AnpMode,
 		AgentCertName:   util.GetCertName(name),
diff --git a/pkg/kubenest/tasks/apiserver.go b/pkg/kubenest/tasks/apiserver.go
index 254163f05..48e02dd56 100644
--- a/pkg/kubenest/tasks/apiserver.go
+++ b/pkg/kubenest/tasks/apiserver.go
@@ -86,7 +86,7 @@ func UninstallVirtualClusterApiserverTask() workflow.Task {
 		RunSubTasks: true,
 		Tasks: []workflow.Task{
 			{
-				Name: constants.ApiServer,
+				Name: constants.APIServer,
 				Run:  uninstallVirtualClusterAPIServer,
 			},
 		},
diff --git a/pkg/kubenest/tasks/cert.go b/pkg/kubenest/tasks/cert.go
index e8deaa495..74abe21d8 100644
--- a/pkg/kubenest/tasks/cert.go
+++ b/pkg/kubenest/tasks/cert.go
@@ -134,7 +134,7 @@ func mutateCertConfig(data InitData, cc *cert.CertConfig) error {
 			Name:             data.GetName(),
 			Namespace:        data.GetNamespace(),
 			ControlplaneAddr: data.ControlplaneAddress(),
-			ClusterIps:       data.ServiceClusterIp(),
+			ClusterIPs:       data.ServiceClusterIP(),
 			ExternalIP:       data.ExternalIP(),
 			ExternalIPs:      data.ExternalIPs(),
 			VipMap:           data.VipMap(),
diff --git a/pkg/kubenest/tasks/check.go b/pkg/kubenest/tasks/check.go
index d87182ea2..d951984d6 100644
--- a/pkg/kubenest/tasks/check.go
+++ b/pkg/kubenest/tasks/check.go
@@ -15,9 +15,9 @@ import (
 var (
 	kubeControllerManagerLabels   = labels.Set{"virtualCluster-app": constants.KubeControllerManager}
 	virtualClusterManagerLabels   = labels.Set{"virtualCluster-app": constants.VirtualClusterScheduler}
-	virtualClusterApiserverLabels = labels.Set{"virtualCluster-app": constants.ApiServer}
+	virtualClusterApiserverLabels = labels.Set{"virtualCluster-app": constants.APIServer}
 	virtualClusterEtcdLabels      = labels.Set{"virtualCluster-app": constants.Etcd}
-	virtualClusterAnpLabels       = labels.Set{"virtualCluster-anp": constants.ApiServerAnp}
+	virtualClusterAnpLabels       = labels.Set{"virtualCluster-anp": constants.APIServerAnp}
 )
 
 func NewCheckApiserverHealthTask() workflow.Task {
diff --git a/pkg/kubenest/tasks/coredns.go b/pkg/kubenest/tasks/coredns.go
index 936bff053..e181b8039 100644
--- a/pkg/kubenest/tasks/coredns.go
+++ b/pkg/kubenest/tasks/coredns.go
@@ -27,26 +27,40 @@ import (
 func NewCoreDNSTask() workflow.Task {
 	return workflow.Task{
 		Name:        "coreDns",
-		Run:         runCoreDns,
+		Run:         runCoreDNS,
+		Skip:        skipCoreDNS,
 		RunSubTasks: true,
 		Tasks: []workflow.Task{
 			{
 				Name: "deploy-core-dns-in-host-cluster",
-				Run:  runCoreDnsHostTask,
+				Run:  runCoreDNSHostTask,
 			},
 			{
 				Name: "check-core-dns",
-				Run:  runCheckCoreDnsTask,
+				Run:  runCheckCoreDNSTask,
 			},
 			{
 				Name: "deploy-core-dns-service-in-virtual-cluster",
-				Run:  runCoreDnsVirtualTask,
+				Run:  runCoreDNSVirtualTask,
 			},
 		},
 	}
 }
 
-func runCoreDns(r workflow.RunData) error {
+func skipCoreDNS(d workflow.RunData) (bool, error) {
+	data, ok := d.(InitData)
+	if !ok {
+		return false, errors.New("coreDns task invoked with an invalid data struct")
+	}
+
+	vc := data.VirtualCluster()
+	if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.UseTenantDNS {
+		return true, nil
+	}
+	return false, nil
+}
+
+func runCoreDNS(r workflow.RunData) error {
 	data, ok := r.(InitData)
 	if !ok {
 		return errors.New("coreDns task invoked with an invalid data struct")
@@ -59,7 +73,7 @@ func runCoreDns(r workflow.RunData) error {
 func UninstallCoreDNSTask() workflow.Task {
 	return workflow.Task{
 		Name:        "coredns",
-		Run:         runCoreDns,
+		Run:         runCoreDNS,
 		RunSubTasks: true,
 		Tasks: []workflow.Task{
 			{
@@ -70,7 +84,7 @@ func UninstallCoreDNSTask() workflow.Task {
 	}
 }
 
-func getCoreDnsHostComponentsConfig(client clientset.Interface, keyName string) ([]ComponentConfig, error) {
+func getCoreDNSHostComponentsConfig(client clientset.Interface, keyName string) ([]ComponentConfig, error) {
 	cm, err := client.CoreV1().ConfigMaps(constants.KosmosNs).Get(context.Background(), constants.ManifestComponentsConfigMap, metav1.GetOptions{})
 	if err != nil {
 		if apierrors.IsNotFound(err) {
@@ -93,7 +107,7 @@ func getCoreDnsHostComponentsConfig(client clientset.Interface, keyName string)
 }
 
 // in host
-func runCoreDnsHostTask(r workflow.RunData) error {
+func runCoreDNSHostTask(r workflow.RunData) error {
 	data, ok := r.(InitData)
 	if !ok {
 		return errors.New("Virtual cluster manifests-components task invoked with an invalid data struct")
@@ -101,7 +115,7 @@ func runCoreDnsHostTask(r workflow.RunData) error {
 
 	dynamicClient := data.DynamicClient()
 
-	components, err := getCoreDnsHostComponentsConfig(data.RemoteClient(), constants.HostCoreDnsComponents)
+	components, err := getCoreDNSHostComponentsConfig(data.RemoteClient(), constants.HostCoreDnsComponents)
 	if err != nil {
 		return err
 	}
@@ -135,7 +149,7 @@ func uninstallCorednsHostTask(r workflow.RunData) error {
 
 	dynamicClient := data.DynamicClient()
 
-	components, err := getCoreDnsHostComponentsConfig(data.RemoteClient(), constants.HostCoreDnsComponents)
+	components, err := getCoreDNSHostComponentsConfig(data.RemoteClient(), constants.HostCoreDnsComponents)
 	if err != nil {
 		return err
 	}
@@ -159,7 +173,7 @@ func uninstallCorednsHostTask(r workflow.RunData) error {
 }
 
 // in host
-func runCheckCoreDnsTask(r workflow.RunData) error {
+func runCheckCoreDNSTask(r workflow.RunData) error {
 	data, ok := r.(InitData)
 	if !ok {
 		return errors.New("Virtual cluster manifests-components task invoked with an invalid data struct")
@@ -185,7 +199,7 @@ func runCheckCoreDnsTask(r workflow.RunData) error {
 	return fmt.Errorf("kube-dns is not ready")
 }
 
-func runCoreDnsVirtualTask(r workflow.RunData) error {
+func runCoreDNSVirtualTask(r workflow.RunData) error {
 	data, ok := r.(InitData)
 	if !ok {
 		return errors.New("Virtual cluster coreDns task invoked with an invalid data struct")
@@ -205,7 +219,7 @@ func runCoreDnsVirtualTask(r workflow.RunData) error {
 		return err
 	}
 
-	components, err := getCoreDnsHostComponentsConfig(data.RemoteClient(), constants.VirtualCoreDnsComponents)
+	components, err := getCoreDNSHostComponentsConfig(data.RemoteClient(), constants.VirtualCoreDNSComponents)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/kubenest/tasks/data.go b/pkg/kubenest/tasks/data.go
index 4093d2285..814899b40 100644
--- a/pkg/kubenest/tasks/data.go
+++ b/pkg/kubenest/tasks/data.go
@@ -14,7 +14,7 @@ type InitData interface {
 	GetName() string
 	GetNamespace() string
 	ControlplaneAddress() string
-	ServiceClusterIp() []string
+	ServiceClusterIP() []string
 	RemoteClient() clientset.Interface
 	KosmosClient() versioned.Interface
 	DataDir() string
diff --git a/pkg/kubenest/tasks/endpoint.go b/pkg/kubenest/tasks/endpoint.go
index 7e5474b51..8fcd9d31b 100644
--- a/pkg/kubenest/tasks/endpoint.go
+++ b/pkg/kubenest/tasks/endpoint.go
@@ -60,7 +60,7 @@ func runEndPointInVirtualClusterTask(r workflow.RunData) error {
 		return err
 	}
 
-	err = controlplane.EnsureApiServerExternalEndPoint(kubeClient)
+	err = controlplane.EnsureAPIServerExternalEndPoint(kubeClient)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/kubenest/tasks/manifests_components.go b/pkg/kubenest/tasks/manifests_components.go
index 3af0ffda2..b5179ccab 100644
--- a/pkg/kubenest/tasks/manifests_components.go
+++ b/pkg/kubenest/tasks/manifests_components.go
@@ -29,6 +29,11 @@ type ComponentConfig struct {
 	Path string `json:"path" yaml:"path"`
 }
 
+type SkipComponentCondition struct {
+	Condition     bool
+	ComponentName string
+}
+
 func NewComponentsFromManifestsTask() workflow.Task {
 	return workflow.Task{
 		Name: "manifests-components",
@@ -53,6 +58,14 @@ func runComponentsFromManifests(r workflow.RunData) error {
 	return nil
 }
 
+func getSkipComponentsForVirtualCluster(condition []*SkipComponentCondition) map[string]bool {
+	skipComponents := map[string]bool{}
+	for _, c := range condition {
+		skipComponents[c.ComponentName] = c.Condition
+	}
+	return skipComponents
+}
+
 func applyComponentsManifests(r workflow.RunData) error {
 	data, ok := r.(InitData)
 	if !ok {
@@ -96,10 +109,24 @@ func applyComponentsManifests(r workflow.RunData) error {
 		templatedMapping["KeepalivedReplicas"] = keepalivedReplicas
 	}
 
+	UseTenantDNS := data.VirtualCluster().Spec.KubeInKubeConfig != nil && data.VirtualCluster().Spec.KubeInKubeConfig.UseTenantDNS
+
+	skipComponents := getSkipComponentsForVirtualCluster([]*SkipComponentCondition{
+		{
+			// skip the tenant coredns component unless tenant dns is enabled
+			Condition:     !UseTenantDNS,
+			ComponentName: constants.TenantCoreDNSComponentName,
+		}, {
+			// skip keepalived component if vip is not enabled
+			Condition:     !keepalivedEnable,
+			ComponentName: constants.VipKeepalivedComponentName,
+		},
+	})
+
 	for _, component := range components {
 		klog.V(2).Infof("Deploy component %s", component.Name)
-		// skip keepalived component if vip is not enabled
-		if !keepalivedEnable && component.Name == constants.VipKeepalivedComponentName {
+		if v, ok := skipComponents[component.Name]; ok && v {
+			klog.V(2).Infof("Deploy component %s skipped", component.Name)
 			continue
 		}
 		err = applyTemplatedManifests(component.Name, dynamicClient, component.Path, templatedMapping)
diff --git a/pkg/kubenest/tasks/manifests_components_test.go b/pkg/kubenest/tasks/manifests_components_test.go
new file mode 100644
index 000000000..9f57b4e98
--- /dev/null
+++ b/pkg/kubenest/tasks/manifests_components_test.go
@@ -0,0 +1,476 @@
+package tasks
+
+import "testing"
+
+type ResultFlag bool
+
+const (
+	Reserve ResultFlag = true
+	Skip    ResultFlag = false
+)
+
+type Want struct {
+	Name   string
+	Result ResultFlag // false if skip
+}
+
+func TestGetSkipComponentsForVirtualCluster(t *testing.T) {
+	tests := []struct {
+		name      string
+		input     []*SkipComponentCondition
+		want      []Want
+		skipCount int
+	}{
+		{
+			name: "test-single",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+			},
+			skipCount: 1,
+		},
+		{
+			name: "test-double",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-2",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Skip,
+				},
+			},
+			skipCount: 2,
+		},
+		{
+			name: "test-middle",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-3",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-3",
+					Result: Skip,
+				},
+			},
+			skipCount: 2,
+		},
+		{
+			name: "test-all-reserve",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     false,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-3",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-2",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-3",
+					Result: Reserve,
+				},
+			},
+			skipCount: 0,
+		},
+		{
+			name: "test-all-skip",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-3",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-3",
+					Result: Skip,
+				},
+			},
+			skipCount: 3,
+		},
+		{
+			name: "test-first-skip",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-3",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-3",
+					Result: Reserve,
+				},
+			},
+			skipCount: 1,
+		},
+		{
+			name: "test-big-data",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-3",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-4",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-5",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-6",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-7",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-8",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-9",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-10",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-3",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-4",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-5",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-6",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-7",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-8",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-9",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-10",
+					Result: Reserve,
+				},
+			},
+			skipCount: 1,
+		},
+		{
+			name: "test-big-data-2",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-3",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-4",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-5",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-6",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-7",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-8",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-9",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-3",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-4",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-5",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-6",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-7",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-8",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-9",
+					Result: Skip,
+				},
+			},
+			skipCount: 4,
+		},
+		{
+			name: "test-big-data-3",
+			input: []*SkipComponentCondition{
+				{
+					Condition:     true,
+					ComponentName: "skip-1",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-2",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-3",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-4",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-5",
+				},
+				{
+					Condition:     false,
+					ComponentName: "skip-6",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-7",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-8",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-9",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-10",
+				},
+				{
+					Condition:     true,
+					ComponentName: "skip-11",
+				},
+			},
+			want: []Want{
+				{
+					Name:   "skip-1",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-2",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-3",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-4",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-5",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-6",
+					Result: Reserve,
+				},
+				{
+					Name:   "skip-7",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-8",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-9",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-10",
+					Result: Skip,
+				},
+				{
+					Name:   "skip-11",
+					Result: Skip,
+				},
+			},
+			skipCount: 6,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			skipComponents := getSkipComponentsForVirtualCluster(tt.input)
+			count := 0
+			for _, want := range tt.want {
+				if v, ok := skipComponents[want.Name]; ok && v {
+					count++
+					continue
+				}
+				if !want.Result {
+					t.Errorf("getSkipComponentsForVirtualCluster() component %v was reserved, want skipped", want.Name)
+				}
+			}
+			if count != tt.skipCount {
+				t.Errorf("getSkipComponentsForVirtualCluster() skipped %v components, want %v", count, tt.skipCount)
+			}
+		})
+	}
+}
diff --git a/pkg/kubenest/tasks/proxy.go b/pkg/kubenest/tasks/proxy.go
index 089bebeeb..07e90b06f 100644
--- a/pkg/kubenest/tasks/proxy.go
+++ b/pkg/kubenest/tasks/proxy.go
@@ -113,7 +113,7 @@ func UninstallVirtualClusterProxyTask() workflow.Task {
 		RunSubTasks: true,
 		Tasks: []workflow.Task{
 			{
-				Name: constants.ApiServer,
+				Name: constants.APIServer,
 				Run:  uninstallVirtualClusterProxy,
 			},
 		},
diff --git a/pkg/kubenest/tasks/upload.go b/pkg/kubenest/tasks/upload.go
index 09171e44f..1a82a95ca 100644
--- a/pkg/kubenest/tasks/upload.go
+++ b/pkg/kubenest/tasks/upload.go
@@ -171,15 +171,15 @@ func runUploadAdminKubeconfig(r workflow.RunData) error {
 		return errors.New("UploadAdminKubeconfig task invoked with an invalid data struct")
 	}
 
-	var controlplaneIpEndpoint, clusterIPEndpoint string
-	service, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), util.GetApiServerName(data.GetName()), metav1.GetOptions{})
+	var controlplaneIPEndpoint, clusterIPEndpoint string
+	service, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), util.GetAPIServerName(data.GetName()), metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
 	portInfo := getPortInfoFromAPIServerService(service)
 	// controlplane address + nodePort
-	controlplaneIpEndpoint = fmt.Sprintf("https://%s", utils.GenerateAddrStr(data.ControlplaneAddress(), fmt.Sprintf("%d", portInfo.NodePort)))
-	controlplaneIpKubeconfig, err := buildKubeConfigFromSpec(data, controlplaneIpEndpoint)
+	controlplaneIPEndpoint = fmt.Sprintf("https://%s", utils.GenerateAddrStr(data.ControlplaneAddress(), fmt.Sprintf("%d", portInfo.NodePort)))
+	controlplaneIPKubeconfig, err := buildKubeConfigFromSpec(data, controlplaneIPEndpoint)
 	if err != nil {
 		return err
 	}
@@ -191,7 +191,7 @@ func runUploadAdminKubeconfig(r workflow.RunData) error {
 		return err
 	}
 
-	controlplaneIpConfigBytes, err := clientcmd.Write(*controlplaneIpKubeconfig)
+	controlplaneIPConfigBytes, err := clientcmd.Write(*controlplaneIPKubeconfig)
 	if err != nil {
 		return err
 	}
@@ -207,7 +207,7 @@ func runUploadAdminKubeconfig(r workflow.RunData) error {
 			Name:   util.GetAdminConfigSecretName(data.GetName()),
 			Labels: VirtualClusterControllerLabel,
 		},
-		Data: map[string][]byte{"kubeconfig": controlplaneIpConfigBytes},
+		Data: map[string][]byte{"kubeconfig": controlplaneIPConfigBytes},
 	})
 	if err != nil {
 		return fmt.Errorf("failed to create secret of kubeconfig, err: %w", err)
diff --git a/pkg/kubenest/util/address.go b/pkg/kubenest/util/address.go
index 7e8a7175b..f22defbd2 100644
--- a/pkg/kubenest/util/address.go
+++ b/pkg/kubenest/util/address.go
@@ -10,6 +10,7 @@ import (
 	netutils "k8s.io/utils/net"
 
 	"github.com/kosmos.io/kosmos/pkg/kubenest/constants"
+	"github.com/kosmos.io/kosmos/pkg/utils"
 )
 
 func GetAPIServiceIP(clientset clientset.Interface) (string, error) {
@@ -19,8 +20,8 @@ func GetAPIServiceIP(clientset clientset.Interface) (string, error) {
 	}
 
 	var (
-		masterLabel       = labels.Set{"node-role.kubernetes.io/master": ""}
-		controlplaneLabel = labels.Set{"node-role.kubernetes.io/control-plane": ""}
+		masterLabel       = labels.Set{utils.LabelNodeRoleOldControlPlane: ""}
+		controlplaneLabel = labels.Set{utils.LabelNodeRoleControlPlane: ""}
 	)
 	// first, select the master node as the IP of APIServer. if there is
 	// no master nodes, randomly select a worker node.
@@ -36,25 +37,25 @@ func GetAPIServiceIP(clientset clientset.Interface) (string, error) {
 	return nodes.Items[0].Status.Addresses[0].Address, nil
 }
 
-func GetAPIServiceClusterIp(namespace string, client clientset.Interface) (error, string) {
+func GetAPIServiceClusterIP(namespace string, client clientset.Interface) (string, error) {
 	serviceLists, err := client.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
-		return err, ""
+		return "", err
 	}
 	if serviceLists != nil {
 		for _, service := range serviceLists.Items {
 			if service.Spec.Type == constants.ServiceType {
-				return nil, service.Spec.ClusterIP
+				return service.Spec.ClusterIP, nil
 			}
 		}
 	}
-	return nil, ""
+	return "", nil
 }
 
-func GetServiceClusterIp(namespace string, client clientset.Interface) (error, []string) {
+func GetServiceClusterIP(namespace string, client clientset.Interface) ([]string, error) {
 	serviceLists, err := client.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
-		return err, nil
+		return nil, err
 	}
 	var clusterIps []string
 	if serviceLists != nil {
@@ -64,10 +65,10 @@ func GetServiceClusterIp(namespace string, client clientset.Interface) (error, [
 			}
 		}
 	}
-	return nil, clusterIps
+	return clusterIps, nil
 }
 
-func GetEtcdServiceClusterIp(namespace string, serviceName string, client clientset.Interface) (string, error) {
+func GetEtcdServiceClusterIP(namespace string, serviceName string, client clientset.Interface) (string, error) {
 	service, err := client.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
 	if err != nil {
 		return "", err
diff --git a/pkg/kubenest/util/address_test.go b/pkg/kubenest/util/address_test.go
new file mode 100644
index 000000000..c08b10180
--- /dev/null
+++ b/pkg/kubenest/util/address_test.go
@@ -0,0 +1,45 @@
+package util
+
+import (
+	"testing"
+
+	netutils "k8s.io/utils/net"
+)
+
+func TestGetAPIServiceIP(t *testing.T) {
+	client, err := prepare()
+	if err != nil {
+		t.Logf("failed to prepare client: %v", err)
+		return
+	}
+
+	str, err := GetAPIServiceIP(client)
+	if err != nil {
+		t.Logf("failed to get api service ip: %v", err)
+	}
+	if len(str) == 0 {
+		t.Logf("api service ip is empty")
+	} else {
+		t.Logf("api service ip is %s", str)
+	}
+}
+
+func TestParseIP(t *testing.T) {
+	tests := []struct {
+		name  string
+		input string
+		want  string
+	}{
+		{"ipv4", "10.237.6.0", "10.237.6.0"},
+		{"ipv6", "2409:8c2f:3800:0011::0a18:0000", "2409:8c2f:3800:11::a18:0"},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ip := netutils.ParseIPSloppy(tt.input)
+			if ip.String() != tt.want {
+				t.Fatalf("%s, %s, %s, %s", tt.name, tt.input, ip.String(), tt.want)
+			}
+		})
+	}
+}
diff --git a/pkg/kubenest/util/api-client/check.go b/pkg/kubenest/util/api-client/check.go
index 8d4879b55..1efd26d76 100644
--- a/pkg/kubenest/util/api-client/check.go
+++ b/pkg/kubenest/util/api-client/check.go
@@ -31,7 +31,7 @@ func NewVirtualClusterChecker(client clientset.Interface, timeout time.Duration)
 }
 
 func (v *VirtualClusterChecker) WaitForSomePods(label, namespace string, podNum int32) error {
-	return wait.PollImmediate(constants.ApiServerCallRetryInterval, v.timeout, func() (bool, error) {
+	return wait.PollImmediate(constants.APIServerCallRetryInterval, v.timeout, func() (bool, error) {
 		listOpts := metav1.ListOptions{LabelSelector: label}
 		pods, err := v.client.CoreV1().Pods(namespace).List(context.TODO(), listOpts)
 		if err != nil {
@@ -51,10 +51,10 @@ func (v *VirtualClusterChecker) WaitForSomePods(label, namespace string, podNum
 		return expected >= podNum, nil
 	})
 }
-func (w *VirtualClusterChecker) WaitForAPI() error {
-	return wait.PollImmediate(constants.ApiServerCallRetryInterval, w.timeout, func() (bool, error) {
+func (v *VirtualClusterChecker) WaitForAPI() error {
+	return wait.PollImmediate(constants.APIServerCallRetryInterval, v.timeout, func() (bool, error) {
 		healthStatus := 0
-		w.client.Discovery().RESTClient().Get().AbsPath("/healthz").Do(context.TODO()).StatusCode(&healthStatus)
+		v.client.Discovery().RESTClient().Get().AbsPath("/healthz").Do(context.TODO()).StatusCode(&healthStatus)
 		if healthStatus != http.StatusOK {
 			return false, nil
 		}
diff --git a/pkg/kubenest/util/cert/certs.go b/pkg/kubenest/util/cert/certs.go
index fe90409e6..82fa06521 100644
--- a/pkg/kubenest/util/cert/certs.go
+++ b/pkg/kubenest/util/cert/certs.go
@@ -26,6 +26,7 @@ import (
 	"github.com/kosmos.io/kosmos/pkg/kubenest/util"
 )
 
+// nolint:revive
 type CertConfig struct {
 	Name     string
 	CAName   string
@@ -41,7 +42,7 @@ type AltNamesMutatorConfig struct {
 	Name             string
 	Namespace        string
 	ControlplaneAddr string
-	ClusterIps       []string
+	ClusterIPs       []string
 	ExternalIP       string
 	ExternalIPs      []string
 	VipMap           map[string]string
@@ -151,9 +152,9 @@ func etcdServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames,
 		IPs:      []net.IP{net.ParseIP("::1"), net.IPv4(127, 0, 0, 1)},
 	}
 
-	if len(cfg.ClusterIps) > 0 {
-		for _, clusterIp := range cfg.ClusterIps {
-			appendSANsToAltNames(altNames, []string{clusterIp})
+	if len(cfg.ClusterIPs) > 0 {
+		for _, clusterIP := range cfg.ClusterIPs {
+			appendSANsToAltNames(altNames, []string{clusterIP})
 		}
 	}
 	return altNames, nil
@@ -206,7 +207,7 @@ func makeAltNamesMutator(f func(cfg *AltNamesMutatorConfig) (*certutil.AltNames,
 }
 
 func proxyServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, error) {
-	firstIPs, err := util.GetFirstIP(constants.ApiServerServiceSubnet)
+	firstIPs, err := util.GetFirstIP(constants.APIServerServiceSubnet)
 	if err != nil {
 		return nil, err
 	}
@@ -236,21 +237,21 @@ func proxyServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames,
 	}
 
 	if len(cfg.ExternalIPs) > 0 {
-		for _, externalIp := range cfg.ExternalIPs {
-			appendSANsToAltNames(altNames, []string{externalIp})
+		for _, externalIP := range cfg.ExternalIPs {
+			appendSANsToAltNames(altNames, []string{externalIP})
 		}
 	}
 
-	if len(cfg.ClusterIps) > 0 {
-		for _, clusterIp := range cfg.ClusterIps {
-			appendSANsToAltNames(altNames, []string{clusterIp})
+	if len(cfg.ClusterIPs) > 0 {
+		for _, clusterIP := range cfg.ClusterIPs {
+			appendSANsToAltNames(altNames, []string{clusterIP})
 		}
 	}
 	return altNames, nil
 }
 
 func apiServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, error) {
-	firstIPs, err := util.GetFirstIP(constants.ApiServerServiceSubnet)
+	firstIPs, err := util.GetFirstIP(constants.APIServerServiceSubnet)
 	if err != nil {
 		return nil, err
 	}
@@ -284,8 +285,8 @@ func apiServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, e
 	}
 
 	if len(cfg.ExternalIPs) > 0 {
-		for _, externalIp := range cfg.ExternalIPs {
-			appendSANsToAltNames(altNames, []string{externalIp})
+		for _, externalIP := range cfg.ExternalIPs {
+			appendSANsToAltNames(altNames, []string{externalIP})
 		}
 	}
 
@@ -294,9 +295,9 @@ func apiServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, e
 			appendSANsToAltNames(altNames, []string{vip})
 		}
 	}
-	if len(cfg.ClusterIps) > 0 {
-		for _, clusterIp := range cfg.ClusterIps {
-			appendSANsToAltNames(altNames, []string{clusterIp})
+	if len(cfg.ClusterIPs) > 0 {
+		for _, clusterIP := range cfg.ClusterIPs {
+			appendSANsToAltNames(altNames, []string{clusterIP})
 		}
 	}
 	return altNames, nil
diff --git a/pkg/kubenest/util/cert/store.go b/pkg/kubenest/util/cert/store.go
index 64511ebd2..9977c9f2a 100644
--- a/pkg/kubenest/util/cert/store.go
+++ b/pkg/kubenest/util/cert/store.go
@@ -9,6 +9,7 @@ import (
 	"github.com/kosmos.io/kosmos/pkg/kubenest/constants"
 )
 
+// nolint:revive
 type CertStore interface {
 	AddCert(cert *VirtualClusterCert)
 	GetCert(name string) *VirtualClusterCert
diff --git a/pkg/kubenest/util/helper.go b/pkg/kubenest/util/helper.go
index 26d91fa6c..35797fd6a 100644
--- a/pkg/kubenest/util/helper.go
+++ b/pkg/kubenest/util/helper.go
@@ -278,9 +278,8 @@ func CreateObject(dynamicClient dynamic.Interface, namespace string, name string
 		if apierrors.IsAlreadyExists(err) {
 			klog.Warningf("%s %s already exists", gvr.String(), name)
 			return nil
-		} else {
-			return err
 		}
+		return err
 	}
 	return nil
 }
@@ -334,9 +333,8 @@ func DeleteObject(dynamicClient dynamic.Interface, namespace string, name string
 		if apierrors.IsNotFound(err) {
 			klog.Warningf("%s %s already deleted", gvr.String(), name)
 			return nil
-		} else {
-			return err
 		}
+		return err
 	}
 	return nil
 }
diff --git a/pkg/kubenest/util/helper_test.go b/pkg/kubenest/util/helper_test.go
index c533e7908..c1d58d13c 100644
--- a/pkg/kubenest/util/helper_test.go
+++ b/pkg/kubenest/util/helper_test.go
@@ -9,7 +9,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
-	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 )
@@ -30,7 +29,7 @@ func createKubeConfig() (*restclient.Config, error) {
 	return kubeConfig, nil
 }
 
-func prepare() (clientset.Interface, error) {
+func prepare() (kubernetes.Interface, error) {
 	// Prepare kube config.
 	kubeConfig, err := createKubeConfig()
 	if err != nil {
@@ -48,7 +47,8 @@ func prepare() (clientset.Interface, error) {
 func TestCreateOrUpdate(t *testing.T) {
 	client, err := prepare()
 	if err != nil {
-		t.Fatalf("failed to prepare client: %v", err)
+		t.Logf("failed to prepare client: %v", err)
+		return
 	}
 
 	tests := []struct {
@@ -149,7 +149,8 @@ func TestCreateOrUpdate(t *testing.T) {
 func TestCreateSvc(t *testing.T) {
 	client, err := prepare()
 	if err != nil {
-		t.Fatalf("failed to prepare client: %v", err)
+		t.Logf("failed to prepare client: %v", err)
+		return
 	}
 
 	tests := []struct {
diff --git a/pkg/kubenest/util/image.go b/pkg/kubenest/util/image.go
index 016e04dcf..c7cfcd307 100644
--- a/pkg/kubenest/util/image.go
+++ b/pkg/kubenest/util/image.go
@@ -19,18 +19,18 @@ func GetImageMessage() (imageRepository string, imageVersion string) {
 	return imageRepository, imageVersion
 }
 
-func GetCoreDnsImageTag() string {
-	coreDnsImageTag := os.Getenv(constants.DefaultCoreDnsImageTagEnv)
-	if coreDnsImageTag == "" {
-		coreDnsImageTag = utils.DefaultCoreDnsImageTag
+func GetCoreDNSImageTag() string {
+	coreDNSImageTag := os.Getenv(constants.DefaultCoreDNSImageTagEnv)
+	if coreDNSImageTag == "" {
+		coreDNSImageTag = utils.DefaultCoreDNSImageTag
 	}
-	return coreDnsImageTag
+	return coreDNSImageTag
 }
 
 func GetVirtualControllerLabel() string {
 	lb := os.Getenv(constants.DefaultVirtualControllerLabelEnv)
 	if len(lb) == 0 {
-		return "node-role.kubernetes.io/control-plane"
+		return utils.LabelNodeRoleControlPlane
 	}
 	return lb
 }
diff --git a/pkg/kubenest/util/name.go b/pkg/kubenest/util/name.go
index 517f06342..76300dc4b 100644
--- a/pkg/kubenest/util/name.go
+++ b/pkg/kubenest/util/name.go
@@ -6,7 +6,7 @@ import (
 	"github.com/kosmos.io/kosmos/pkg/kubenest/constants"
 )
 
-func GetApiServerName(name string) string {
+func GetAPIServerName(name string) string {
 	return fmt.Sprintf("%s-%s", name, "apiserver")
 }
 
@@ -18,7 +18,7 @@ func GetKonnectivityServerName(name string) string {
 	return fmt.Sprintf("%s-%s", name, "konnectivity-server")
 }
 
-func GetKonnectivityApiServerName(name string) string {
+func GetKonnectivityAPIServerName(name string) string {
 	return fmt.Sprintf("%s-%s-konnectivity", name, "apiserver")
 }
diff --git a/pkg/kubenest/util/util_test.go b/pkg/kubenest/util/util_test.go
index 403785a25..4ec053e73 100644
--- a/pkg/kubenest/util/util_test.go
+++ b/pkg/kubenest/util/util_test.go
@@ -70,7 +70,7 @@ func TestFindAvailableIP(t *testing.T) {
 	}
 }
 
-func TestFindAvailableIP2(t *testing.T) {
+func TestFindAvailableIP2(_ *testing.T) {
 	type HostPortPool struct {
 		PortsPool []int32 `yaml:"portsPool"`
 	}
diff --git a/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go b/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go
index d4a2e525c..9c7b09223 100644
--- a/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go
+++ b/pkg/scheduler/lifted/plugins/leafnodevolumebinding/leafnode_volume_binding.go
@@ -135,8 +135,8 @@ func (pl *VolumeBinding) podHasPVCs(pod *corev1.Pod) (bool, error) {
 
 // PreFilter invoked at the prefilter extension point to check if pod has all
 // immediate PVCs bound. If not all immediate PVCs are bound, an
-// UnschedulableAndUnresolvable is returned.
-func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleState, pod *corev1.Pod) (*framework.PreFilterResult, *framework.Status) {
+// UnschedulableAndUnresolvable is returned.
+func (pl *VolumeBinding) PreFilter(_ context.Context, state *framework.CycleState, pod *corev1.Pod) (*framework.PreFilterResult, *framework.Status) {
 	// If pod does not reference any PVC, we don't need to do anything.
 	if hasPVC, err := pl.podHasPVCs(pod); err != nil {
 		return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
@@ -192,6 +192,7 @@ func getStateData(cs *framework.CycleState) (*stateData, error) {
 //
 // The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
 // PVCs can be matched with an available and node-compatible PV.
+// nolint:revive
 func (pl *VolumeBinding) Filter(_ context.Context, cs *framework.CycleState, pod *corev1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
 	node := nodeInfo.Node()
 	if node == nil {
diff --git a/pkg/utils/constants.go b/pkg/utils/constants.go
index d3ae425b8..8fdfcacd7 100644
--- a/pkg/utils/constants.go
+++ b/pkg/utils/constants.go
@@ -49,7 +49,7 @@ const (
 	DefaultClusterName         = "kosmos-control-cluster"
 	DefaultImageRepository     = "ghcr.io/kosmos-io"
 	DefaultImageVersion        = "v1.21.5-eki.0"
-	DefaultCoreDnsImageTag     = "v1.9.3"
+	DefaultCoreDNSImageTag     = "v1.9.3"
 	DefaultWaitTime            = 120
 	RootClusterAnnotationKey   = "kosmos.io/cluster-role"
 	RootClusterAnnotationValue = "root"
@@ -168,24 +168,28 @@ const (
 	MCSFinalizer = "kosmos.io/multi-cluster-service-finalizer"
 )
 
+// nolint:revive
 var GVR_CONFIGMAP = schema.GroupVersionResource{
 	Group:    "",
 	Version:  "v1",
 	Resource: "configmaps",
 }
 
+// nolint:revive
 var GVR_PVC = schema.GroupVersionResource{
 	Group:    "",
 	Version:  "v1",
 	Resource: "persistentvolumeclaims",
 }
 
+// nolint:revive
 var GVR_SECRET = schema.GroupVersionResource{
 	Group:    "",
 	Version:  "v1",
 	Resource: "secrets",
 }
 
+// nolint:revive
 var GVR_SERVICE = schema.GroupVersionResource{
 	Group:   "",
 	Version: "v1",
diff --git a/pkg/utils/controllers/controller_util.go b/pkg/utils/controllers/controller_util.go
index c762c764c..856af8489 100644
--- a/pkg/utils/controllers/controller_util.go
+++ b/pkg/utils/controllers/controller_util.go
@@ -130,6 +130,7 @@ func (c *worker) SplitKey(key string) (namespace, name string, err error) {
 // marks them done. You may run as many of these in parallel as you wish; the
 // queue guarantees that they will not end up processing the same runtime object
 // at the same time
+// nolint:revive
 func (c *worker) worker() {
 	for c.processNextItem() {
 	}
diff --git a/pkg/utils/helper/mcs.go b/pkg/utils/helper/mcs.go
index 694d59693..ff7c2b6c9 100644
--- a/pkg/utils/helper/mcs.go
+++ b/pkg/utils/helper/mcs.go
@@ -60,9 +60,8 @@ func HasAnnotation(m metav1.ObjectMeta, key string) bool {
 	}
 	if _, exists := annotations[key]; exists {
 		return true
-	} else {
-		return false
 	}
+	return false
 }
 
 // GetAnnotationValue returns the annotation key of ObjectMeta
@@ -73,7 +72,6 @@ func GetAnnotationValue(m metav1.ObjectMeta, key string) (annotationValue string
 	}
 	if value, exists := annotations[key]; exists {
 		return value, true
-	} else {
-		return "", false
 	}
+	return "", false
 }
diff --git a/pkg/utils/k8s.go b/pkg/utils/k8s.go
index f5de46db0..87324ab69 100644
--- a/pkg/utils/k8s.go
+++ b/pkg/utils/k8s.go
@@ -89,6 +89,7 @@ func UpdateSecret(old, new *corev1.Secret) {
 	}
 }
 
+// nolint:revive
 func UpdateUnstructured[T *corev1.ConfigMap | *corev1.Secret](old, new *unstructured.Unstructured, oldObj T, newObj T, update func(old, new T)) (*unstructured.Unstructured, error) {
 	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(old.UnstructuredContent(), &oldObj); err != nil {
 		return nil, err
diff --git a/pkg/utils/node.go b/pkg/utils/node.go
index f7cc169c4..482fc1937 100644
--- a/pkg/utils/node.go
+++ b/pkg/utils/node.go
@@ -1,6 +1,8 @@
 package utils
 
 import (
+	"fmt"
+
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -102,3 +104,25 @@ func NodeReady(node *corev1.Node) bool {
 //		n.Status.Conditions[i].LastHeartbeatTime = now
 //	}
 //}
+
+func FindFirstNodeIPAddress(node corev1.Node, nodeAddressType corev1.NodeAddressType) (string, error) {
+	for _, addr := range node.Status.Addresses {
+		if addr.Type == nodeAddressType {
+			return addr.Address, nil
+		}
+	}
+	return "", fmt.Errorf("cannot find address of type %s in node addresses, node name: %s", nodeAddressType, node.GetName())
+}
+
+func FindNodeIPsAddress(node corev1.Node, nodeAddressType corev1.NodeAddressType) ([]string, error) {
+	ips := []string{}
+	for _, addr := range node.Status.Addresses {
+		if addr.Type == nodeAddressType {
+			ips = append(ips, addr.Address)
+		}
+	}
+	if len(ips) == 0 {
+		return nil, fmt.Errorf("cannot find address of type %s in node addresses, node name: %s", nodeAddressType, node.GetName())
+	}
+	return ips, nil
+}
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index 08db3f2c1..8a5d47b25 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	corev1 "k8s.io/api/core/v1"
+	netutils "k8s.io/utils/net"
 )
 
 func ContainsString(arr []string, s string) bool {
@@ -57,3 +58,11 @@ func IPFamilyGenerator(apiServerServiceSubnet string) []corev1.IPFamily {
 	}
 	return ipFamilies
 }
+
+func FormatCIDR(cidr string) (string, error) {
+	_, ipNet, err := netutils.ParseCIDRSloppy(cidr)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse cidr %s, err: %s", cidr, err.Error())
+	}
+	return ipNet.String(), nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml
index f45bf88b8..780d9c68f 100644
--- a/vendor/github.com/pelletier/go-toml/example-crlf.toml
+++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml
@@ -1,30 +1,30 @@
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-  # You can indent as you please. Tabs or spaces. TOML don't care.
-  [servers.alpha]
-  ip = "10.0.0.1"
-  dc = "eqdc10"
-
-  [servers.beta]
-  ip = "10.0.0.2"
-  dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
 score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
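
Editor's note: the manifest constants renamed in this patch (CoreDNSCM, CoreDNSDeployment, CoreDNSService, APIServerExternalService, and friends) are Go text/template strings that kubenest fills in before applying them to the cluster. The sketch below illustrates only that rendering pattern: parseTemplate is a hypothetical stand-in for the repo's util.ParseTemplate helper, and the inline Service template is trimmed-down sample data, not a manifest from the repo.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// parseTemplate renders a manifest template against the given struct,
// mirroring (not reproducing) the util.ParseTemplate calls seen in anp.go.
func parseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
	var buf bytes.Buffer
	tmpl, err := template.New("manifest").Parse(strtmpl)
	if err != nil {
		return nil, fmt.Errorf("error when parsing template: %w", err)
	}
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("error when executing template: %w", err)
	}
	return buf.Bytes(), nil
}

func main() {
	const serviceTemplate = `
apiVersion: v1
kind: Service
metadata:
  name: {{ .ServiceName }}
  namespace: {{ .Namespace }}
`
	// The anonymous struct plays the same role as the ones passed to
	// util.ParseTemplate throughout pkg/kubenest/tasks.
	out, err := parseTemplate(serviceTemplate, struct {
		ServiceName, Namespace string
	}{
		ServiceName: "demo-apiserver",
		Namespace:   "demo-namespace",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}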
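
Editor's note: a usage sketch for the node address helpers added to pkg/utils/node.go above. It assumes the snippet is compiled inside the kosmos module so the import resolves; the Node literal is fabricated sample data. With the corrected FindNodeIPsAddress, an error is returned only when no address of the requested type exists.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/kosmos.io/kosmos/pkg/utils"
)

func main() {
	node := corev1.Node{
		Status: corev1.NodeStatus{
			Addresses: []corev1.NodeAddress{
				{Type: corev1.NodeHostName, Address: "worker-0"},
				{Type: corev1.NodeInternalIP, Address: "10.0.0.1"},
				{Type: corev1.NodeInternalIP, Address: "fd00::1"},
			},
		},
	}

	// First InternalIP, for callers that need a single endpoint.
	ip, err := utils.FindFirstNodeIPAddress(node, corev1.NodeInternalIP)
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 10.0.0.1

	// Every InternalIP, e.g. to cover dual-stack nodes.
	ips, err := utils.FindNodeIPsAddress(node, corev1.NodeInternalIP)
	if err != nil {
		panic(err)
	}
	fmt.Println(ips) // [10.0.0.1 fd00::1]
}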
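
Editor's note: the new utils.FormatCIDR rests on netutils.ParseCIDRSloppy, which tolerates leading zeros in octets and returns a *net.IPNet whose String() masks off host bits. A minimal sketch of that normalization, calling the same k8s.io/utils/net function directly with made-up inputs:

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	for _, cidr := range []string{"10.237.006.010/18", "2409:8c2f:3800:0011::/64"} {
		_, ipNet, err := netutils.ParseCIDRSloppy(cidr)
		if err != nil {
			fmt.Println("failed to parse cidr:", err)
			continue
		}
		// Host bits are cleared by the mask, so 10.237.6.10/18
		// normalizes to the network address 10.237.0.0/18.
		fmt.Printf("%s -> %s\n", cidr, ipNet.String())
	}
}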