diff --git a/README.md b/README.md index 04a8cde..03a2fe6 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,22 @@ For Kubernetes components that use leader election mechanisms make sure to deact ## Cite us -The paper related to this repository is currently under review. We will add citation info as soon as available. +``` +@inproceedings{straesser2023kubernetesintheloop, + abstract = {Microservices deployed and managed by container orchestration frameworks like Kubernetes are the bases of modern cloud applications. In microservice performance modeling and prediction, simulations provide a lightweight alternative to experimental analysis, which requires dedicated infrastructure and a laborious setup. However, existing simulators cannot run realistic scenarios, as performance-critical orchestration mechanisms (like scheduling or autoscaling) are manually modeled and can consequently not be represented in their full complexity and configuration space. This work combines a state-of-the-art simulation for microservice performance with Kubernetes container orchestration. Hereby, we include the original implementation of Kubernetes artifacts enabling realistic scenarios and testing of orchestration policies with low overhead. 
In two experiments with Kubernetes' kube-scheduler and cluster-autoscaler, we demonstrate that our framework can correctly handle different configurations of these orchestration mechanisms boosting both the simulation's use cases and authenticity.}, + added-at = {2023-08-17T01:05:43.000+0200}, + author = {Straesser, Martin and Haas, Patrick and Frank, Sebastian and Hakamian, Alireza and Van Hoorn, André and Kounev, Samuel}, + biburl = {https://www.bibsonomy.org/bibtex/23ea9a74ebfc49b6a1a29bce1d6083855/samuel.kounev}, + booktitle = {Performance Evaluation Methodologies and Tools}, + interhash = {373d040402db63c40b7b0b707adf66ad}, + intrahash = {3ea9a74ebfc49b6a1a29bce1d6083855}, + keywords = {cloud_computing container_orchestration descartes discrete_event_simulation kubernetes microservices software_performance t_full myown}, + note = {In print.}, + timestamp = {2023-08-17T01:05:43.000+0200}, + title = {Kubernetes-in-the-Loop: Enriching Microservice Simulation Through Authentic Container Orchestration}, + year = 2023 +} +``` ## Any questions? 
diff --git a/cmd/kube-rise/main.go b/cmd/go-kube/main.go similarity index 59% rename from cmd/kube-rise/main.go rename to cmd/go-kube/main.go index 2bf6864..4ebafe0 100644 --- a/cmd/kube-rise/main.go +++ b/cmd/go-kube/main.go @@ -1,9 +1,11 @@ package main import ( - "kube-rise/internal/inmemorystorage" - "kube-rise/pkg/server" - "kube-rise/pkg/storage" + "flag" + "go-kube/pkg/interfaces" + "go-kube/pkg/storage" + "go-kube/pkg/storage/inmemorystorage" + "k8s.io/klog/v2" ) func initStorages() storage.StorageContainer { @@ -14,6 +16,9 @@ func initStorages() storage.StorageContainer { var machineStorage = inmemorystorage.NewMachineInMemoryStorage() var machineSetStorage = inmemorystorage.NewMachineSetInMemoryStorage(&nodeStorage, &machineStorage) var statusConfigMapStorage = inmemorystorage.NewStatusMapInMemoryStorage() + var podIdStorage = inmemorystorage.NewIdInMemoryStorage() + var machineIdStorage = inmemorystorage.NewIdInMemoryStorage() + var adapterStateStorage = inmemorystorage.NewAdapterStateInMemoryStorage() return storage.StorageContainer{ Pods: &podStorage, @@ -23,11 +28,17 @@ func initStorages() storage.StorageContainer { Machines: &machineStorage, MachineSets: &machineSetStorage, StatusConfigMap: &statusConfigMapStorage, + PodIds: &podIdStorage, + MachineIds: &machineIdStorage, + AdapterState: &adapterStateStorage, } } func main() { + klog.InitFlags(nil) // initializing the flags + defer klog.Flush() // flushes all pending log I/O + flag.Parse() // parses the command-line flags var storages = initStorages() - var app = server.NewAdapterApplication(&storages) + var app = interfaces.NewAdapterApplication(&storages) app.Start() } diff --git a/docs/Packages.md b/docs/Packages.md new file mode 100644 index 0000000..d1dbc8d --- /dev/null +++ b/docs/Packages.md @@ -0,0 +1,21 @@ +# Package Descriptions + +## cmd + +Contains main file and command line flag parsing + +## internal + +Contains generic helper structs and functions: + +- broadcast: A utility packages 
for broadcasting channels +- infrastructure: Definition and handling of REST endpoint handlers + +## pkg + +Contains the core functionality and interfaces of this adapter: + +- control: The core logic regarding essential resource types, e.g., pods and nodes +- interfaces: The REST interfaces for communication with MiSim and Kubernetes components +- misim: Misim specific data types and logic +- storage: Interfaces and structs for storing data in the adapter diff --git a/go.mod b/go.mod index 01ae1bd..89a3395 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module kube-rise +module go-kube go 1.20 @@ -6,6 +6,7 @@ require ( github.com/gorilla/mux v1.8.0 k8s.io/api v0.26.5 k8s.io/apimachinery v0.27.2 + k8s.io/klog/v2 v2.90.1 sigs.k8s.io/cluster-api v1.4.3 ) @@ -59,7 +60,6 @@ require ( k8s.io/apiextensions-apiserver v0.26.1 // indirect k8s.io/client-go v0.26.1 // indirect k8s.io/component-base v0.26.1 // indirect - k8s.io/klog/v2 v2.90.1 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect sigs.k8s.io/controller-runtime v0.14.5 // indirect diff --git a/internal/broadcast/broadcaster.go b/internal/broadcast/broadcaster.go index 7cb0ee5..4872a99 100644 --- a/internal/broadcast/broadcaster.go +++ b/internal/broadcast/broadcaster.go @@ -2,7 +2,7 @@ package broadcast import ( "context" - "fmt" + "k8s.io/klog/v2" ) // https://betterprogramming.pub/how-to-broadcast-messages-in-go-using-channels-b68f42bdf32e @@ -16,14 +16,14 @@ type BroadcastServer[T any] struct { } func (s *BroadcastServer[T]) Subscribe() <-chan T { - fmt.Printf("Subscribe to %s\n", s.name) + klog.V(7).Info("Subscribe to ", s.name) newListener := make(chan T, 500) s.addListener <- newListener return newListener } func (s *BroadcastServer[T]) CancelSubscription(channel <-chan T) { - fmt.Printf("Remove from %s\n", s.name) + klog.V(7).Info("Remove from ", s.name) s.removeListener <- channel } diff --git 
a/internal/infrastructure/emptyresourcemocks.go b/internal/infrastructure/emptyresourcemocks.go new file mode 100644 index 0000000..4605cbb --- /dev/null +++ b/internal/infrastructure/emptyresourcemocks.go @@ -0,0 +1,48 @@ +package infrastructure + +import ( + apps "k8s.io/api/apps/v1" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1" + storage "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" + exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +func GetEmptyResourceList(resourceType string) runtime.Object { + switch resourceType { + case "replicasets": + return &apps.ReplicaSetList{TypeMeta: metav1.TypeMeta{Kind: "ReplicaSetList", APIVersion: "apps/v1"}, Items: nil} + case "persistentvolumes": + return &core.PersistentVolumeList{TypeMeta: metav1.TypeMeta{Kind: "PersistentVolumeList", APIVersion: "v1"}, Items: nil} + case "statefulsets": + return &apps.StatefulSetList{TypeMeta: metav1.TypeMeta{Kind: "StatefulSetList", APIVersion: "apps/v1"}, Items: nil} + case "storageclasses": + return &storage.StorageClassList{TypeMeta: metav1.TypeMeta{Kind: "StorageClassList", APIVersion: "storage.k8s.io/v1"}, Items: nil} + case "csidrivers": + return &storage.CSIDriverList{TypeMeta: metav1.TypeMeta{Kind: "CSIDriverList", APIVersion: "storage.k8s.io/v1"}, Items: nil} + case "poddisruptionbudgets": + return &policy.PodDisruptionBudgetList{TypeMeta: metav1.TypeMeta{Kind: "PodDisruptionBudgetList", APIVersion: "policy/v1"}, Items: nil} + case "csinodes": + return &storage.CSINodeList{TypeMeta: metav1.TypeMeta{Kind: "CSINodeList", APIVersion: "storage.k8s.io/v1"}, Items: nil} + case "persistentvolumeclaims": + return &core.PersistentVolumeClaimList{TypeMeta: metav1.TypeMeta{Kind: "PersistentVolumeClaimList", APIVersion: "v1"}, Items: nil} + case "csistoragecapacities": + return &storage.CSIStorageCapacityList{TypeMeta: 
metav1.TypeMeta{Kind: "CSIStorageCapacityList", APIVersion: "storage.k8s.io/v1beta1"}, Items: nil} + case "services": + return &core.ServiceList{TypeMeta: metav1.TypeMeta{Kind: "ServiceList", APIVersion: "v1"}, Items: nil} + case "replicationcontrollers": + return &core.ReplicationControllerList{TypeMeta: metav1.TypeMeta{Kind: "ReplicationControllerList", APIVersion: "v1"}, Items: nil} + case "jobs": + return &batch.JobList{TypeMeta: metav1.TypeMeta{Kind: "JobList", APIVersion: "batch/v1"}, Items: nil} + case "machinedeployments": + return &cluster.MachineDeploymentList{TypeMeta: metav1.TypeMeta{Kind: "MachineDeploymentList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil} + case "machinepools": + return &exp.MachinePoolList{TypeMeta: metav1.TypeMeta{Kind: "MachinePoolList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil} + default: + return nil + } +} diff --git a/internal/infrastructure/endpoint.go b/internal/infrastructure/endpoint.go new file mode 100644 index 0000000..5663c9b --- /dev/null +++ b/internal/infrastructure/endpoint.go @@ -0,0 +1,7 @@ +package infrastructure + +import ( + "net/http" +) + +type Endpoint func(w http.ResponseWriter, r *http.Request) diff --git a/internal/infrastructure/requestutils.go b/internal/infrastructure/requestutils.go new file mode 100644 index 0000000..a2019d0 --- /dev/null +++ b/internal/infrastructure/requestutils.go @@ -0,0 +1,61 @@ +package infrastructure + +import ( + "encoding/json" + "github.com/gorilla/mux" + "io" + "k8s.io/klog/v2" + "net/http" +) + +func HandleRequest[T any](supplier func() T) Endpoint { + return func(w http.ResponseWriter, r *http.Request) { + klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, r.URL.RawQuery) + w.Header().Set("Content-Type", "application/json") + resourceList := supplier() + json.NewEncoder(w).Encode(resourceList) + } +} + +func HandleRequestWithBody[B any, T any](supplier func(B) T) Endpoint { + return func(w http.ResponseWriter, r *http.Request) { + 
klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, r.URL.RawQuery) + w.Header().Set("Content-Type", "application/json") + reqBody, _ := io.ReadAll(r.Body) + var payload B + err := json.Unmarshal(reqBody, &payload) + if err != nil { + klog.V(1).ErrorS(err, "There was an error decoding the json. err = %s", err) + w.WriteHeader(500) + return + } + resourceList := supplier(payload) + json.NewEncoder(w).Encode(resourceList) + } +} + +func HandleRequestWithParamsAndBody[B any, T any](supplier func(map[string]string, B) T) Endpoint { + return func(w http.ResponseWriter, r *http.Request) { + klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, r.URL.RawQuery) + w.Header().Set("Content-Type", "application/json") + reqBody, _ := io.ReadAll(r.Body) + var payload B + err := json.Unmarshal(reqBody, &payload) + if err != nil { + klog.V(1).ErrorS(err, "There was an error decoding the json. err = %s", err) + w.WriteHeader(500) + return + } + resourceList := supplier(mux.Vars(r), payload) + json.NewEncoder(w).Encode(resourceList) + } +} + +func HandleRequestWithParams[T any](supplier func(map[string]string) T) Endpoint { + return func(w http.ResponseWriter, r *http.Request) { + klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, r.URL.RawQuery) + w.Header().Set("Content-Type", "application/json") + resourceList := supplier(mux.Vars(r)) + json.NewEncoder(w).Encode(resourceList) + } +} diff --git a/internal/infrastructure/unsupportedresource.go b/internal/infrastructure/unsupportedresource.go new file mode 100644 index 0000000..79dc14a --- /dev/null +++ b/internal/infrastructure/unsupportedresource.go @@ -0,0 +1,57 @@ +package infrastructure + +import ( + "encoding/json" + "k8s.io/klog/v2" + "net/http" + "strings" +) + +// If query parameter "watch" is added writes empty +// Writes {"metadata": null, "items": null} to the response +func UnsupportedResource() Endpoint { + return func(w http.ResponseWriter, r *http.Request) { + klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, 
r.URL.RawQuery) + w.Header().Set("Content-Type", "application/json") + if r.URL.Query().Get("watch") != "" { + ctx := r.Context() + flusher, ok := w.(http.Flusher) + if !ok { + http.NotFound(w, r) + return + } + + // Send the initial headers saying we're gonna stream the response. + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + flusher.Flush() + + for { + select { + case <-ctx.Done(): + klog.V(6).Info("Client stopped listening") + return + } + } + } else { + // if no watch we just list the resource + // just return nothing here, to *string datatype enables us to use nil + // y := map[string]*string{"metadata": nil, "items": nil} + resourceType := strings.Split(r.URL.Path, "/") + + y := GetEmptyResourceList(resourceType[len(resourceType)-1]) + var err error + if y == nil { + z := map[string]*string{"metadata": nil, "items": nil} + err = json.NewEncoder(w).Encode(z) + klog.V(6).ErrorS(err, "unseen type %s\n", resourceType[len(resourceType)-1]) + } else { + err = json.NewEncoder(w).Encode(y) + } + if err != nil { + klog.V(1).ErrorS(err, "unable to encode empty resource list, error is: %v", err) + return + } + } + } +} diff --git a/internal/infrastructure/watchablestream.go b/internal/infrastructure/watchablestream.go new file mode 100644 index 0000000..312fdab --- /dev/null +++ b/internal/infrastructure/watchablestream.go @@ -0,0 +1,67 @@ +package infrastructure + +import ( + "encoding/json" + "go-kube/internal/broadcast" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "net/http" +) + +func HandleWatchableRequest[T any](supplier func() (T, *broadcast.BroadcastServer[metav1.WatchEvent])) Endpoint { + return func(w http.ResponseWriter, r *http.Request) { + klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, r.URL.RawQuery) + w.Header().Set("Content-Type", "application/json") + resourceList, broadcastServer := supplier() + if r.URL.Query().Get("watch") != "" { + // watch initiated HTTP streaming answers + // Sources: 
https://gist.github.com/vmarmol/b967b29917a34d9307ce + // https://github.com/kubernetes/kubernetes/blob/828495bcc013b77bb63bcb64111e094e455715bb/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/watch.go#L181 + // https://stackoverflow.com/questions/54890809/how-to-use-request-context-instead-of-closenotifier + ctx := r.Context() + flusher, ok := w.(http.Flusher) + if !ok { + http.NotFound(w, r) + return + } + // Send the initial headers saying we're gonna stream the response. + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + flusher.Flush() + + enc := json.NewEncoder(w) + + eventChannel := broadcastServer.Subscribe() + defer broadcastServer.CancelSubscription(eventChannel) + + klog.V(6).Infof("Client started listening (%s)...", r.URL.Path) + for { + klog.V(6).Infof("Client waits for result (%s)...", r.URL.Path) + select { + case <-ctx.Done(): + klog.V(6).Infof("Client stopped listening (%s)", r.URL.Path) + return + case event := <-eventChannel: + klog.V(6).Infof("Received event for client (%s) of type %s", r.URL.Path, event.Type) + if err := enc.Encode(event); err != nil { + klog.V(1).ErrorS(err, "unable to encode watch object %T: %v", event, err) + // client disconnect. 
+ return + } + if len(eventChannel) == 0 { + flusher.Flush() + klog.V(6).Infof("Client flushed (%s)!", r.URL.Path) + //return + } + } + } + } else { + // if no watch we just list the resource + err := json.NewEncoder(w).Encode(resourceList) + if err != nil { + klog.V(1).ErrorS(err, "unable to encode resource list, error is: %v", err) + return + } + } + } +} diff --git a/internal/inmemorystorage/cluster.go b/internal/inmemorystorage/cluster.go deleted file mode 100644 index a25f57c..0000000 --- a/internal/inmemorystorage/cluster.go +++ /dev/null @@ -1,354 +0,0 @@ -package inmemorystorage - -import ( - "context" - "encoding/json" - "fmt" - "github.com/gorilla/mux" - "io" - v1 "k8s.io/api/autoscaling/v1" - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "kube-rise/internal/broadcast" - "net/http" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" - "strconv" -) - -// Structs - -type MachineInMemoryStorage struct { - machines cluster.MachineList - machineEventChan chan metav1.WatchEvent - machineBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] - machineCount int -} - -type MachineSetsInMemoryStorage struct { - machineSets cluster.MachineSetList - machineSetsEventChan chan metav1.WatchEvent - nodeStorage *NodeInMemoryStorage - machineStorage *MachineInMemoryStorage - machineSetBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] -} - -type StatusConfigMapInMemoryStorage struct { - statusConfigMap core.ConfigMap -} - -// Implementations - -func (s *MachineInMemoryStorage) GetMachines() (cluster.MachineList, *broadcast.BroadcastServer[metav1.WatchEvent]) { - return s.machines, s.machineBroadcaster -} - -func (s *MachineInMemoryStorage) StoreMachines(ms cluster.MachineList, events []metav1.WatchEvent) { - s.machines = ms - for _, n := range events { - s.machineEventChan <- n - if n.Type == "ADDED" { - s.machineCount = s.machineCount + 1 - } - } -} - -func (s *MachineInMemoryStorage) GetMachine(w 
http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - // Get machine set name as path parameter - pathParams := mux.Vars(r) - machineName := pathParams["machineName"] - var machineRef cluster.Machine - - for _, ms := range s.machines.Items { - if ms.Name == machineName { - machineRef = ms - break - } - } - - err := json.NewEncoder(w).Encode(machineRef) - if err != nil { - fmt.Printf("Unable to encode response for get machine, error is: %v", err) - return - } -} - -func (s *MachineInMemoryStorage) PutMachine(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - // Get node name as path parameter - pathParams := mux.Vars(r) - machineName := pathParams["machineName"] - - reqBody, _ := io.ReadAll(r.Body) - // fmt.Println(string(reqBody)) - var u cluster.Machine - err := json.Unmarshal(reqBody, &u) - if err != nil { - fmt.Printf("There was an error decoding the json. 
err = %s", err) - w.WriteHeader(500) - return - } - - indexForReplacement := -1 - for index, machine := range s.machines.Items { - if machine.Name == machineName { - indexForReplacement = index - break - } - } - s.machines.Items[indexForReplacement] = u - - err = json.NewEncoder(w).Encode(u) - if err != nil { - fmt.Printf("Unable to encode response for node update, error is: %v", err) - return - } -} - -func (s *MachineInMemoryStorage) ScaleMachines(machineSet cluster.MachineSet, changedNodes []core.Node, amount int) ([]cluster.Machine, error) { - var addedMachines []cluster.Machine - if amount < 0 { - // In case of downscaling we need to delete machines - for _, changedNode := range changedNodes { - index := -1 - for i, machine := range s.machines.Items { - if machine.Status.NodeRef.Name == changedNode.Name { - index = i - break - } - } - s.machineEventChan <- metav1.WatchEvent{Type: "DELETED", Object: runtime.RawExtension{Object: &s.machines.Items[index]}} - s.machines.Items[index] = s.machines.Items[len(s.machines.Items)-1] - s.machines.Items = s.machines.Items[:len(s.machines.Items)-1] - } - return nil, nil - } else { - providerIds := make([]string, amount) - nodeRefs := make([]core.ObjectReference, amount) - for amount > 0 { - - providerIds[amount-1] = "clusterapi://" + fmt.Sprintf("%s-machine-%d", machineSet.Name, s.machineCount) - nodeRefs[amount-1] = core.ObjectReference{Kind: "Node", APIVersion: "v1", Name: fmt.Sprintf("%s-machine-%d", machineSet.Name, s.machineCount) + "-node"} - newMachine := cluster.Machine{ - TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "Machine"}, - ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-machine-%d", machineSet.Name, s.machineCount), Namespace: "kube-system", Annotations: map[string]string{ - "machine-set-name": machineSet.Name, - "cpu": machineSet.Annotations["capacity.cluster-autoscaler.kubernetes.io/cpu"], - "memory": machineSet.Annotations["capacity.cluster-autoscaler.kubernetes.io/memory"], 
- "pods": machineSet.Annotations["capacity.cluster-autoscaler.kubernetes.io/maxPods"], - }, OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "cluster.x-k8s.io/v1beta1", - Kind: "MachineSet", - Name: machineSet.Name, - }, - }}, - Spec: cluster.MachineSpec{ProviderID: &providerIds[amount-1]}, - Status: cluster.MachineStatus{Phase: "Running", NodeRef: &nodeRefs[amount-1]}, - } - s.machineCount = s.machineCount + 1 - amount = amount - 1 - addedMachines = append(addedMachines, newMachine) - } - for i, _ := range addedMachines { - s.machines.Items = append(s.machines.Items, addedMachines[i]) - machineAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &addedMachines[i]}} - s.machineEventChan <- machineAddEvent - } - } - return addedMachines, nil -} - -func (s *MachineSetsInMemoryStorage) GetMachineSets() (cluster.MachineSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) { - return s.machineSets, s.machineSetBroadcaster -} - -func (s *MachineSetsInMemoryStorage) StoreMachineSets(ms cluster.MachineSetList, events []metav1.WatchEvent) { - s.machineSets = ms - for _, e := range events { - s.machineSetsEventChan <- e - } -} - -func (s *MachineSetsInMemoryStorage) GetMachineSetsScale(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - // Get machine set name as path parameter - pathParams := mux.Vars(r) - machineSetName := pathParams["machinesetName"] - var machineSetRef cluster.MachineSet - - for _, ms := range s.machineSets.Items { - if ms.Name == machineSetName { - machineSetRef = ms - break - } - } - - result := v1.Scale{TypeMeta: metav1.TypeMeta{APIVersion: "autoscaling/v1", Kind: "Scale"}, - ObjectMeta: metav1.ObjectMeta{Name: machineSetName}, - Spec: v1.ScaleSpec{Replicas: *machineSetRef.Spec.Replicas}, - Status: v1.ScaleStatus{Replicas: *machineSetRef.Spec.Replicas}} - - err := json.NewEncoder(w).Encode(result) 
- if err != nil { - fmt.Printf("Unable to encode response for machineset scale, error is: %v", err) - return - } -} - -func (s *MachineSetsInMemoryStorage) PutMachineSetsScale(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - reqBody, _ := io.ReadAll(r.Body) - // fmt.Println(string(reqBody)) - var u v1.Scale - err := json.Unmarshal(reqBody, &u) - if err != nil { - fmt.Printf("There was an error decoding the json. err = %s", err) - w.WriteHeader(500) - return - } - - // Get machine set name as path parameter - pathParams := mux.Vars(r) - machineSetName := pathParams["machinesetName"] - desiredReplicas := u.Spec.Replicas - scaleAmount := int32(0) - machineSetIndexToChange := -1 - for i, ms := range s.machineSets.Items { - if ms.Name == machineSetName { - machineSetIndexToChange = i - break - } - } - scaleAmount = desiredReplicas - *s.machineSets.Items[machineSetIndexToChange].Spec.Replicas - *s.machineSets.Items[machineSetIndexToChange].Spec.Replicas = desiredReplicas - (*s).machineSets.Items[machineSetIndexToChange].Status.AvailableReplicas = desiredReplicas - (*s).machineSets.Items[machineSetIndexToChange].Status.FullyLabeledReplicas = desiredReplicas - (*s).machineSets.Items[machineSetIndexToChange].Status.ReadyReplicas = desiredReplicas - s.machineSetsEventChan <- metav1.WatchEvent{Type: "MODIFIED", Object: runtime.RawExtension{Object: &s.machineSets.Items[machineSetIndexToChange]}} - - // We need to scale something - if scaleAmount != 0 { - if scaleAmount < 0 { - // For downscaling, we first delete nodes then machines - changedNodes, err := s.nodeStorage.ScaleNodes(nil, int(scaleAmount)) - if err != nil { - fmt.Printf("Error when scaling down nodes. 
err = %s", err) - w.WriteHeader(500) - return - } - _, err = s.machineStorage.ScaleMachines(s.machineSets.Items[machineSetIndexToChange], changedNodes, int(scaleAmount)) - if err != nil { - fmt.Printf("Error when scaling down machines. err = %s", err) - w.WriteHeader(500) - return - } - } else { - // For upscaling, we first create machines then nodes - addedMachines, err := s.machineStorage.ScaleMachines(s.machineSets.Items[machineSetIndexToChange], nil, int(scaleAmount)) - if err != nil { - fmt.Printf("Error when scaling up machines. err = %s", err) - w.WriteHeader(500) - return - } - _, err = s.nodeStorage.ScaleNodes(addedMachines, int(scaleAmount)) - if err != nil { - fmt.Printf("Error when scaling up nodes. err = %s", err) - w.WriteHeader(500) - return - } - } - } - - err = json.NewEncoder(w).Encode(u) - if err != nil { - fmt.Printf("Unable to encode response for machineset put scale, error is: %v", err) - return - } -} - -func (s *MachineSetsInMemoryStorage) IsUpscalingPossible() bool { - for _, ms := range s.machineSets.Items { - maxSize, _ := strconv.Atoi(ms.Annotations["cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size"]) - if *ms.Spec.Replicas < int32(maxSize) { - return true - } - } - return false -} - -func (s *MachineSetsInMemoryStorage) IsDownscalingPossible() bool { - for _, ms := range s.machineSets.Items { - minSize, _ := strconv.Atoi(ms.Annotations["cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size"]) - if *ms.Spec.Replicas > int32(minSize) { - return true - } - } - return false -} - -func (s *StatusConfigMapInMemoryStorage) GetStatusConfigMap() core.ConfigMap { - return s.statusConfigMap -} - -func (s *StatusConfigMapInMemoryStorage) StoreStatusConfigMap(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - reqBody, _ := io.ReadAll(r.Body) - // fmt.Println(string(reqBody)) - var u core.ConfigMap - err := 
json.Unmarshal(reqBody, &u) - if err != nil { - fmt.Printf("There was an error decoding the json. err = %s", err) - w.WriteHeader(500) - return - } - - s.statusConfigMap = u - - err = json.NewEncoder(w).Encode(s.statusConfigMap) - if err != nil { - fmt.Printf("Unable to encode response for node update, error is: %v", err) - return - } -} - -// "Constructors" - -func NewMachineSetInMemoryStorage(nodeStorage *NodeInMemoryStorage, machineStorage *MachineInMemoryStorage) MachineSetsInMemoryStorage { - machineSetsEventChan := make(chan metav1.WatchEvent, 500) - return MachineSetsInMemoryStorage{ - machineSets: cluster.MachineSetList{TypeMeta: metav1.TypeMeta{Kind: "MachineSetList", APIVersion: "cluster-x.k8s.io/v1beta1"}, Items: nil}, - machineSetsEventChan: machineSetsEventChan, - nodeStorage: nodeStorage, - machineStorage: machineStorage, - machineSetBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "MachineSetBroadcaster", machineSetsEventChan), - } -} - -func NewMachineInMemoryStorage() MachineInMemoryStorage { - machineEventChan := make(chan metav1.WatchEvent, 500) - return MachineInMemoryStorage{ - machines: cluster.MachineList{TypeMeta: metav1.TypeMeta{Kind: "MachineList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil}, - machineEventChan: machineEventChan, - machineBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "MachineBroadcaster", machineEventChan), - machineCount: 0, - } -} - -func NewStatusMapInMemoryStorage() StatusConfigMapInMemoryStorage { - return StatusConfigMapInMemoryStorage{ - statusConfigMap: core.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, - ObjectMeta: metav1.ObjectMeta{Name: "cluster-autoscaler-status", Namespace: "kube-system"}}, - } -} diff --git a/internal/inmemorystorage/core.go b/internal/inmemorystorage/core.go deleted file mode 100644 index 634b31f..0000000 --- a/internal/inmemorystorage/core.go +++ /dev/null @@ -1,286 +0,0 @@ -package inmemorystorage - -import ( - "context" - 
"encoding/json" - "errors" - "fmt" - "github.com/gorilla/mux" - "io" - core "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "kube-rise/internal/broadcast" - "net/http" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" - "strconv" -) - -// Structs - -type NodeInMemoryStorage struct { - nodes core.NodeList - nodeEventChan chan metav1.WatchEvent - nodeBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] - nodeUpscalingChan chan core.Node - nodeDownscalingChan chan core.Node - nodeUpscalingBroadcaster *broadcast.BroadcastServer[core.Node] - nodeDownscalingBroadcaster *broadcast.BroadcastServer[core.Node] -} - -type PodInMemoryStorage struct { - pods core.PodList - podEventChan chan metav1.WatchEvent - podBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] - nextResourceId int -} - -type NamespaceInMemoryStorage struct { - namespaces core.NamespaceList - namespaceEventChan chan metav1.WatchEvent - namespaceBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] -} - -// Implementations - -func (s *NodeInMemoryStorage) GetNodes() (core.NodeList, *broadcast.BroadcastServer[metav1.WatchEvent]) { - return s.nodes, s.nodeBroadcaster -} - -func (s *NodeInMemoryStorage) StoreNodes(nodes core.NodeList, events []metav1.WatchEvent) { - s.nodes = nodes - for _, n := range events { - s.nodeEventChan <- n - } -} - -func (s *NodeInMemoryStorage) PutNode(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - // Get node name as path parameter - pathParams := mux.Vars(r) - nodeName := pathParams["nodeName"] - - reqBody, _ := io.ReadAll(r.Body) - // fmt.Println(string(reqBody)) - var u core.Node - err := json.Unmarshal(reqBody, &u) - if err != nil { - fmt.Printf("There was an error decoding the json. 
err = %s", err) - w.WriteHeader(500) - return - } - - indexForReplacement := -1 - for index, node := range s.nodes.Items { - if node.Name == nodeName { - indexForReplacement = index - break - } - } - s.nodes.Items[indexForReplacement] = u - - err = json.NewEncoder(w).Encode(u) - if err != nil { - fmt.Printf("Unable to encode response for node update, error is: %v", err) - return - } -} - -func (s *NodeInMemoryStorage) GetNode(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - - // Get node name as path parameter - pathParams := mux.Vars(r) - nodeName := pathParams["nodeName"] - - var u core.Node - - for _, node := range s.nodes.Items { - if node.Name == nodeName { - u = node - break - } - } - - err := json.NewEncoder(w).Encode(u) - if err != nil { - fmt.Printf("Unable to encode response for node get, error is: %v", err) - return - } -} - -func (s *NodeInMemoryStorage) ScaleNodes(addedMachines []cluster.Machine, amount int) ([]core.Node, error) { - var changedNodes []core.Node - // Case downscaling - if amount < 0 { - amount = amount * -1 - var nodeIndicesToDelete []int - for i, node := range s.nodes.Items { - for _, taint := range node.Spec.Taints { - if taint.Key == "ToBeDeletedByClusterAutoscaler" { - nodeIndicesToDelete = append(nodeIndicesToDelete, i) - break - } - } - } - if len(nodeIndicesToDelete) != amount { - return []core.Node{}, errors.New(fmt.Sprintf("Mismatch: found %d desired nodes to delete, got %d tainted nodes", amount, len(nodeIndicesToDelete))) - } - for amount > 0 { - nodeToDelete := s.nodes.Items[nodeIndicesToDelete[amount-1]] - changedNodes = append(changedNodes, nodeToDelete) - // Delete node from node slice (https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang) - s.nodes.Items[nodeIndicesToDelete[amount-1]] = s.nodes.Items[len(s.nodes.Items)-1] - s.nodes.Items = 
s.nodes.Items[:len(s.nodes.Items)-1] - amount = amount - 1 - } - // TODO: Check whether this works (especially pointer to changedNodes should not be overwritten) - for i, _ := range changedNodes { - nodeDeleteEvent := metav1.WatchEvent{Type: "DELETED", Object: runtime.RawExtension{Object: &changedNodes[i]}} - s.nodeEventChan <- nodeDeleteEvent - s.nodeDownscalingChan <- changedNodes[i] - } - } else if amount > 0 { - newNodes := make([]core.Node, len(addedMachines)) - for i, machine := range addedMachines { - cpuQuantity, _ := resource.ParseQuantity(machine.Annotations["cpu"]) - memoryQuantity, _ := resource.ParseQuantity(machine.Annotations["memory"]) - podsQuantity, _ := resource.ParseQuantity(machine.Annotations["pods"]) - newNodes[i] = core.Node{ - TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Node"}, - ObjectMeta: metav1.ObjectMeta{Name: machine.Name + "-node"}, - Spec: core.NodeSpec{ProviderID: "clusterapi://" + machine.Name}, - Status: core.NodeStatus{Phase: "Running", Conditions: []core.NodeCondition{ - { - Type: "Ready", - Status: "True", - }, - }, Allocatable: map[core.ResourceName]resource.Quantity{ - "cpu": cpuQuantity, - "memory": memoryQuantity, - "pods": podsQuantity, - }, Capacity: map[core.ResourceName]resource.Quantity{ - "cpu": cpuQuantity, - "memory": memoryQuantity, - "pods": podsQuantity, - }}, - } - } - for i, _ := range newNodes { - s.nodes.Items = append(s.nodes.Items, newNodes[i]) - nodeAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &newNodes[i]}} - s.nodeEventChan <- nodeAddEvent - s.nodeUpscalingChan <- newNodes[i] - } - return nil, nil - } - return changedNodes, nil -} - -func (s *NodeInMemoryStorage) GetNodeUpscalingChannel() *broadcast.BroadcastServer[core.Node] { - return s.nodeUpscalingBroadcaster -} - -func (s *NodeInMemoryStorage) GetNodeDownscalingChannel() *broadcast.BroadcastServer[core.Node] { - return s.nodeDownscalingBroadcaster -} - -func (s *PodInMemoryStorage) GetPods() 
(core.PodList, *broadcast.BroadcastServer[metav1.WatchEvent]) { - return s.pods, s.podBroadcaster -} - -func (s *PodInMemoryStorage) StorePods(pods core.PodList, events []metav1.WatchEvent) { - s.pods = pods - for _, e := range events { - s.podEventChan <- e - } -} - -func (s *PodInMemoryStorage) DeletePods(events []metav1.WatchEvent) { - s.pods = core.PodList{} - for _, e := range events { - e.Type = "DELETED" - s.podEventChan <- e - } -} - -func (s *PodInMemoryStorage) BindPod(podIndex int, nodeName string) { - s.pods.Items[podIndex].ObjectMeta.ResourceVersion = s.getNextResourceId() - s.pods.Items[podIndex].Spec.NodeName = nodeName - s.pods.Items[podIndex].Status.Phase = "Running" - s.pods.Items[podIndex].Status.Conditions = append(s.pods.Items[podIndex].Status.Conditions, core.PodCondition{ - Type: core.PodScheduled, - Status: core.ConditionTrue, - }) - s.podEventChan <- metav1.WatchEvent{ - Type: "MODIFIED", - Object: runtime.RawExtension{Object: &s.pods.Items[podIndex]}, - } -} - -func (s *PodInMemoryStorage) FailedPod(podIndex int, status core.PodStatus) { - s.pods.Items[podIndex].Status = status - s.pods.Items[podIndex].Status.Phase = "Pending" - s.pods.Items[podIndex].ObjectMeta.ResourceVersion = s.getNextResourceId() - s.podEventChan <- metav1.WatchEvent{ - Type: "MODIFIED", - Object: runtime.RawExtension{Object: &s.pods.Items[podIndex]}, - } -} - -func (s *PodInMemoryStorage) getNextResourceId() string { - result := strconv.Itoa(s.nextResourceId) - s.nextResourceId = s.nextResourceId + 1 - return result -} - -func (s *NamespaceInMemoryStorage) GetNamespaces() (core.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) { - return s.namespaces, s.namespaceBroadcaster -} - -func (s *NamespaceInMemoryStorage) StoreNamespaces(namespaces core.NamespaceList) { - s.namespaces = namespaces -} - -// "Constructors" - -func NewPodInMemoryStorage() PodInMemoryStorage { - podEventChan := make(chan metav1.WatchEvent, 500) - return PodInMemoryStorage{ - pods: 
core.PodList{TypeMeta: metav1.TypeMeta{Kind: "PodList", APIVersion: "v1"}, Items: nil}, - podEventChan: podEventChan, - podBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "PodBroadcaster", podEventChan), - nextResourceId: 1, - } -} - -func NewNodeInMemoryStorage() NodeInMemoryStorage { - nodeEventChan := make(chan metav1.WatchEvent, 500) - nodeUpscalingChan := make(chan core.Node) - nodeDownscalingChan := make(chan core.Node) - return NodeInMemoryStorage{ - nodes: core.NodeList{TypeMeta: metav1.TypeMeta{Kind: "NodeList", APIVersion: "v1"}, Items: nil}, - nodeEventChan: nodeEventChan, - nodeBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NodeBroadcaster", nodeEventChan), - nodeUpscalingChan: nodeUpscalingChan, - nodeDownscalingChan: nodeDownscalingChan, - nodeDownscalingBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NodeDownscalingBroadcaster", nodeDownscalingChan), - nodeUpscalingBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NodeUpscalingBroadcaster", nodeUpscalingChan), - } -} - -func NewNamespaceInMemoryStorage() NamespaceInMemoryStorage { - var namespace core.Namespace - namespace.SetName("default") - namespace.Status = core.NamespaceStatus{Phase: "Active"} - namespaceEventChan := make(chan metav1.WatchEvent) - return NamespaceInMemoryStorage{ - namespaces: core.NamespaceList{TypeMeta: metav1.TypeMeta{Kind: "NamespaceList", APIVersion: "v1"}, Items: []core.Namespace{namespace}}, - namespaceEventChan: namespaceEventChan, - namespaceBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NamespaceBroadcaster", namespaceEventChan), - } -} diff --git a/pkg/control/idgenerator.go b/pkg/control/idgenerator.go new file mode 100644 index 0000000..3f71c8c --- /dev/null +++ b/pkg/control/idgenerator.go @@ -0,0 +1,18 @@ +package control + +import ( + "go-kube/pkg/storage" + "strconv" +) + +type IdGenerator struct { + idStorage storage.IdStorage +} + +func (s *IdGenerator) GetNextResourceId() string { + current := 
s.idStorage.GetNextId() + result := strconv.Itoa(current) + next := current + 1 + s.idStorage.StoreNextId(next) + return result +} diff --git a/pkg/control/kubeupdatecontroller.go b/pkg/control/kubeupdatecontroller.go deleted file mode 100644 index 72360b1..0000000 --- a/pkg/control/kubeupdatecontroller.go +++ /dev/null @@ -1,202 +0,0 @@ -package control - -import ( - "fmt" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "kube-rise/pkg/entity" - "kube-rise/pkg/storage" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" -) - -// Controls the specific adapter logic for different Kubernetes API -type KubeUpdateController interface { - // Updates the nodes and returns a channel for the response - // The update might be executed async and returns the response via the channel - UpdateNodes(ur v1.NodeList, events []metav1.WatchEvent) entity.NodeUpdateResponse - // Initializes nodes and machines when cluster scaling is activated - InitMachinesNodes(ur v1.NodeList, events []metav1.WatchEvent, machineSets []cluster.MachineSet, machines []cluster.Machine) entity.NodeUpdateResponse - // Updates the pods and returns a channel for the response - // The update might be executed async and returns the response via the channel - UpdatePods(ur v1.PodList, events []metav1.WatchEvent, podsToBePlaced v1.PodList, deleteEvents bool) chan entity.PodsUpdateResponse - // Fallback function in case scheduler is stuck - CreateDefaultResponse() entity.PodsUpdateResponse - // Called when a pod is failed - Failed(status v1.PodStatus, podName string) v1.Pod - // Called when a pod is binded - Binded(binding v1.Binding, podName string) -} - -type KubeUpdateControllerImpl struct { - storageContainer *storage.StorageContainer - - nodesUpdateChannel chan entity.NodeUpdateResponse - podsUpdateChannel chan entity.PodsUpdateResponse - - failedPodBuffer []entity.BindingFailureInformation - bindedPodBuffer []entity.BindingInformation - 
podsToBePlaced v1.PodList - clusterAutoscalerActive bool - clusterAutoscalerDone bool - newNodes []v1.Node - deletedNodes []v1.Node -} - -func NewKubeUpdateController(storageContainer *storage.StorageContainer) KubeUpdateController { - return &KubeUpdateControllerImpl{storageContainer: storageContainer, - failedPodBuffer: []entity.BindingFailureInformation{}, - bindedPodBuffer: []entity.BindingInformation{}, - podsToBePlaced: v1.PodList{Items: []v1.Pod{}}, - clusterAutoscalerActive: false, - clusterAutoscalerDone: false, - newNodes: []v1.Node{}, - deletedNodes: []v1.Node{}, - } -} - -func (k *KubeUpdateControllerImpl) UpdateNodes(ur v1.NodeList, events []metav1.WatchEvent) entity.NodeUpdateResponse { - k.storageContainer.Nodes.StoreNodes(ur, events) - return entity.NodeUpdateResponse{Data: ur} -} - -func (k *KubeUpdateControllerImpl) InitMachinesNodes(ur v1.NodeList, events []metav1.WatchEvent, machineSets []cluster.MachineSet, machines []cluster.Machine) entity.NodeUpdateResponse { - k.clusterAutoscalerActive = true - - // first register machine sets - machineSetList := cluster.MachineSetList{ - TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineSetList"}, - Items: machineSets, - } - var machineSetsAddedEvents []metav1.WatchEvent - for i := range machineSets { - temp := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &machineSets[i]}} - machineSetsAddedEvents = append(machineSetsAddedEvents, temp) - } - k.storageContainer.MachineSets.StoreMachineSets(machineSetList, machineSetsAddedEvents) - - // second, store the machines - machineList := cluster.MachineList{ - TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineList"}, - Items: machines, - } - var machineAddedEvents []metav1.WatchEvent - for i := range machines { - temp := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &machines[i]}} - machineAddedEvents = append(machineAddedEvents, temp) - } - 
k.storageContainer.Machines.StoreMachines(machineList, machineAddedEvents) - - // third, store the nodes - k.storageContainer.Nodes.StoreNodes(ur, events) - - return entity.NodeUpdateResponse{Data: ur} -} - -func (k *KubeUpdateControllerImpl) UpdatePods(ur v1.PodList, events []metav1.WatchEvent, podsToBePlaced v1.PodList, deleteEvents bool) chan entity.PodsUpdateResponse { - if deleteEvents == false { - // We need to reset all buffers - k.podsToBePlaced = podsToBePlaced - k.failedPodBuffer = make([]entity.BindingFailureInformation, 0) - k.bindedPodBuffer = make([]entity.BindingInformation, 0) - k.clusterAutoscalerDone = false - k.newNodes = []v1.Node{} - k.deletedNodes = []v1.Node{} - k.storageContainer.Pods.StorePods(ur, events) - if len(k.podsToBePlaced.Items) > 0 { - k.podsUpdateChannel = make(chan entity.PodsUpdateResponse) - } else { - k.podsUpdateChannel = nil - } - return k.podsUpdateChannel - } else { - k.storageContainer.Pods.DeletePods(events) - return nil - } -} - -func (k *KubeUpdateControllerImpl) CreateDefaultResponse() entity.PodsUpdateResponse { - failedList := make([]entity.BindingFailureInformation, 0) - bindedList := make([]entity.BindingInformation, 0) - for _, pod := range k.podsToBePlaced.Items { - // Check if we received information for some pods from the scheduler - podReported := false - for _, bindFailure := range k.failedPodBuffer { - if bindFailure.Pod == pod.Name { - failedList = append(failedList, bindFailure) - podReported = true - break - } - } - if podReported == true { - continue - } - for _, bindSuccess := range k.bindedPodBuffer { - if bindSuccess.Pod == pod.Name { - bindedList = append(bindedList, bindSuccess) - podReported = true - break - } - } - if podReported == true { - continue - } - failedList = append(failedList, entity.BindingFailureInformation{Pod: pod.Name, Message: "No new situation for the scheduler"}) - } - if len(failedList)+len(bindedList) == 0 && k.clusterAutoscalerActive && !k.clusterAutoscalerDone && 
k.storageContainer.MachineSets.IsDownscalingPossible() { - // TODO: Integrate downscaling - } - return entity.PodsUpdateResponse{Binded: bindedList, Failed: failedList, NewNodes: k.newNodes, DeletedNodes: k.deletedNodes} -} - -func (k *KubeUpdateControllerImpl) updatePodChannel() { - if len(k.failedPodBuffer)+len(k.bindedPodBuffer) == len(k.podsToBePlaced.Items) { - if len(k.failedPodBuffer) > 0 && k.clusterAutoscalerActive && !k.clusterAutoscalerDone && k.storageContainer.MachineSets.IsUpscalingPossible() { - broadcaster := k.storageContainer.Nodes.GetNodeUpscalingChannel() - nodeChannel := broadcaster.Subscribe() - defer broadcaster.CancelSubscription(nodeChannel) - var newNode v1.Node - fmt.Println("Waiting for cluster-autoscaler upscaling") - newNode = <-nodeChannel - k.newNodes = append(k.newNodes, newNode) - // TODO: Check if cluster autoscaler could react two times - // TODO: read from status config map of cluster autoscaler to track status - k.clusterAutoscalerDone = true - // Empty failed pod buffer, they should be scheduled now - k.failedPodBuffer = make([]entity.BindingFailureInformation, 0) - } else { - k.podsUpdateChannel <- entity.PodsUpdateResponse{Failed: k.failedPodBuffer, Binded: k.bindedPodBuffer, NewNodes: k.newNodes, DeletedNodes: k.deletedNodes} - } - } -} - -func (k *KubeUpdateControllerImpl) Failed(status v1.PodStatus, podName string) v1.Pod { - allPods, _ := k.storageContainer.Pods.GetPods() - var result v1.Pod - for i, element := range allPods.Items { - if element.Name == podName { - fmt.Printf("Pod %s cannot be scheduled, reason: %s\n", podName, status.Conditions[0].Message) - k.failedPodBuffer = append(k.failedPodBuffer, entity.BindingFailureInformation{Pod: podName, Message: status.Conditions[0].Message}) - result = element - k.storageContainer.Pods.FailedPod(i, status) - break - } - } - k.updatePodChannel() - result.Status = status - return result -} - -func (k *KubeUpdateControllerImpl) Binded(binding v1.Binding, podName string) { - 
allPods, _ := k.storageContainer.Pods.GetPods() - for i, element := range allPods.Items { - if element.Name == podName { - fmt.Printf("Pod %s will be bound to Node %s\n", podName, binding.Target.Name) - k.storageContainer.Pods.BindPod(i, binding.Target.Name) - k.bindedPodBuffer = append(k.bindedPodBuffer, entity.BindingInformation{Pod: podName, Node: binding.Target.Name}) - // k.storageContainer.PodStorage.UpdatePodStatus(element) - break - } - } - k.updatePodChannel() -} diff --git a/pkg/control/nodecontroller.go b/pkg/control/nodecontroller.go new file mode 100644 index 0000000..a65e561 --- /dev/null +++ b/pkg/control/nodecontroller.go @@ -0,0 +1,68 @@ +package control + +import ( + "go-kube/pkg/misim" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type NodeController struct { + storage *storage.StorageContainer +} + +func (c NodeController) UpdateNodes(nodes v1.NodeList, events []metav1.WatchEvent) misim.NodeUpdateResponse { + klog.V(3).Info("Node-Update: ", len(nodes.Items), " nodes") + c.storage.Nodes.StoreNodes(nodes, events) + return misim.NodeUpdateResponse{ + Data: nodes, + } +} + +func (c NodeController) InitMachinesNodes(nodes v1.NodeList, events []metav1.WatchEvent, machineSets []cluster.MachineSet, machines []cluster.Machine) misim.NodeUpdateResponse { + klog.V(3).Infof("Machine-Node-Init: %d nodes, %d machine sets, %d machines", len(nodes.Items), len(machineSets), len(machines)) + + // Activate the cluster autoscaling! 
+ c.storage.AdapterState.StoreClusterAutoscalerActive(true) + + // first register machine sets + machineSetList := cluster.MachineSetList{ + TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineSetList"}, + Items: machineSets, + } + var machineSetsAddedEvents []metav1.WatchEvent + // Each + for i := range machineSets { + temp := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &machineSets[i]}} + machineSetsAddedEvents = append(machineSetsAddedEvents, temp) + } + c.storage.MachineSets.StoreMachineSets(machineSetList, machineSetsAddedEvents) + + // second, store the machines + machineList := cluster.MachineList{ + TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineList"}, + Items: machines, + } + var machineAddedEvents []metav1.WatchEvent + for i := range machines { + temp := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &machines[i]}} + machineAddedEvents = append(machineAddedEvents, temp) + } + c.storage.Machines.StoreMachines(machineList, machineAddedEvents) + + // third, store the nodes + c.storage.Nodes.StoreNodes(nodes, events) + + return misim.NodeUpdateResponse{ + Data: nodes, + } +} + +func NewNodeController(storage *storage.StorageContainer) NodeController { + return NodeController{ + storage: storage, + } +} diff --git a/pkg/control/nodeupdates.go b/pkg/control/nodeupdates.go new file mode 100644 index 0000000..32cb432 --- /dev/null +++ b/pkg/control/nodeupdates.go @@ -0,0 +1,32 @@ +package control + +import ( + "go-kube/pkg/misim" + "go-kube/pkg/storage" +) + +type NodeUpdatesResource interface { + Post(misim.NodeUpdateRequest) misim.NodeUpdateResponse +} + +type NodeUpdatesResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl NodeUpdatesResourceImpl) Post(u misim.NodeUpdateRequest) misim.NodeUpdateResponse { + controller := NewNodeController(impl.storage) + + if u.MachineSets == nil || len(u.MachineSets) == 0 { + // No cluster 
scaling + return controller.UpdateNodes(u.AllNodes, u.Events) + } else { + // If the request contains machines set, we use only the machines + return controller.InitMachinesNodes(u.AllNodes, u.Events, u.MachineSets, u.Machines) + } +} + +func NewNodeUpdateResource(storage *storage.StorageContainer) NodeUpdatesResourceImpl { + return NodeUpdatesResourceImpl{ + storage: storage, + } +} diff --git a/pkg/control/podcontroller.go b/pkg/control/podcontroller.go new file mode 100644 index 0000000..06b4cd1 --- /dev/null +++ b/pkg/control/podcontroller.go @@ -0,0 +1,195 @@ +package control + +import ( + "go-kube/pkg/misim" + "go-kube/pkg/storage" + core "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "strconv" + "sync" +) + +type PodController struct { + storage *storage.StorageContainer + idGenerator IdGenerator + mu sync.Mutex +} + +func (c *PodController) UpdatePods(ur v1.PodList, events []metav1.WatchEvent, podsToBePlaced v1.PodList, deleteEvents bool) misim.PodsUpdateResponse { + if !deleteEvents { + klog.V(3).Info("Pod-Update: ", len(ur.Items), " pods, ", len(podsToBePlaced.Items), " to be placed") + // Buffers have to be cleared before storing new pods + c.storage.Pods.PodsToBePlaced().Clear() + c.storage.Pods.PodsToBePlaced().PutAll(podsToBePlaced.Items) + c.storage.Pods.FailedPodBuffer().Clear() + c.storage.Pods.BindedPodBuffer().Clear() + c.storage.AdapterState.StoreClusterAutoscalingDone(false) + c.storage.Nodes.NewNodes().Clear() + c.storage.Nodes.DeletedNodes().Clear() + + // Store pods + c.storage.Pods.StorePods(ur, events) + + // If there were pods to be placed, wait for the response + if !c.storage.Pods.PodsToBePlaced().Empty() { + podUpdateChannel := c.storage.Pods.PodsUpdateChannel().InitChannel() + //c.storage.Pods.PodsUpdateChannel().InitChannel() + klog.V(3).Infof("Wait for pods to be placed...") + // wait for it + select { + case response := <-podUpdateChannel: + return response + } + 
} else { + return c.createDefaultResponse() + } + } else { + klog.V(3).Info("Pod-Update: Deleted pods") + c.storage.Pods.DeletePods(events) + } + return c.createDefaultResponse() +} + +// Generates an update about all the pods that should be placed +func (c *PodController) createDefaultResponse() misim.PodsUpdateResponse { + failedList := make([]misim.BindingFailureInformation, 0) + bindedList := make([]misim.BindingInformation, 0) + for _, pod := range c.storage.Pods.PodsToBePlaced().Items() { + // Check if we received information for some pods from the scheduler + podReported := false + for _, bindFailure := range c.storage.Pods.FailedPodBuffer().Items() { + if bindFailure.Pod == pod.Name { + failedList = append(failedList, bindFailure) + podReported = true + break + } + } + if podReported == true { + continue + } + for _, bindSuccess := range c.storage.Pods.BindedPodBuffer().Items() { + if bindSuccess.Pod == pod.Name { + bindedList = append(bindedList, bindSuccess) + podReported = true + break + } + } + if podReported == true { + continue + } + failedList = append(failedList, misim.BindingFailureInformation{Pod: pod.Name, Message: "No new situation for the scheduler"}) + } + if c.storage.Pods.FailedPodBuffer().Empty() && c.storage.Pods.BindedPodBuffer().Empty() && c.storage.AdapterState.IsClusterAutoscalerActive() && !c.storage.AdapterState.IsClusterAutoscalingDone() && c.storage.MachineSets.IsDownscalingPossible() { + // TODO [Cluster Downscaling]: Integrate downscaling + // (but maybe not here???) 
+ } + return misim.PodsUpdateResponse{ + Binded: bindedList, + Failed: failedList, + NewNodes: c.storage.Nodes.NewNodes().Items(), + DeletedNodes: c.storage.Nodes.DeletedNodes().Items(), + } + +} + +func (c *PodController) BindPod(podName string, nodeName string) { + c.storage.Pods.BeginTransaction() + + klog.V(3).Info("Bound: " + podName + " to " + nodeName) + // Get pod reference + pod := c.storage.Pods.GetPod(podName) + + // Put binding information into buffer + bindingInformation := misim.BindingInformation{Pod: podName, Node: nodeName} + c.storage.Pods.BindedPodBuffer().Put(bindingInformation) + + // Update pod data and store it updated + pod.ObjectMeta.ResourceVersion = c.idGenerator.GetNextResourceId() + pod.Spec.NodeName = nodeName + pod.Status.Phase = "Running" + pod.Status.Conditions = append(pod.Status.Conditions, core.PodCondition{ + Type: core.PodScheduled, + Status: core.ConditionTrue, + }) + + c.storage.Pods.UpdatePod(podName, pod) + c.updatePodChannel() + + c.storage.Pods.EndTransaction() +} + +func (c *PodController) FailedPod(podName string, status core.PodStatus) { + c.storage.Pods.BeginTransaction() + + klog.V(3).Info("Failed: " + podName) + + // Get pod reference + pod := c.storage.Pods.GetPod(podName) + + // Put binding information in buffer + failureInformation := misim.BindingFailureInformation{ + Pod: podName, + Message: status.Conditions[0].Message, + } + c.storage.Pods.FailedPodBuffer().Put(failureInformation) + + // Update pods data + pod.Status = status + pod.Status.Phase = "Pending" + pod.ObjectMeta.ResourceVersion = c.idGenerator.GetNextResourceId() + + c.storage.Pods.UpdatePod(podName, pod) + c.updatePodChannel() + + c.storage.Pods.EndTransaction() +} + +func (c *PodController) updatePodChannel() { + processedPodCount := c.storage.Pods.FailedPodBuffer().Size() + c.storage.Pods.BindedPodBuffer().Size() + podsToBePlacedCount := c.storage.Pods.PodsToBePlaced().Size() + klog.V(3).Info("Processessed " + strconv.Itoa(processedPodCount) + " 
of " + strconv.Itoa(podsToBePlacedCount)) + if processedPodCount == podsToBePlacedCount { + if c.shouldScaleUp() { + // @Martin Was? + // Copied over from KubeUpdateController + // Do we subscribe just to cancel it again??? + // Why? + broadcaster := c.storage.Nodes.GetNodeUpscalingChannel() + nodeChannel := broadcaster.Subscribe() + defer broadcaster.CancelSubscription(nodeChannel) + var newNode v1.Node + klog.V(6).Info("Waiting for cluster-autoscaler upscaling") + newNode = <-nodeChannel + c.storage.Nodes.NewNodes().Put(newNode) + // TODO [Process Status Config map from Cluster Autoscaler]: read from status config map of cluster autoscaler to track status + c.storage.AdapterState.StoreClusterAutoscalingDone(true) + // Empty failed pod buffer, they should be scheduled now + c.storage.Pods.FailedPodBuffer().Clear() + } else { + c.storage.Pods.PodsUpdateChannel().Get() <- misim.PodsUpdateResponse{ + Failed: c.storage.Pods.FailedPodBuffer().Items(), + Binded: c.storage.Pods.BindedPodBuffer().Items(), + NewNodes: c.storage.Nodes.NewNodes().Items(), + DeletedNodes: c.storage.Nodes.DeletedNodes().Items(), + } + } + } +} + +func (c *PodController) shouldScaleUp() bool { + return !c.storage.Pods.FailedPodBuffer().Empty() && + c.storage.AdapterState.IsClusterAutoscalerActive() && + !c.storage.AdapterState.IsClusterAutoscalingDone() && + c.storage.MachineSets.IsUpscalingPossible() +} + +func NewPodController(storage *storage.StorageContainer) PodController { + return PodController{ + storage: storage, + idGenerator: IdGenerator{ + idStorage: storage.PodIds, + }, + } +} diff --git a/pkg/control/podupdates.go b/pkg/control/podupdates.go new file mode 100644 index 0000000..4e20526 --- /dev/null +++ b/pkg/control/podupdates.go @@ -0,0 +1,25 @@ +package control + +import ( + "go-kube/pkg/misim" + "go-kube/pkg/storage" +) + +type PodUpdatesResource interface { + Post(misim.PodsUpdateRequest) misim.PodsUpdateResponse +} + +type PodUpdatesResourceImpl struct { + storage 
*storage.StorageContainer +} + +func (impl PodUpdatesResourceImpl) Post(u misim.PodsUpdateRequest) misim.PodsUpdateResponse { + controller := NewPodController(impl.storage) + return controller.UpdatePods(u.AllPods, u.Events, u.PodsToBePlaced, false) +} + +func NewPodUpdateResource(storage *storage.StorageContainer) PodUpdatesResourceImpl { + return PodUpdatesResourceImpl{ + storage: storage, + } +} diff --git a/pkg/control/scalecontroller.go b/pkg/control/scalecontroller.go new file mode 100644 index 0000000..eeea05b --- /dev/null +++ b/pkg/control/scalecontroller.go @@ -0,0 +1,181 @@ +package control + +import ( + "errors" + "fmt" + "go-kube/pkg/storage" + autoscaling "k8s.io/api/autoscaling/v1" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type ScaleController struct { + storage *storage.StorageContainer +} + +func (c ScaleController) ScaleMachineSet(machineSetName string, u autoscaling.Scale) error { + desiredReplicas := u.Spec.Replicas + scaleAmount := int32(0) + machineSet := c.storage.MachineSets.GetMachineSet(machineSetName) + scaleAmount = desiredReplicas - *machineSet.Spec.Replicas + klog.V(3).Infof("Scaling machine set %s to desired amount %d (scale amount %d)", machineSetName, desiredReplicas, scaleAmount) + + // Update machineset + machineSet.Spec.Replicas = &desiredReplicas + machineSet.Status.AvailableReplicas = desiredReplicas + machineSet.Status.FullyLabeledReplicas = desiredReplicas + machineSet.Status.ReadyReplicas = desiredReplicas + + c.storage.MachineSets.PutMachineSet(machineSetName, machineSet) + + // Check if we have to scale down + if scaleAmount < 0 { + // For downscaling, we first delete nodes then machines + scaledDownNodes, err := c.ScaleDownNodes(-int(scaleAmount)) + + if err != nil { + return err + } + + // Scale machines + c.ScaleDownMachines(machineSet, scaledDownNodes, -int(scaleAmount)) + 
} else if scaleAmount > 0 { + addedMachines, err := c.ScaleUpMachines(machineSet, int(scaleAmount)) + + if err != nil { + return err + } + + // Scale nodes + c.ScaleUpNodes(addedMachines) + } + + return nil +} + +// TODO [Cluster Downscaling]: Fix this method +func (c ScaleController) ScaleDownMachines(machineSet cluster.MachineSet, changedNodes []core.Node, amount int) { + // In case of downscaling we need to delete machines + for _, changedNode := range changedNodes { + allMachines, _ := c.storage.Machines.GetMachines() + var nodeMachine cluster.Machine + for _, machine := range allMachines.Items { + if machine.Status.NodeRef.Name == changedNode.Name { + nodeMachine = machine + break + } + } + + c.storage.Machines.DeleteMachine(nodeMachine.Name) + } +} + +func (c ScaleController) ScaleUpMachines(machineSet cluster.MachineSet, amount int) ([]cluster.Machine, error) { + var addedMachines []cluster.Machine + providerIds := make([]string, amount) + nodeRefs := make([]core.ObjectReference, amount) + for amount > 0 { + nextMachineId := c.storage.Machines.GetMachineCount() + providerIds[amount-1] = "clusterapi://" + fmt.Sprintf("%s-machine-%d", machineSet.Name, nextMachineId) + nodeRefs[amount-1] = core.ObjectReference{Kind: "Node", APIVersion: "v1", Name: fmt.Sprintf("%s-machine-%d", machineSet.Name, nextMachineId) + "-node"} + + newMachine := cluster.Machine{ + TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "Machine"}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-machine-%d", machineSet.Name, nextMachineId), Namespace: "kube-system", Annotations: map[string]string{ + "machine-set-name": machineSet.Name, + "cpu": machineSet.Annotations["capacity.cluster-autoscaler.kubernetes.io/cpu"], + "memory": machineSet.Annotations["capacity.cluster-autoscaler.kubernetes.io/memory"], + "pods": machineSet.Annotations["capacity.cluster-autoscaler.kubernetes.io/maxPods"], + }, OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: 
"cluster.x-k8s.io/v1beta1", + Kind: "MachineSet", + Name: machineSet.Name, + }, + }}, + Spec: cluster.MachineSpec{ProviderID: &providerIds[amount-1]}, + Status: cluster.MachineStatus{Phase: "Running", NodeRef: &nodeRefs[amount-1]}, + } + klog.V(5).Infof("Prepared new machine %s", newMachine.Name) + c.storage.Machines.IncrementMachineCount() + amount = amount - 1 + addedMachines = append(addedMachines, newMachine) + } + for _, machine := range addedMachines { + klog.V(5).Infof("Adding machine %s", machine.Name) + c.storage.Machines.AddMachine(machine) + } + return addedMachines, nil +} + +func (c ScaleController) ScaleDownNodes(amount int) ([]core.Node, error) { + // Find nodes that should be deleted to scale down + var nodesToDelete []core.Node + allNodes, _ := c.storage.Nodes.GetNodes() + for _, node := range allNodes.Items { + for _, taint := range node.Spec.Taints { + if taint.Key == "ToBeDeletedByClusterAutoscaler" { + nodesToDelete = append(nodesToDelete, node) + // Break taint iteration (not node iteration) + break + } + } + } + + // Check if the right amount of nodes is marked for deletion + if len(nodesToDelete) != amount { + return nil, errors.New(fmt.Sprintf("Mismatch: found %d desired nodes to delete, got %d tainted nodes", amount, len(nodesToDelete))) + } + + var changedNodes []core.Node + for amount > 0 { + nodeToDelete := nodesToDelete[amount-1] + + changedNodes = append(changedNodes, nodeToDelete) + c.storage.Nodes.DeleteNode(nodeToDelete.Name) + + amount = amount - 1 + } + // TODO [Cluster Downscaling]: Check whether this works (especially pointer to changedNodes should not be overwritten) + + return changedNodes, nil +} + +func (c ScaleController) ScaleUpNodes(addedMachines []cluster.Machine) { + newNodes := make([]core.Node, len(addedMachines)) + for i, machine := range addedMachines { + cpuQuantity, _ := resource.ParseQuantity(machine.Annotations["cpu"]) + memoryQuantity, _ := resource.ParseQuantity(machine.Annotations["memory"]) + podsQuantity, 
_ := resource.ParseQuantity(machine.Annotations["pods"]) + newNodes[i] = core.Node{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Node"}, + ObjectMeta: metav1.ObjectMeta{Name: machine.Name + "-node"}, + Spec: core.NodeSpec{ProviderID: "clusterapi://" + machine.Name}, + Status: core.NodeStatus{Phase: "Running", Conditions: []core.NodeCondition{ + { + Type: "Ready", + Status: "True", + }, + }, Allocatable: map[core.ResourceName]resource.Quantity{ + "cpu": cpuQuantity, + "memory": memoryQuantity, + "pods": podsQuantity, + }, Capacity: map[core.ResourceName]resource.Quantity{ + "cpu": cpuQuantity, + "memory": memoryQuantity, + "pods": podsQuantity, + }}, + } + + } + for i := range newNodes { + c.storage.Nodes.AddNode(newNodes[i]) + } +} + +func NewScaleController(storage *storage.StorageContainer) ScaleController { + return ScaleController{storage: storage} +} diff --git a/pkg/entity/binding.go b/pkg/entity/binding.go deleted file mode 100644 index c67dd0e..0000000 --- a/pkg/entity/binding.go +++ /dev/null @@ -1,13 +0,0 @@ -package entity - -// Information about a successfully binded pod -type BindingInformation struct { - Pod string - Node string -} - -// Information about a failed binding for a pod -type BindingFailureInformation struct { - Pod string - Message string -} diff --git a/pkg/entity/pod.go b/pkg/entity/pod.go deleted file mode 100644 index 3d4ee57..0000000 --- a/pkg/entity/pod.go +++ /dev/null @@ -1,22 +0,0 @@ -package entity - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Update request from the simulation for pods -type PodsUpdateRequest struct { - AllPods v1.PodList - Events []metav1.WatchEvent - PodsToBePlaced v1.PodList -} - -// Response of the adapter to a PodsUpdateRequest from the simulation -// with the information about bindings and failures from the kubescheduler -type PodsUpdateResponse struct { - Failed []BindingFailureInformation - Binded []BindingInformation - NewNodes []v1.Node - 
DeletedNodes []v1.Node -} diff --git a/pkg/entity/update.go b/pkg/entity/update.go deleted file mode 100644 index 967bc66..0000000 --- a/pkg/entity/update.go +++ /dev/null @@ -1,20 +0,0 @@ -package entity - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" -) - -// Update request from the simulation for nodes -type NodeUpdateRequest struct { - AllNodes v1.NodeList - Events []metav1.WatchEvent - MachineSets []cluster.MachineSet - Machines []cluster.Machine -} - -// Response of the adapter to a NodeUpdateRequest from the simulation -type NodeUpdateResponse struct { - Data v1.NodeList `json:"Updated NodeList with"` -} diff --git a/pkg/interfaces/kubeapi/api/api.go b/pkg/interfaces/kubeapi/api/api.go new file mode 100644 index 0000000..ba91b70 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/api.go @@ -0,0 +1,32 @@ +package api + +import ( + v1 "go-kube/pkg/interfaces/kubeapi/api/v1" + "go-kube/pkg/storage" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ApiResource interface { + Get() metav1.APIVersions + V1() v1.V1Resource +} + +type ApiResourceImpl struct { + storage *storage.StorageContainer +} + +func (api ApiResourceImpl) Get() metav1.APIVersions { + return metav1.APIVersions{ + Versions: []string{"v1"}, + } +} + +func (api ApiResourceImpl) V1() v1.V1Resource { + return v1.NewV1Resource(api.storage) +} + +func NewApiResource(storage *storage.StorageContainer) ApiResourceImpl { + return ApiResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/cluster-autoscaler-status/clusterautoscalerstatus.go b/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/cluster-autoscaler-status/clusterautoscalerstatus.go new file mode 100644 index 0000000..9302cfe --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/cluster-autoscaler-status/clusterautoscalerstatus.go @@ -0,0 +1,32 @@ +package 
clusterautoscalerstatus + +import ( + "go-kube/pkg/storage" + core "k8s.io/api/core/v1" +) + +type ClusterAutoscalerStatusResource interface { + Get() core.ConfigMap + Put(core.ConfigMap) core.ConfigMap +} + +type ClusterAutoscalerStatusResourceImpl struct { + namespaceName string + storage *storage.StorageContainer +} + +func (impl ClusterAutoscalerStatusResourceImpl) Get() core.ConfigMap { + return impl.storage.StatusConfigMap.GetStatusConfigMap() +} + +func (impl ClusterAutoscalerStatusResourceImpl) Put(configMap core.ConfigMap) core.ConfigMap { + impl.storage.StatusConfigMap.StoreStatusConfigMap(configMap) + return configMap +} + +func NewClusterAutoscalerStatusResource(namespace string, storage *storage.StorageContainer) ClusterAutoscalerStatusResourceImpl { + return ClusterAutoscalerStatusResourceImpl{ + namespaceName: namespace, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/configmaps.go b/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/configmaps.go new file mode 100644 index 0000000..9407fbf --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/configmaps.go @@ -0,0 +1,26 @@ +package configmaps + +import ( + clusterautoscalerstatus "go-kube/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps/cluster-autoscaler-status" + "go-kube/pkg/storage" +) + +type ConfigmapsResource interface { + ClusterAutoscalerStatus() clusterautoscalerstatus.ClusterAutoscalerStatusResource +} + +type ConfigmapsResourceImpl struct { + namespaceName string + storage *storage.StorageContainer +} + +func (impl ConfigmapsResourceImpl) ClusterAutoscalerStatus() clusterautoscalerstatus.ClusterAutoscalerStatusResource { + return clusterautoscalerstatus.NewClusterAutoscalerStatusResource(impl.namespaceName, impl.storage) +} + +func NewConfigmapsResource(name string, storage *storage.StorageContainer) ConfigmapsResourceImpl { + return ConfigmapsResourceImpl{ + namespaceName: name, + 
storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/namespace.go b/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/namespace.go new file mode 100644 index 0000000..1793f14 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/namespace.go @@ -0,0 +1,39 @@ +package namespace + +import ( + "go-kube/pkg/interfaces/kubeapi/api/v1/namespaces/namespace/configmaps" + "go-kube/pkg/interfaces/kubeapi/api/v1/pods" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" +) + +type NamespaceResource interface { + Get() v1.Namespace + Pods() pods.PodsResource + Configmaps() configmaps.ConfigmapsResource +} + +type NamespaceResourceImpl struct { + namespaceName string + storage *storage.StorageContainer +} + +func (impl NamespaceResourceImpl) Get() v1.Namespace { + return impl.storage.Namespaces.GetNamespace(impl.namespaceName) +} + +func (impl NamespaceResourceImpl) Pods() pods.PodsResource { + // TODO [Support for multiple namespaces]: pass namespace name somehow if we want to support multiple namespaces + return pods.NewPodsResource(impl.storage) +} + +func (impl NamespaceResourceImpl) Configmaps() configmaps.ConfigmapsResource { + return configmaps.NewConfigmapsResource(impl.namespaceName, impl.storage) +} + +func NewNamespaceResource(name string, storage *storage.StorageContainer) NamespaceResourceImpl { + return NamespaceResourceImpl{ + namespaceName: name, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/namespaces/namespaces.go b/pkg/interfaces/kubeapi/api/v1/namespaces/namespaces.go new file mode 100644 index 0000000..e71def6 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/namespaces/namespaces.go @@ -0,0 +1,33 @@ +package namespaces + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/interfaces/kubeapi/api/v1/namespaces/namespace" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NamespacesResource interface { + Get() 
(v1.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) + Namespace(namespaceName string) namespace.NamespaceResource +} + +type NamespacesResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl NamespacesResourceImpl) Get() (v1.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + namespaces, br := impl.storage.Namespaces.GetNamespaces() + return namespaces, br +} + +func (impl NamespacesResourceImpl) Namespace(namespaceName string) namespace.NamespaceResource { + return namespace.NewNamespaceResource(namespaceName, impl.storage) +} + +func NewNamespacesResource(storage *storage.StorageContainer) NamespacesResourceImpl { + return NamespacesResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/nodes/node/node.go b/pkg/interfaces/kubeapi/api/v1/nodes/node/node.go new file mode 100644 index 0000000..1f0c46d --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/nodes/node/node.go @@ -0,0 +1,32 @@ +package node + +import ( + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" +) + +type NodeResource interface { + Get() v1.Node + Put(v1.Node) v1.Node +} + +type NodeResourceImpl struct { + name string + nodesStorage storage.NodeStorage +} + +func (impl NodeResourceImpl) Get() v1.Node { + nodes := impl.nodesStorage.GetNode(impl.name) + return nodes +} + +func (impl NodeResourceImpl) Put(node v1.Node) v1.Node { + return impl.nodesStorage.PutNode(impl.name, node) +} + +func NewNodeResource(nodeName string, nodeStorage storage.NodeStorage) NodeResourceImpl { + return NodeResourceImpl{ + name: nodeName, + nodesStorage: nodeStorage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/nodes/nodes.go b/pkg/interfaces/kubeapi/api/v1/nodes/nodes.go new file mode 100644 index 0000000..4556999 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/nodes/nodes.go @@ -0,0 +1,33 @@ +package nodes + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/interfaces/kubeapi/api/v1/nodes/node" + "go-kube/pkg/storage" + v1 
"k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NodesResource interface { + Get() (v1.NodeList, *broadcast.BroadcastServer[metav1.WatchEvent]) + Node(nodeName string) node.NodeResource +} + +type NodesResourceImpl struct { + storage *storage.StorageContainer + nodeImpl node.NodeResource +} + +func (impl NodesResourceImpl) Node(nodeName string) node.NodeResource { + return node.NewNodeResource(nodeName, impl.storage.Nodes) +} + +func (impl NodesResourceImpl) Get() (v1.NodeList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return impl.storage.Nodes.GetNodes() +} + +func NewNodesResource(storage *storage.StorageContainer) NodesResourceImpl { + return NodesResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/pods/pod/binding/binding.go b/pkg/interfaces/kubeapi/api/v1/pods/pod/binding/binding.go new file mode 100644 index 0000000..ba98356 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/pods/pod/binding/binding.go @@ -0,0 +1,28 @@ +package binding + +import ( + "go-kube/pkg/control" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" +) + +type BindingResource interface { + Post(v1.Binding) +} + +type BindingResourceImpl struct { + podName string + storage *storage.StorageContainer +} + +func (impl BindingResourceImpl) Post(binding v1.Binding) { + controller := control.NewPodController(impl.storage) + controller.BindPod(impl.podName, binding.Target.Name) +} + +func NewBindingResource(podName string, storage *storage.StorageContainer) BindingResourceImpl { + return BindingResourceImpl{ + podName: podName, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/pods/pod/pod.go b/pkg/interfaces/kubeapi/api/v1/pods/pod/pod.go new file mode 100644 index 0000000..66583ca --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/pods/pod/pod.go @@ -0,0 +1,32 @@ +package pod + +import ( + "go-kube/pkg/interfaces/kubeapi/api/v1/pods/pod/binding" + "go-kube/pkg/interfaces/kubeapi/api/v1/pods/pod/status" + 
"go-kube/pkg/storage" +) + +type PodResource interface { + Status() status.StatusResource + Binding() binding.BindingResource +} + +type PodResourceImpl struct { + podName string + storage *storage.StorageContainer +} + +func (impl PodResourceImpl) Status() status.StatusResource { + return status.NewStatusResource(impl.podName, impl.storage) +} + +func (impl PodResourceImpl) Binding() binding.BindingResource { + return binding.NewBindingResource(impl.podName, impl.storage) +} + +func NewPodResource(podName string, storage *storage.StorageContainer) PodResourceImpl { + return PodResourceImpl{ + podName: podName, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/pods/pod/status/status.go b/pkg/interfaces/kubeapi/api/v1/pods/pod/status/status.go new file mode 100644 index 0000000..aaafb47 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/pods/pod/status/status.go @@ -0,0 +1,30 @@ +package status + +import ( + "go-kube/pkg/control" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" +) + +type StatusResource interface { + Patch(status v1.PodStatusResult) v1.Pod +} + +type StatusResourceImpl struct { + podName string + storageContainer *storage.StorageContainer +} + +func (impl StatusResourceImpl) Patch(status v1.PodStatusResult) v1.Pod { + controller := control.NewPodController(impl.storageContainer) + // We always assume this means it's failed + controller.FailedPod(impl.podName, status.Status) + return impl.storageContainer.Pods.GetPod(impl.podName) +} + +func NewStatusResource(podName string, storage *storage.StorageContainer) StatusResourceImpl { + return StatusResourceImpl{ + podName: podName, + storageContainer: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/pods/pods.go b/pkg/interfaces/kubeapi/api/v1/pods/pods.go new file mode 100644 index 0000000..2c1ae71 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/pods/pods.go @@ -0,0 +1,32 @@ +package pods + +import ( + "go-kube/internal/broadcast" + 
"go-kube/pkg/interfaces/kubeapi/api/v1/pods/pod" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type PodsResource interface { + Get() (v1.PodList, *broadcast.BroadcastServer[metav1.WatchEvent]) + Pod(podName string) pod.PodResource +} + +type PodsResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl PodsResourceImpl) Get() (v1.PodList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return impl.storage.Pods.GetPods() +} + +func (impl PodsResourceImpl) Pod(podName string) pod.PodResource { + return pod.NewPodResource(podName, impl.storage) +} + +func NewPodsResource(storage *storage.StorageContainer) PodsResourceImpl { + return PodsResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/api/v1/v1.go b/pkg/interfaces/kubeapi/api/v1/v1.go new file mode 100644 index 0000000..7f6ab70 --- /dev/null +++ b/pkg/interfaces/kubeapi/api/v1/v1.go @@ -0,0 +1,128 @@ +package v1 + +import ( + "go-kube/pkg/interfaces/kubeapi/api/v1/namespaces" + "go-kube/pkg/interfaces/kubeapi/api/v1/nodes" + "go-kube/pkg/interfaces/kubeapi/api/v1/pods" + "go-kube/pkg/storage" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type V1Resource interface { + Get() metav1.APIResourceList + Nodes() nodes.NodesResource + Pods() pods.PodsResource + Namespaces() namespaces.NamespacesResource +} + +type V1ResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl V1ResourceImpl) Nodes() nodes.NodesResource { + return nodes.NewNodesResource(impl.storage) +} + +func (impl V1ResourceImpl) Pods() pods.PodsResource { + return pods.NewPodsResource(impl.storage) +} + +func (impl V1ResourceImpl) Namespaces() namespaces.NamespacesResource { + return namespaces.NewNamespacesResource(impl.storage) +} + +func (impl V1ResourceImpl) Get() metav1.APIResourceList { + return metav1.APIResourceList{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "APIResourceList"}, + GroupVersion: "v1", + APIResources: 
[]metav1.APIResource{ + { + Name: "namespaces", + SingularName: "", + Namespaced: false, + Kind: "Namespace", + Verbs: []string{"create", "delete", "get", "list", "patch", "update", "watch"}, + }, + { + Name: "nodes", + SingularName: "", + Namespaced: false, + Kind: "Node", + Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, + }, + { + Name: "nodes/status", + SingularName: "", + Namespaced: false, + Kind: "Node", + Verbs: []string{"get", "patch", "update"}, + }, + { + Name: "persistentvolumeclaims", + SingularName: "", + Namespaced: true, + Kind: "PersistentVolumeClaim", + Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, + }, + { + Name: "persistentvolumes", + SingularName: "", + Namespaced: false, + Kind: "PersistentVolume", + Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, + }, + { + Name: "pods", + SingularName: "", + Namespaced: true, + Kind: "Pod", + Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, + Categories: []string{"all"}, + }, + { + Name: "pods/binding", + SingularName: "", + Namespaced: true, + Kind: "Binding", + Verbs: []string{"create"}, + }, + { + Name: "pods/eviction", + SingularName: "", + Namespaced: true, + Group: "policy", + Version: "v1", + Kind: "Eviction", + Verbs: []string{"create"}, + }, + { + Name: "pods/status", + SingularName: "", + Namespaced: true, + Kind: "Pod", + Verbs: []string{"get", "patch", "update"}, + }, + { + Name: "replicationcontrollers", + SingularName: "", + Namespaced: true, + Kind: "ReplicationController", + Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, + Categories: []string{"all"}, + }, + { + Name: "services", + SingularName: "", + Namespaced: true, + Kind: "Service", + Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", 
"watch"}, + Categories: []string{"all"}, + }, + }, + } +} + +func NewV1Resource(storage *storage.StorageContainer) V1ResourceImpl { + return V1ResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/apis.go b/pkg/interfaces/kubeapi/apis/apis.go new file mode 100644 index 0000000..09e0f27 --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/apis.go @@ -0,0 +1,71 @@ +package apis + +import ( + "go-kube/pkg/interfaces/kubeapi/apis/apps" + "go-kube/pkg/interfaces/kubeapi/apis/autoscaling" + "go-kube/pkg/interfaces/kubeapi/apis/cluster" + "go-kube/pkg/storage" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ApisResource interface { + Get() meta.APIGroupList + Apps() apps.AppsResource + Autoscaling() autoscaling.AutoscalingResource + Cluster() cluster.ClusterResource +} + +type ApisResourceImpl struct { + storage *storage.StorageContainer +} + +func (api ApisResourceImpl) Get() meta.APIGroupList { + return meta.APIGroupList{ + Groups: []meta.APIGroup{ + { + Name: "cluster.x-k8s.io", + PreferredVersion: meta.GroupVersionForDiscovery{ + GroupVersion: "cluster.x-k8s.io/v1beta1", + Version: "v1beta1", + }, + Versions: []meta.GroupVersionForDiscovery{ + { + GroupVersion: "cluster.x-k8s.io/v1beta1", + Version: "v1beta1", + }, + }, + }, + { + Name: "autoscaling", + Versions: []meta.GroupVersionForDiscovery{ + { + GroupVersion: "autoscaling/v1", + Version: "v1", + }, + }, + PreferredVersion: meta.GroupVersionForDiscovery{ + GroupVersion: "autoscaling/v1", + Version: "v1", + }, + }, + }, + } +} + +func (api ApisResourceImpl) Apps() apps.AppsResource { + return apps.NewAppsResource(api.storage) +} + +func (api ApisResourceImpl) Autoscaling() autoscaling.AutoscalingResource { + return autoscaling.NewAutoscalingResource(api.storage) +} + +func (api ApisResourceImpl) Cluster() cluster.ClusterResource { + return cluster.NewClusterResource(api.storage) +} + +func NewApisResource(storage *storage.StorageContainer) ApisResource { + return ApisResourceImpl{ + 
storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/apps/apps.go b/pkg/interfaces/kubeapi/apis/apps/apps.go new file mode 100644 index 0000000..7e3f61c --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/apps/apps.go @@ -0,0 +1,24 @@ +package apps + +import ( + v1 "go-kube/pkg/interfaces/kubeapi/apis/apps/v1" + "go-kube/pkg/storage" +) + +type AppsResource interface { + V1() v1.V1Resource +} + +type AppsResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl AppsResourceImpl) V1() v1.V1Resource { + return v1.NewV1Resource(impl.storage) +} + +func NewAppsResource(storage *storage.StorageContainer) AppsResourceImpl { + return AppsResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/apps/v1/daemonsets/daemonsets.go b/pkg/interfaces/kubeapi/apis/apps/v1/daemonsets/daemonsets.go new file mode 100644 index 0000000..bc40dee --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/apps/v1/daemonsets/daemonsets.go @@ -0,0 +1,24 @@ +package daemonsets + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/storage" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type DaemonSetsResource interface { + Get() (v1.DaemonSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) +} + +type DaemonSetsResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl DaemonSetsResourceImpl) Get() (v1.DaemonSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return impl.storage.DaemonSets.GetDaemonSets() +} + +func NewDeamonSetsResource(storage *storage.StorageContainer) DaemonSetsResourceImpl { + return DaemonSetsResourceImpl{storage: storage} +} diff --git a/pkg/interfaces/kubeapi/apis/apps/v1/v1.go b/pkg/interfaces/kubeapi/apis/apps/v1/v1.go new file mode 100644 index 0000000..6392ef1 --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/apps/v1/v1.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "go-kube/pkg/interfaces/kubeapi/apis/apps/v1/daemonsets" + "go-kube/pkg/storage" +) + +type 
V1Resource interface { + DaemonSets() daemonsets.DaemonSetsResource +} + +type V1ResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl V1ResourceImpl) DaemonSets() daemonsets.DaemonSetsResource { + return daemonsets.NewDeamonSetsResource(impl.storage) +} + +func NewV1Resource(storage *storage.StorageContainer) V1ResourceImpl { + return V1ResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/autoscaling/autoscaling.go b/pkg/interfaces/kubeapi/apis/autoscaling/autoscaling.go new file mode 100644 index 0000000..cbd279c --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/autoscaling/autoscaling.go @@ -0,0 +1,24 @@ +package autoscaling + +import ( + v1 "go-kube/pkg/interfaces/kubeapi/apis/autoscaling/v1" + "go-kube/pkg/storage" +) + +type AutoscalingResource interface { + V1() v1.V1Resource +} + +type AutoscalingResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl AutoscalingResourceImpl) V1() v1.V1Resource { + return v1.NewV1Resource(impl.storage) +} + +func NewAutoscalingResource(storage *storage.StorageContainer) AutoscalingResource { + return AutoscalingResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/autoscaling/v1/v1.go b/pkg/interfaces/kubeapi/apis/autoscaling/v1/v1.go new file mode 100644 index 0000000..3a7283c --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/autoscaling/v1/v1.go @@ -0,0 +1,41 @@ +package v1 + +import ( + "go-kube/pkg/storage" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type V1Resource interface { + Get() metav1.APIResourceList +} + +type V1ResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl V1ResourceImpl) Get() metav1.APIResourceList { + return metav1.APIResourceList{TypeMeta: metav1.TypeMeta{Kind: "APIResourceList", APIVersion: "v1"}, + GroupVersion: "autoscaling/v1", + APIResources: []metav1.APIResource{ + { + Name: "horizontalpodautoscalers", + SingularName: "", + Namespaced: true, + Kind: 
"HorizontalPodAutoscaler", + Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, + Categories: []string{"all"}, + }, + { + Name: "horizontalpodautoscalers/status", + SingularName: "", + Namespaced: true, + Kind: "HorizontalPodAutoscaler", + Verbs: []string{"get", "patch", "update"}, + }, + }, + } +} + +func NewV1Resource(storage *storage.StorageContainer) V1Resource { + return V1ResourceImpl{storage: storage} +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/cluster.go b/pkg/interfaces/kubeapi/apis/cluster/cluster.go new file mode 100644 index 0000000..6adf397 --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/cluster.go @@ -0,0 +1,24 @@ +package cluster + +import ( + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1" + "go-kube/pkg/storage" +) + +type ClusterResource interface { + V1Beta1() v1beta1.V1Beta1Resource +} + +type ClusterResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl ClusterResourceImpl) V1Beta1() v1beta1.V1Beta1Resource { + return v1beta1.NewV1Beta1Resource(impl.storage) +} + +func NewClusterResource(storage *storage.StorageContainer) ClusterResource { + return ClusterResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machine/machine.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machine/machine.go new file mode 100644 index 0000000..8375931 --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machine/machine.go @@ -0,0 +1,31 @@ +package machine + +import ( + "go-kube/pkg/storage" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachineResource interface { + Get() cluster.Machine + Put(cluster.Machine) cluster.Machine +} + +type MachineResourceImpl struct { + machineName string + storage *storage.StorageContainer +} + +func (impl MachineResourceImpl) Get() cluster.Machine { + return impl.storage.Machines.GetMachine(impl.machineName) +} + +func (impl MachineResourceImpl) 
Put(m cluster.Machine) cluster.Machine { + return impl.storage.Machines.PutMachine(impl.machineName, m) +} + +func NewMachineResource(name string, storage *storage.StorageContainer) MachineResource { + return MachineResourceImpl{ + machineName: name, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machines.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machines.go new file mode 100644 index 0000000..ecdab2c --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machines.go @@ -0,0 +1,30 @@ +package machines + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines/machine" + "go-kube/pkg/storage" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachinesResource interface { + Get() (cluster.MachineList, *broadcast.BroadcastServer[metav1.WatchEvent]) + Machine(machineName string) machine.MachineResource +} + +type MachinesResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl MachinesResourceImpl) Get() (cluster.MachineList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return impl.storage.Machines.GetMachines() +} + +func (impl MachinesResourceImpl) Machine(machineName string) machine.MachineResource { + return machine.NewMachineResource(machineName, impl.storage) +} + +func NewMachinesResource(storage *storage.StorageContainer) MachinesResource { + return MachinesResourceImpl{storage: storage} +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/machineset.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/machineset.go new file mode 100644 index 0000000..8ab2ece --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/machineset.go @@ -0,0 +1,26 @@ +package machineset + +import ( + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/scale" + 
"go-kube/pkg/storage" +) + +type MachineSetResource interface { + Scale() scale.ScaleResource +} + +type MachineSetResourceImpl struct { + machineSetName string + storage *storage.StorageContainer +} + +func (impl MachineSetResourceImpl) Scale() scale.ScaleResource { + return scale.NewScaleResource(impl.machineSetName, impl.storage) +} + +func NewMachineSetResource(name string, storage *storage.StorageContainer) MachineSetResource { + return MachineSetResourceImpl{ + machineSetName: name, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/scale/scale.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/scale/scale.go new file mode 100644 index 0000000..f682eda --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset/scale/scale.go @@ -0,0 +1,34 @@ +package scale + +import ( + "go-kube/pkg/control" + "go-kube/pkg/storage" + v1 "k8s.io/api/autoscaling/v1" +) + +type ScaleResource interface { + Get() v1.Scale + Put(v1.Scale) v1.Scale +} + +type ScaleResourceImpl struct { + machineSetName string + storage *storage.StorageContainer +} + +func (impl ScaleResourceImpl) Get() v1.Scale { + return impl.storage.MachineSets.GetMachineSetsScale(impl.machineSetName) +} + +func (impl ScaleResourceImpl) Put(m v1.Scale) v1.Scale { + controller := control.NewScaleController(impl.storage) + controller.ScaleMachineSet(impl.machineSetName, m) + return impl.storage.MachineSets.GetMachineSetsScale(impl.machineSetName) +} + +func NewScaleResource(name string, storage *storage.StorageContainer) ScaleResource { + return ScaleResourceImpl{ + machineSetName: name, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machinesets.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machinesets.go new file mode 100644 index 0000000..7dd8a30 --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machinesets.go @@ -0,0 
+1,30 @@ +package machinesets + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets/machineset" + "go-kube/pkg/storage" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachineSetsResource interface { + Get() (cluster.MachineSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) + MachineSet(machineSetName string) machineset.MachineSetResource +} + +type MachineSetsResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl MachineSetsResourceImpl) Get() (cluster.MachineSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return impl.storage.MachineSets.GetMachineSets() +} + +func (impl MachineSetsResourceImpl) MachineSet(name string) machineset.MachineSetResource { + return machineset.NewMachineSetResource(name, impl.storage) +} + +func NewMachineSetsResource(storage *storage.StorageContainer) MachineSetsResource { + return MachineSetsResourceImpl{storage: storage} +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespace/namespace.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespace/namespace.go new file mode 100644 index 0000000..20d1d6a --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespace/namespace.go @@ -0,0 +1,34 @@ +package namespace + +import ( + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines" + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets" + "go-kube/pkg/storage" +) + +type NamespaceResource interface { + Machines() machines.MachinesResource + MachineSets() machinesets.MachineSetsResource +} + +type NamespaceResourceImpl struct { + namespaceName string + storage *storage.StorageContainer +} + +func (impl NamespaceResourceImpl) Machines() machines.MachinesResource { + // TODO [Support for multiple namespaces]: pass namespace name somehow if we want to support multiple namespaces + return 
machines.NewMachinesResource(impl.storage) +} + +func (impl NamespaceResourceImpl) MachineSets() machinesets.MachineSetsResource { + // TODO [Support for multiple namespaces]: pass namespace name somehow if we want to support multiple namespaces + return machinesets.NewMachineSetsResource(impl.storage) +} + +func NewNamespaceResource(name string, storage *storage.StorageContainer) NamespaceResource { + return NamespaceResourceImpl{ + namespaceName: name, + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespaces.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespaces.go new file mode 100644 index 0000000..a8ede10 --- /dev/null +++ b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespaces.go @@ -0,0 +1,32 @@ +package namespaces + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces/namespace" + "go-kube/pkg/storage" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NamespacesResource interface { + Get() (v1.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) + Namespace(namespaceName string) namespace.NamespaceResource +} + +type NamespacesResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl NamespacesResourceImpl) Get() (v1.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return impl.storage.Namespaces.GetNamespaces() +} + +func (impl NamespacesResourceImpl) Namespace(namespaceName string) namespace.NamespaceResource { + return namespace.NewNamespaceResource(namespaceName, impl.storage) +} + +func NewNamespacesResource(storage *storage.StorageContainer) NamespacesResourceImpl { + return NamespacesResourceImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/kubeapi/apis/cluster/v1beta1/v1beta1.go b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/v1beta1.go new file mode 100644 index 0000000..5a79c25 --- /dev/null +++ 
b/pkg/interfaces/kubeapi/apis/cluster/v1beta1/v1beta1.go @@ -0,0 +1,110 @@ +package v1beta1 + +import ( + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machines" + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/machinesets" + "go-kube/pkg/interfaces/kubeapi/apis/cluster/v1beta1/namespaces" + "go-kube/pkg/storage" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type V1Beta1Resource interface { + Get() metav1.APIResourceList + Machines() machines.MachinesResource + MachineSets() machinesets.MachineSetsResource + Namespaces() namespaces.NamespacesResource +} + +type V1Beta1ResourceImpl struct { + storage *storage.StorageContainer +} + +func (impl V1Beta1ResourceImpl) Get() metav1.APIResourceList { + return metav1.APIResourceList{TypeMeta: metav1.TypeMeta{Kind: "APIResourceList", APIVersion: "v1"}, + GroupVersion: "cluster.x-k8s.io/v1beta1", + APIResources: []metav1.APIResource{ + { + Name: "machines", + SingularName: "machine", + Namespaced: true, + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: "Machine", + Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, + Categories: []string{"cluster-api"}, + }, + { + Name: "machines/status", + SingularName: "", + Namespaced: true, + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: "Machine", + Verbs: []string{"get", "patch", "update"}, + }, + { + Name: "clusters", + SingularName: "cluster", + Namespaced: true, + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: "Cluster", + Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, + Categories: []string{"cluster-api"}, + }, + { + Name: "clusters/status", + SingularName: "", + Namespaced: true, + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: "Cluster", + Verbs: []string{"get", "patch", "update"}, + }, + { + Name: "machinesets", + SingularName: "machineset", + Namespaced: true, + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: 
"MachineSet", + Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, + Categories: []string{"cluster-api"}, + }, + { + Name: "machinesets/status", + SingularName: "", + Namespaced: true, + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: "MachineSet", + Verbs: []string{"get", "patch", "update"}, + }, + { + Name: "machinesets/scale", + SingularName: "", + Namespaced: true, + Group: "autoscaling", + Version: "v1", + Kind: "Scale", + Verbs: []string{"get", "patch", "update"}, + }, + }, + } +} + +func (impl V1Beta1ResourceImpl) Machines() machines.MachinesResource { + return machines.NewMachinesResource(impl.storage) +} + +func (impl V1Beta1ResourceImpl) MachineSets() machinesets.MachineSetsResource { + return machinesets.NewMachineSetsResource(impl.storage) +} + +func (impl V1Beta1ResourceImpl) Namespaces() namespaces.NamespacesResource { + return namespaces.NewNamespacesResource(impl.storage) +} + +func NewV1Beta1Resource(storage *storage.StorageContainer) V1Beta1Resource { + return V1Beta1ResourceImpl{storage: storage} +} diff --git a/pkg/interfaces/kubeapi/kubeapi.go b/pkg/interfaces/kubeapi/kubeapi.go new file mode 100644 index 0000000..c0b4a38 --- /dev/null +++ b/pkg/interfaces/kubeapi/kubeapi.go @@ -0,0 +1,30 @@ +package kubeapi + +import ( + "go-kube/pkg/interfaces/kubeapi/api" + "go-kube/pkg/interfaces/kubeapi/apis" + "go-kube/pkg/storage" +) + +type KubeApi interface { + Api() api.ApiResource + Apis() apis.ApisResource +} + +type KubeApiImpl struct { + storage *storage.StorageContainer +} + +func (impl KubeApiImpl) Api() api.ApiResource { + return api.NewApiResource(impl.storage) +} + +func (impl KubeApiImpl) Apis() apis.ApisResource { + return apis.NewApisResource(impl.storage) +} + +func NewKubeApi(storage *storage.StorageContainer) KubeApiImpl { + return KubeApiImpl{ + storage: storage, + } +} diff --git a/pkg/interfaces/router.go b/pkg/interfaces/router.go new file mode 100644 index 0000000..c17ff21 
--- /dev/null +++ b/pkg/interfaces/router.go @@ -0,0 +1,136 @@ +package interfaces + +import ( + "github.com/gorilla/mux" + "go-kube/internal/infrastructure" + "go-kube/pkg/interfaces/kubeapi" + "go-kube/pkg/interfaces/simulation" + "go-kube/pkg/storage" + "io" + autoscaling "k8s.io/api/autoscaling/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" + "k8s.io/klog/v2" + "net/http" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type AdapterApplication struct { + router *mux.Router + kube2 kubeapi.KubeApi + sim2 simulation.SimulationApi +} + +func NewAdapterApplication(storageContainer *storage.StorageContainer) *AdapterApplication { + var router = mux.NewRouter().StrictSlash(true) + return &AdapterApplication{ + router: router, + kube2: kubeapi.NewKubeApi(storageContainer), + sim2: simulation.NewSimulationApi(storageContainer), + } +} + +func (app *AdapterApplication) Start() { + app.registerRoutes() + var port = "8000" + klog.V(1).Info("Starting adapter on port ", port) + err := http.ListenAndServe(":"+port, app.router) + if err != nil { + klog.V(1).ErrorS(err, "Error when calling http.ListenAndServe, error is: %v", err) + return + } +} + +func (app *AdapterApplication) registerRoutes() { + // Simulator API + app.router.HandleFunc("/updateNodes", infrastructure.HandleRequestWithBody(app.sim2.NodeUpdates().Post)).Methods("POST") + app.router.HandleFunc("/updatePods", infrastructure.HandleRequestWithBody(app.sim2.PodUpdates().Post)).Methods("POST") + + // Kubeserver API + app.router.HandleFunc("/api", infrastructure.HandleRequest(app.kube2.Api().Get)).Methods("GET") + app.router.HandleFunc("/api/v1", infrastructure.HandleRequest(app.kube2.Api().V1().Get)).Methods("GET") + // app.router.HandleFunc("/api/v1/namespaces/kube-system/configmaps", infrastructure.DoNothing()).Methods("POST") + app.router.HandleFunc("/api/v1/pods", 
infrastructure.HandleWatchableRequest(app.kube2.Api().V1().Pods().Get)).Methods("GET") + app.router.HandleFunc("/api/v1/nodes", infrastructure.HandleWatchableRequest(app.kube2.Api().V1().Nodes().Get)).Methods("GET") + app.router.HandleFunc("/api/v1/nodes/{nodeName}", infrastructure.HandleRequestWithParams(func(params map[string]string) v1.Node { + return app.kube2.Api().V1().Nodes().Node(params["nodeName"]).Get() + })).Methods("GET") + app.router.HandleFunc("/api/v1/nodes/{nodeName}", infrastructure.HandleRequestWithParamsAndBody(func(params map[string]string, body v1.Node) v1.Node { + return app.kube2.Api().V1().Nodes().Node(params["nodeName"]).Put(body) + })).Methods("PUT") + app.router.HandleFunc("/api/v1/namespaces", infrastructure.HandleWatchableRequest(app.kube2.Api().V1().Namespaces().Get)).Methods("GET") + app.router.HandleFunc("/api/v1/namespaces/default/pods/{podName}/status", infrastructure.HandleRequestWithParamsAndBody(func(params map[string]string, body v1.PodStatusResult) v1.Pod { + return app.kube2.Api().V1().Namespaces().Namespace("default").Pods().Pod(params["podName"]).Status().Patch(body) + })).Methods("PATCH") + + // Special protobuf behavior for binding + app.router.HandleFunc("/api/v1/namespaces/default/pods/{podName}/binding", func(w http.ResponseWriter, r *http.Request) { + klog.V(7).Infof("Req: %s%s?%s", r.Host, r.URL.Path, r.URL.RawQuery) + // Loop over header names + w.Header().Set("Content-Type", "application/json") + + reqBody, _ := io.ReadAll(r.Body) + // https://github.com/kubernetes/kubernetes/blob/61d455ed1173cd89a98442adf4623a29c5681c58/staging/src/k8s.io/apimachinery/pkg/test/runtime_serializer_protobuf_protobuf_test.go#L87 + scheme := runtime.NewScheme() + scheme.AddKnownTypes(schema.GroupVersion{Version: "v1"}, &v1.Binding{}) + serializer := protobuf.NewSerializer(scheme, scheme) + u := &v1.Binding{} + err := runtime.DecodeInto(serializer, reqBody, u) + if err != nil { + klog.V(1).ErrorS(err, "There was an error decoding the 
// We always assume this means it's bound
infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/events.k8s.io/v1", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/events.k8s.io/v1/namespaces/default/events", infrastructure.UnsupportedResource()).Methods("GET", "POST") + app.router.HandleFunc("/apis/policy/v1/poddisruptionbudgets", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/storage.k8s.io/v1/storageclasses", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/storage.k8s.io/v1/csidrivers", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/storage.k8s.io/v1/csinodes", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/storage.k8s.io/v1/csistoragecapacities", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/storage.k8s.io/v1beta1/csistoragecapacities", infrastructure.UnsupportedResource()).Methods("GET") + // Clusterx API + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1", infrastructure.HandleRequest(app.kube2.Apis().Cluster().V1Beta1().Get)).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/clusters", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machines", infrastructure.HandleWatchableRequest(app.kube2.Apis().Cluster().V1Beta1().Machines().Get)).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machinesets", infrastructure.HandleWatchableRequest(app.kube2.Apis().Cluster().V1Beta1().MachineSets().Get)).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machinedeployments", infrastructure.UnsupportedResource()).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machinepools", infrastructure.UnsupportedResource()).Methods("GET") + 
app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machines/{machineName}", infrastructure.HandleRequestWithParams(func(params map[string]string) cluster.Machine { + return app.kube2.Apis().Cluster().V1Beta1().Namespaces().Namespace(params["namespace"]).Machines().Machine(params["machineName"]).Get() + })).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machines/{machineName}", infrastructure.HandleRequestWithParamsAndBody(func(params map[string]string, body cluster.Machine) cluster.Machine { + return app.kube2.Apis().Cluster().V1Beta1().Namespaces().Namespace(params["namespace"]).Machines().Machine(params["machineName"]).Put(body) + })).Methods("PUT") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machinesets/{machinesetName}/scale", infrastructure.HandleRequestWithParams(func(params map[string]string) autoscaling.Scale { + return app.kube2.Apis().Cluster().V1Beta1().Namespaces().Namespace(params["namespace"]).MachineSets().MachineSet(params["machinesetName"]).Scale().Get() + })).Methods("GET") + app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machinesets/{machinesetName}/scale", infrastructure.HandleRequestWithParamsAndBody(func(params map[string]string, body autoscaling.Scale) autoscaling.Scale { + return app.kube2.Apis().Cluster().V1Beta1().Namespaces().Namespace(params["namespace"]).MachineSets().MachineSet(params["machinesetName"]).Scale().Put(body) + })).Methods("PUT") + +} diff --git a/pkg/interfaces/simulation/simulationapi.go b/pkg/interfaces/simulation/simulationapi.go new file mode 100644 index 0000000..6055094 --- /dev/null +++ b/pkg/interfaces/simulation/simulationapi.go @@ -0,0 +1,27 @@ +package simulation + +import ( + "go-kube/pkg/control" + "go-kube/pkg/storage" +) + +type SimulationApi interface { + NodeUpdates() control.NodeUpdatesResource + PodUpdates() control.PodUpdatesResource +} + +type SimulationApiImpl 
+// Information about a successfully bound pod +type BindingInformation struct { + Pod string + Node string +} + +// Information about a failed binding for a pod +type BindingFailureInformation struct { + Pod string + Message string +} + +// Update request from the simulation for nodes +type NodeUpdateRequest struct { + // All nodes that should be scheduled on the machines
a/pkg/mocks/cluster.go b/pkg/mocks/cluster.go deleted file mode 100644 index b446821..0000000 --- a/pkg/mocks/cluster.go +++ /dev/null @@ -1,10 +0,0 @@ -package mocks - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" -) - -func GetClusters() cluster.ClusterList { - return cluster.ClusterList{TypeMeta: metav1.TypeMeta{Kind: "ClusterList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil} -} diff --git a/pkg/mocks/meta.go b/pkg/mocks/meta.go deleted file mode 100644 index 7875299..0000000 --- a/pkg/mocks/meta.go +++ /dev/null @@ -1,232 +0,0 @@ -package mocks - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func GetApiVersions() metav1.APIVersions { - return metav1.APIVersions{ - Versions: []string{"v1"}, - } -} - -func GetV1Api() metav1.APIResourceList { - return metav1.APIResourceList{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "APIResourceList"}, - GroupVersion: "v1", - APIResources: []metav1.APIResource{ - { - Name: "namespaces", - SingularName: "", - Namespaced: false, - Kind: "Namespace", - Verbs: []string{"create", "delete", "get", "list", "patch", "update", "watch"}, - }, - { - Name: "nodes", - SingularName: "", - Namespaced: false, - Kind: "Node", - Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, - }, - { - Name: "nodes/status", - SingularName: "", - Namespaced: false, - Kind: "Node", - Verbs: []string{"get", "patch", "update"}, - }, - { - Name: "persistentvolumeclaims", - SingularName: "", - Namespaced: true, - Kind: "PersistentVolumeClaim", - Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, - }, - { - Name: "persistentvolumes", - SingularName: "", - Namespaced: false, - Kind: "PersistentVolume", - Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, - }, - { - Name: "pods", - SingularName: "", - Namespaced: true, - Kind: "Pod", 
- Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, - Categories: []string{"all"}, - }, - { - Name: "pods/binding", - SingularName: "", - Namespaced: true, - Kind: "Binding", - Verbs: []string{"create"}, - }, - { - Name: "pods/eviction", - SingularName: "", - Namespaced: true, - Group: "policy", - Version: "v1", - Kind: "Eviction", - Verbs: []string{"create"}, - }, - { - Name: "pods/status", - SingularName: "", - Namespaced: true, - Kind: "Pod", - Verbs: []string{"get", "patch", "update"}, - }, - { - Name: "replicationcontrollers", - SingularName: "", - Namespaced: true, - Kind: "ReplicationController", - Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, - Categories: []string{"all"}, - }, - { - Name: "services", - SingularName: "", - Namespaced: true, - Kind: "Service", - Verbs: []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}, - Categories: []string{"all"}, - }, - }, - } -} - -func GetApis() metav1.APIGroupList { - return metav1.APIGroupList{ - Groups: []metav1.APIGroup{ - { - Name: "cluster.x-k8s.io", - PreferredVersion: metav1.GroupVersionForDiscovery{ - GroupVersion: "cluster.x-k8s.io/v1beta1", - Version: "v1beta1", - }, - Versions: []metav1.GroupVersionForDiscovery{ - { - GroupVersion: "cluster.x-k8s.io/v1beta1", - Version: "v1beta1", - }, - }, - }, - { - Name: "autoscaling", - Versions: []metav1.GroupVersionForDiscovery{ - { - GroupVersion: "autoscaling/v1", - Version: "v1", - }, - }, - PreferredVersion: metav1.GroupVersionForDiscovery{ - GroupVersion: "autoscaling/v1", - Version: "v1", - }, - }, - }, - } -} - -func GetClusterAPIDescription() metav1.APIResourceList { - // TODO Maybe delete cluster endpoint - return metav1.APIResourceList{TypeMeta: metav1.TypeMeta{Kind: "APIResourceList", APIVersion: "v1"}, - GroupVersion: "cluster.x-k8s.io/v1beta1", - APIResources: []metav1.APIResource{ - { - Name: "machines", - 
SingularName: "machine", - Namespaced: true, - Group: "cluster.x-k8s.io", - Version: "v1beta1", - Kind: "Machine", - Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, - Categories: []string{"cluster-api"}, - }, - { - Name: "machines/status", - SingularName: "", - Namespaced: true, - Group: "cluster.x-k8s.io", - Version: "v1beta1", - Kind: "Machine", - Verbs: []string{"get", "patch", "update"}, - }, - { - Name: "clusters", - SingularName: "cluster", - Namespaced: true, - Group: "cluster.x-k8s.io", - Version: "v1beta1", - Kind: "Cluster", - Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, - Categories: []string{"cluster-api"}, - }, - { - Name: "clusters/status", - SingularName: "", - Namespaced: true, - Group: "cluster.x-k8s.io", - Version: "v1beta1", - Kind: "Cluster", - Verbs: []string{"get", "patch", "update"}, - }, - { - Name: "machinesets", - SingularName: "machineset", - Namespaced: true, - Group: "cluster.x-k8s.io", - Version: "v1beta1", - Kind: "MachineSet", - Verbs: []string{"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"}, - Categories: []string{"cluster-api"}, - }, - { - Name: "machinesets/status", - SingularName: "", - Namespaced: true, - Group: "cluster.x-k8s.io", - Version: "v1beta1", - Kind: "MachineSet", - Verbs: []string{"get", "patch", "update"}, - }, - { - Name: "machinesets/scale", - SingularName: "", - Namespaced: true, - Group: "autoscaling", - Version: "v1", - Kind: "Scale", - Verbs: []string{"get", "patch", "update"}, - }, - }, - } -} - -func GetAutoscalingApi() metav1.APIResourceList { - return metav1.APIResourceList{TypeMeta: metav1.TypeMeta{Kind: "APIResourceList", APIVersion: "v1"}, - GroupVersion: "autoscaling/v1", - APIResources: []metav1.APIResource{ - { - Name: "horizontalpodautoscalers", - SingularName: "", - Namespaced: true, - Kind: "HorizontalPodAutoscaler", - Verbs: []string{"delete", 
"deletecollection", "get", "list", "patch", "create", "update", "watch"}, - Categories: []string{"all"}, - }, - { - Name: "horizontalpodautoscalers/status", - SingularName: "", - Namespaced: true, - Kind: "HorizontalPodAutoscaler", - Verbs: []string{"get", "patch", "update"}, - }, - }, - } -} diff --git a/pkg/server/debugging/debugging.go b/pkg/server/debugging/debugging.go deleted file mode 100644 index 76178c2..0000000 --- a/pkg/server/debugging/debugging.go +++ /dev/null @@ -1,162 +0,0 @@ -package debugging - -import ( - "fmt" - core "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "kube-rise/pkg/storage" - "net/http" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" - "strconv" -) - -type DebugServer struct { - storages *storage.StorageContainer - podCount int - podList core.PodList -} - -func NewDebugServer(storages *storage.StorageContainer) *DebugServer { - return &DebugServer{storages: storages, podCount: 0, podList: core.PodList{ - TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "PodList"}, - Items: []core.Pod{}, - }} -} - -func (s *DebugServer) InitTestValues(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - // Example machine set - replicas := int32(1) - exampleMachineSet := cluster.MachineSet{ - TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineSet"}, - ObjectMeta: metav1.ObjectMeta{Name: "my-machine-set", Namespace: "kube-system", Annotations: map[string]string{ - "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size": "0", - "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size": "5", - "capacity.cluster-autoscaler.kubernetes.io/memory": "128G", - "capacity.cluster-autoscaler.kubernetes.io/cpu": "16", - "capacity.cluster-autoscaler.kubernetes.io/maxPods": "200", - }}, - Spec: cluster.MachineSetSpec{Replicas: &replicas, Selector: 
metav1.LabelSelector{MatchLabels: map[string]string{ - "machine-set-name": "my-machine-set", - }}}, - Status: cluster.MachineSetStatus{Replicas: replicas, - FullyLabeledReplicas: replicas, - ReadyReplicas: replicas, - AvailableReplicas: replicas, - Conditions: []cluster.Condition{ - { - Type: "Ready", - Status: "True", - }, - }, - }, - } - machineSetList := cluster.MachineSetList{TypeMeta: metav1.TypeMeta{Kind: "MachineSetList", APIVersion: "cluster-x.k8s.io/v1beta1"}, Items: []cluster.MachineSet{exampleMachineSet}} - machineSetAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &exampleMachineSet}} - s.storages.MachineSets.StoreMachineSets(machineSetList, []metav1.WatchEvent{machineSetAddEvent}) - - // Example machine - providerid := "clusterapi://test-node" - nodeReference := core.ObjectReference{Kind: "Node", APIVersion: "v1", Name: "test-node"} - exampleMachine := cluster.Machine{ - TypeMeta: metav1.TypeMeta{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "Machine"}, - ObjectMeta: metav1.ObjectMeta{Name: "my-machine", Namespace: "kube-system", Labels: map[string]string{ - "machine-set-name": "my-machine-set", - }, OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "cluster.x-k8s.io/v1beta1", - Kind: "MachineSet", - Name: "my-machine-set", - }, - }}, - Spec: cluster.MachineSpec{ProviderID: &providerid}, - Status: cluster.MachineStatus{Phase: "Running", NodeRef: &nodeReference}, - } - machineList := cluster.MachineList{TypeMeta: metav1.TypeMeta{Kind: "MachineList", APIVersion: "cluster-x.k8s.io/v1beta1"}, Items: []cluster.Machine{exampleMachine}} - machineAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &exampleMachine}} - s.storages.Machines.StoreMachines(machineList, []metav1.WatchEvent{machineAddEvent}) - - // Example node - quantity, _ := resource.ParseQuantity("4") - podQuantity, _ := resource.ParseQuantity("120") - exampleNode := core.Node{ - TypeMeta: metav1.TypeMeta{APIVersion: "v1", 
Kind: "Node"}, - ObjectMeta: metav1.ObjectMeta{Name: "test-node"}, - Spec: core.NodeSpec{ProviderID: providerid}, - Status: core.NodeStatus{Phase: "Running", Conditions: []core.NodeCondition{ - { - Type: "Ready", - Status: "True", - }, - }, Allocatable: map[core.ResourceName]resource.Quantity{ - "cpu": quantity, - "memory": quantity, - "pods": podQuantity, - }, Capacity: map[core.ResourceName]resource.Quantity{ - "cpu": quantity, - "memory": quantity, - "pods": podQuantity, - }}, - } - nodeList := core.NodeList{TypeMeta: metav1.TypeMeta{Kind: "NodeList", APIVersion: "v1"}, Items: []core.Node{exampleNode}} - nodeAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &exampleNode}} - s.storages.Nodes.StoreNodes(nodeList, []metav1.WatchEvent{nodeAddEvent}) -} - -func (s *DebugServer) InitTestPods(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - podsToBeCreated := r.URL.Query().Get("n") - if podsToBeCreated == "" { - podsToBeCreated = "1" - } - podsToBeCreatedNumber, _ := strconv.Atoi(podsToBeCreated) - createdPods := make([]core.Pod, podsToBeCreatedNumber) - for podsToBeCreatedNumber > 0 { - s.podCount = s.podCount + 1 - cpuQuantity, _ := resource.ParseQuantity("2") - // Example pod - examplePod := core.Pod{ - TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"}, - ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("pod-%d", s.podCount), Namespace: "default", UID: types.UID(fmt.Sprintf("pod-%d", s.podCount))}, - Spec: core.PodSpec{ - Containers: []core.Container{ - { - Name: fmt.Sprintf("pod-%d-container", s.podCount), - Resources: core.ResourceRequirements{ - Limits: map[core.ResourceName]resource.Quantity{ - "cpu": cpuQuantity, - "memory": cpuQuantity, - }, - Requests: map[core.ResourceName]resource.Quantity{ - "cpu": cpuQuantity, - "memory": cpuQuantity, - }, - }, - }, - }, - SchedulerName: "default-scheduler", - }, - Status: core.PodStatus{ - Phase: "Pending", - Conditions: 
[]core.PodCondition{ - { - Type: core.PodScheduled, - Status: core.ConditionFalse, - Reason: core.PodReasonUnschedulable, - }, - }, - }, - } - createdPods = append(createdPods, examplePod) - podsToBeCreatedNumber = podsToBeCreatedNumber - 1 - } - for i, _ := range createdPods { - s.podList.Items = append(s.podList.Items, createdPods[i]) - podCreatedEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &createdPods[i]}} - s.storages.Pods.StorePods(s.podList, []metav1.WatchEvent{podCreatedEvent}) - } -} diff --git a/pkg/server/infrastructure/endpoint.go b/pkg/server/infrastructure/endpoint.go deleted file mode 100644 index bba2f14..0000000 --- a/pkg/server/infrastructure/endpoint.go +++ /dev/null @@ -1,176 +0,0 @@ -package infrastructure - -import ( - "encoding/json" - "fmt" - apps "k8s.io/api/apps/v1" - batch "k8s.io/api/batch/v1" - core "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1" - storage "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "kube-rise/internal/broadcast" - "net/http" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" - exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "strings" -) - -type Endpoint func(w http.ResponseWriter, r *http.Request) - -func HandleRequest[T any](supplier func() T) Endpoint { - return func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - resourceList := supplier() - json.NewEncoder(w).Encode(resourceList) - } -} - -func DoNothing() Endpoint { - return func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - } -} - -func HandleWatchableRequest[T any](supplier func() (T, *broadcast.BroadcastServer[metav1.WatchEvent])) Endpoint { - return func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", 
r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - resourceList, broadcastServer := supplier() - if r.URL.Query().Get("watch") != "" { - // watch initiated HTTP streaming answers - // Sources: https://gist.github.com/vmarmol/b967b29917a34d9307ce - // https://github.com/kubernetes/kubernetes/blob/828495bcc013b77bb63bcb64111e094e455715bb/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/watch.go#L181 - // https://stackoverflow.com/questions/54890809/how-to-use-request-context-instead-of-closenotifier - ctx := r.Context() - flusher, ok := w.(http.Flusher) - if !ok { - http.NotFound(w, r) - return - } - - // Send the initial headers saying we're gonna stream the response. - w.Header().Set("Transfer-Encoding", "chunked") - w.WriteHeader(http.StatusOK) - flusher.Flush() - - enc := json.NewEncoder(w) - - eventChannel := broadcastServer.Subscribe() - defer broadcastServer.CancelSubscription(eventChannel) - - for { - select { - case <-ctx.Done(): - fmt.Println("Client stopped listening") - return - case event := <-eventChannel: - if err := enc.Encode(event); err != nil { - fmt.Printf("unable to encode watch object %T: %v", event, err) - // client disconnect. 
- return - } - if len(eventChannel) == 0 { - flusher.Flush() - } - } - } - } else { - // if no watch we just list the resource - err := json.NewEncoder(w).Encode(resourceList) - if err != nil { - fmt.Printf("unable to encode resource list, error is: %v", err) - return - } - } - } -} - -func UnsupportedResource() Endpoint { - return func(w http.ResponseWriter, r *http.Request) { - // fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - // w.Header().Set("Retry-After", "9999") - // w.WriteHeader(410) - // json.NewEncoder(w).Encode(errors.NewResourceExpired("resource is unsupported by this adapter").ErrStatus) - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - w.Header().Set("Content-Type", "application/json") - if r.URL.Query().Get("watch") != "" { - ctx := r.Context() - flusher, ok := w.(http.Flusher) - if !ok { - http.NotFound(w, r) - return - } - - // Send the initial headers saying we're gonna stream the response. - w.Header().Set("Transfer-Encoding", "chunked") - w.WriteHeader(http.StatusOK) - flusher.Flush() - - for { - select { - case <-ctx.Done(): - fmt.Println("Client stopped listening") - return - } - } - } else { - // if no watch we just list the resource - // just return nothing here, to *string datatype enables us to use nil - // y := map[string]*string{"metadata": nil, "items": nil} - resourceType := strings.Split(r.URL.Path, "/") - - y := GetEmptyResourceList(resourceType[len(resourceType)-1]) - var err error - if y == nil { - fmt.Printf("unseen type %s\n", resourceType[len(resourceType)-1]) - z := map[string]*string{"metadata": nil, "items": nil} - err = json.NewEncoder(w).Encode(z) - } else { - err = json.NewEncoder(w).Encode(y) - } - if err != nil { - fmt.Printf("unable to encode empty resource list, error is: %v", err) - return - } - } - } -} - -func GetEmptyResourceList(resourceType string) runtime.Object { - switch resourceType { - case "replicasets": - return &apps.ReplicaSetList{TypeMeta: metav1.TypeMeta{Kind: 
"ReplicaSetList", APIVersion: "apps/v1"}, Items: nil} - case "persistentvolumes": - return &core.PersistentVolumeList{TypeMeta: metav1.TypeMeta{Kind: "PersistentVolumeList", APIVersion: "v1"}, Items: nil} - case "statefulsets": - return &apps.StatefulSetList{TypeMeta: metav1.TypeMeta{Kind: "StatefulSetList", APIVersion: "apps/v1"}, Items: nil} - case "storageclasses": - return &storage.StorageClassList{TypeMeta: metav1.TypeMeta{Kind: "StorageClassList", APIVersion: "storage.k8s.io/v1"}, Items: nil} - case "csidrivers": - return &storage.CSIDriverList{TypeMeta: metav1.TypeMeta{Kind: "CSIDriverList", APIVersion: "storage.k8s.io/v1"}, Items: nil} - case "poddisruptionbudgets": - return &policy.PodDisruptionBudgetList{TypeMeta: metav1.TypeMeta{Kind: "PodDisruptionBudgetList", APIVersion: "policy/v1"}, Items: nil} - case "csinodes": - return &storage.CSINodeList{TypeMeta: metav1.TypeMeta{Kind: "CSINodeList", APIVersion: "storage.k8s.io/v1"}, Items: nil} - case "persistentvolumeclaims": - return &core.PersistentVolumeClaimList{TypeMeta: metav1.TypeMeta{Kind: "PersistentVolumeClaimList", APIVersion: "v1"}, Items: nil} - case "csistoragecapacities": - return &storage.CSIStorageCapacityList{TypeMeta: metav1.TypeMeta{Kind: "CSIStorageCapacityList", APIVersion: "storage.k8s.io/v1"}, Items: nil} - case "services": - return &core.ServiceList{TypeMeta: metav1.TypeMeta{Kind: "ServiceList", APIVersion: "v1"}, Items: nil} - case "replicationcontrollers": - return &core.ReplicationControllerList{TypeMeta: metav1.TypeMeta{Kind: "ReplicationControllerList", APIVersion: "v1"}, Items: nil} - case "jobs": - return &batch.JobList{TypeMeta: metav1.TypeMeta{Kind: "JobList", APIVersion: "batch/v1"}, Items: nil} - case "machinedeployments": - return &cluster.MachineDeploymentList{TypeMeta: metav1.TypeMeta{Kind: "MachineDeploymentList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil} - case "machinepools": - return &exp.MachinePoolList{TypeMeta: metav1.TypeMeta{Kind: "MachinePoolList", 
APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil} - default: - return nil - } -} diff --git a/pkg/server/kubeapi/apps.go b/pkg/server/kubeapi/apps.go deleted file mode 100644 index c5edb83..0000000 --- a/pkg/server/kubeapi/apps.go +++ /dev/null @@ -1,16 +0,0 @@ -package kubeapi - -import ( - "kube-rise/pkg/server/infrastructure" - "kube-rise/pkg/storage" -) - -type AppsKubeAPIServer struct { - GetDaemonSets infrastructure.Endpoint -} - -func NewAppsKubeAPIServer(storageContainer *storage.StorageContainer) *AppsKubeAPIServer { - var s = &AppsKubeAPIServer{} - s.GetDaemonSets = infrastructure.HandleWatchableRequest(storageContainer.DaemonSets.GetDaemonSets) - return s -} diff --git a/pkg/server/kubeapi/cluster.go b/pkg/server/kubeapi/cluster.go deleted file mode 100644 index 341051f..0000000 --- a/pkg/server/kubeapi/cluster.go +++ /dev/null @@ -1,33 +0,0 @@ -package kubeapi - -import ( - "kube-rise/pkg/mocks" - "kube-rise/pkg/server/infrastructure" - "kube-rise/pkg/storage" -) - -type ClusterKubeAPIServer struct { - GetMachineSets infrastructure.Endpoint - GetMachines infrastructure.Endpoint - GetClusters infrastructure.Endpoint - GetStatusConfigMap infrastructure.Endpoint - PutStatusConfigMap infrastructure.Endpoint - GetMachineSetsScale infrastructure.Endpoint - PutMachineSetsScale infrastructure.Endpoint - GetMachine infrastructure.Endpoint - PutMachine infrastructure.Endpoint -} - -func NewClusterKubeAPIServer(storageContainer *storage.StorageContainer) *ClusterKubeAPIServer { - var server = &ClusterKubeAPIServer{} - server.GetMachines = infrastructure.HandleWatchableRequest(storageContainer.Machines.GetMachines) - server.GetMachineSets = infrastructure.HandleWatchableRequest(storageContainer.MachineSets.GetMachineSets) - server.GetClusters = infrastructure.HandleRequest(mocks.GetClusters) - server.GetStatusConfigMap = infrastructure.HandleRequest(storageContainer.StatusConfigMap.GetStatusConfigMap) - server.PutStatusConfigMap = 
storageContainer.StatusConfigMap.StoreStatusConfigMap - server.GetMachineSetsScale = storageContainer.MachineSets.GetMachineSetsScale - server.PutMachineSetsScale = storageContainer.MachineSets.PutMachineSetsScale - server.GetMachine = storageContainer.Machines.GetMachine - server.PutMachine = storageContainer.Machines.PutMachine - return server -} diff --git a/pkg/server/kubeapi/core.go b/pkg/server/kubeapi/core.go deleted file mode 100644 index e83a201..0000000 --- a/pkg/server/kubeapi/core.go +++ /dev/null @@ -1,24 +0,0 @@ -package kubeapi - -import ( - "kube-rise/pkg/server/infrastructure" - "kube-rise/pkg/storage" -) - -type CoreKubeAPIServer struct { - GetPods infrastructure.Endpoint - GetNodes infrastructure.Endpoint - GetNamespaces infrastructure.Endpoint - PutNode infrastructure.Endpoint - GetNode infrastructure.Endpoint -} - -func NewCoreKubeAPIServer(storageContainer *storage.StorageContainer) *CoreKubeAPIServer { - var s = &CoreKubeAPIServer{} - s.GetPods = infrastructure.HandleWatchableRequest(storageContainer.Pods.GetPods) - s.GetNodes = infrastructure.HandleWatchableRequest(storageContainer.Nodes.GetNodes) - s.GetNamespaces = infrastructure.HandleWatchableRequest(storageContainer.Namespaces.GetNamespaces) - s.PutNode = storageContainer.Nodes.PutNode - s.GetNode = storageContainer.Nodes.GetNode - return s -} diff --git a/pkg/server/kubeapi/meta.go b/pkg/server/kubeapi/meta.go deleted file mode 100644 index aefda0a..0000000 --- a/pkg/server/kubeapi/meta.go +++ /dev/null @@ -1,24 +0,0 @@ -package kubeapi - -import ( - "kube-rise/pkg/mocks" - "kube-rise/pkg/server/infrastructure" -) - -type MetaKubeAPIServer struct { - GetApi infrastructure.Endpoint - GetApis infrastructure.Endpoint - GetClusterAPIDescription infrastructure.Endpoint - GetV1Api infrastructure.Endpoint - GetAutoscalingApi infrastructure.Endpoint -} - -func NewMetaKubeAPIServer() *MetaKubeAPIServer { - var server = &MetaKubeAPIServer{} - server.GetApi = 
infrastructure.HandleRequest(mocks.GetApiVersions) - server.GetApis = infrastructure.HandleRequest(mocks.GetApis) - server.GetClusterAPIDescription = infrastructure.HandleRequest(mocks.GetClusterAPIDescription) - server.GetV1Api = infrastructure.HandleRequest(mocks.GetV1Api) - server.GetAutoscalingApi = infrastructure.HandleRequest(mocks.GetAutoscalingApi) - return server -} diff --git a/pkg/server/router.go b/pkg/server/router.go deleted file mode 100644 index 267ad23..0000000 --- a/pkg/server/router.go +++ /dev/null @@ -1,113 +0,0 @@ -package server - -import ( - "fmt" - "github.com/gorilla/mux" - "kube-rise/pkg/control" - "kube-rise/pkg/server/debugging" - "kube-rise/pkg/server/infrastructure" - "kube-rise/pkg/server/kubeapi" - "kube-rise/pkg/server/simulation" - "kube-rise/pkg/storage" - "net/http" -) - -type KubeAPI struct { - core *kubeapi.CoreKubeAPIServer - meta *kubeapi.MetaKubeAPIServer - apps *kubeapi.AppsKubeAPIServer - cluster *kubeapi.ClusterKubeAPIServer -} - -type AdapterApplication struct { - router *mux.Router - sim *simulation.SimServer - kube *KubeAPI - debugging *debugging.DebugServer -} - -func NewAdapterApplication(storageContainer *storage.StorageContainer) *AdapterApplication { - var router = mux.NewRouter().StrictSlash(true) - var kubeUpdateController = control.NewKubeUpdateController(storageContainer) - return &AdapterApplication{ - router: router, - sim: simulation.NewSimServer(kubeUpdateController), - kube: &KubeAPI{ - core: kubeapi.NewCoreKubeAPIServer(storageContainer), - meta: kubeapi.NewMetaKubeAPIServer(), - apps: kubeapi.NewAppsKubeAPIServer(storageContainer), - cluster: kubeapi.NewClusterKubeAPIServer(storageContainer), - }, - debugging: debugging.NewDebugServer(storageContainer), - } -} - -func (app *AdapterApplication) Start() { - app.registerRoutes() - var port = "8000" - fmt.Println("Starting server on port %v", port) - err := http.ListenAndServe(":"+port, app.router) - if err != nil { - fmt.Printf("Error when calling 
http.ListenAndServe, error is: %v", err) - return - } -} - -func (app *AdapterApplication) registerRoutes() { - // Simulator API - app.router.HandleFunc("/updateNodes", app.sim.UpdateNodes).Methods("POST") - app.router.HandleFunc("/updatePods", app.sim.UpdatePods).Methods("POST") - - // Kubeserver API - // Currently with function - app.router.HandleFunc("/api/v1/namespaces/default/pods/{podName}/status", app.sim.UpdateStatus).Methods("PATCH") - app.router.HandleFunc("/api/v1/namespaces/default/pods/{podName}/binding", app.sim.UpdateBinding).Methods("POST") - app.router.HandleFunc("/api/v1/pods", app.kube.core.GetPods).Methods("GET") - app.router.HandleFunc("/api/v1/nodes", app.kube.core.GetNodes).Methods("GET") - app.router.HandleFunc("/api/v1/namespaces", app.kube.core.GetNamespaces).Methods("GET") - - // Other Kubernetes API mocks - app.router.HandleFunc("/apis/apps/v1/replicasets", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/api/v1/persistentvolumes", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/apps/v1/statefulsets", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/storage.k8s.io/v1/storageclasses", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/storage.k8s.io/v1/csidrivers", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/policy/v1/poddisruptionbudgets", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/storage.k8s.io/v1/csinodes", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/api/v1/persistentvolumeclaims", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/storage.k8s.io/v1/csistoragecapacities", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/api/v1/services", infrastructure.UnsupportedResource()).Methods("GET") - 
app.router.HandleFunc("/api/v1/replicationcontrollers", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/events.k8s.io/v1", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/events.k8s.io/v1/namespaces/default/events", infrastructure.UnsupportedResource()).Methods("GET", "POST") - app.router.HandleFunc("/apis/apps", infrastructure.UnsupportedResource()).Methods("GET") - - // Additional dependencies for cluster-autoscaler - app.router.HandleFunc("/api", app.kube.meta.GetApi).Methods("GET") - app.router.HandleFunc("/api/v1", app.kube.meta.GetV1Api).Methods("GET") - app.router.HandleFunc("/apis/autoscaling/v1", app.kube.meta.GetAutoscalingApi).Methods("GET") - app.router.HandleFunc("/apis", app.kube.meta.GetApis).Methods("GET") - // app.router.HandleFunc("/api/v1/namespaces/kube-system/configmaps", infrastructure.DoNothing()).Methods("POST") - app.router.HandleFunc("/apis/apps/v1/daemonsets", app.kube.apps.GetDaemonSets).Methods("GET") - app.router.HandleFunc("/apis/batch/v1/jobs", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/api/v1/namespaces/kube-system/configmaps/cluster-autoscaler-status", app.kube.cluster.GetStatusConfigMap).Methods("GET") - app.router.HandleFunc("/api/v1/namespaces/kube-system/configmaps/cluster-autoscaler-status", app.kube.cluster.PutStatusConfigMap).Methods("PUT") - app.router.HandleFunc("/api/v1/nodes/{nodeName}", app.kube.core.PutNode).Methods("PUT") - app.router.HandleFunc("/api/v1/nodes/{nodeName}", app.kube.core.GetNode).Methods("GET") - - // Clusterx API - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1", app.kube.meta.GetClusterAPIDescription).Methods("GET") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/clusters", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machines", app.kube.cluster.GetMachines).Methods("GET") - 
app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machines/{machineName}", app.kube.cluster.GetMachine).Methods("GET") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machines/{machineName}", app.kube.cluster.PutMachine).Methods("PUT") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machinesets", app.kube.cluster.GetMachineSets).Methods("GET") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machinesets/{machinesetName}/scale", app.kube.cluster.GetMachineSetsScale).Methods("GET") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/machinesets/{machinesetName}/scale", app.kube.cluster.PutMachineSetsScale).Methods("PUT") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machinepools", infrastructure.UnsupportedResource()).Methods("GET") - app.router.HandleFunc("/apis/cluster.x-k8s.io/v1beta1/machinedeployments", infrastructure.UnsupportedResource()).Methods("GET") - - // For debugging - app.router.HandleFunc("/debugging", app.debugging.InitTestValues) - app.router.HandleFunc("/debugPods", app.debugging.InitTestPods) -} diff --git a/pkg/server/simulation/simulation.go b/pkg/server/simulation/simulation.go deleted file mode 100644 index 29450ad..0000000 --- a/pkg/server/simulation/simulation.go +++ /dev/null @@ -1,163 +0,0 @@ -package simulation - -import ( - "encoding/json" - "fmt" - "github.com/gorilla/mux" - "io" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" - "kube-rise/pkg/control" - "kube-rise/pkg/entity" - "net/http" -) - -type SimServer struct { - kubeupdatecontroller control.KubeUpdateController -} - -func NewSimServer(kuc control.KubeUpdateController) *SimServer { - return &SimServer{kubeupdatecontroller: kuc} -} - -func (s *SimServer) UpdateNodes(w http.ResponseWriter, r *http.Request) { - 
w.Header().Set("Content-Type", "application/json") - - reqBody, _ := io.ReadAll(r.Body) - // fmt.Println(string(reqBody)) - var u entity.NodeUpdateRequest - err := json.Unmarshal(reqBody, &u) - if err != nil { - fmt.Printf("There was an error decoding the json. err = %s", err) - w.WriteHeader(500) - return - } - - fmt.Println("Update nodes") - // fmt.Println(u) - - var response entity.NodeUpdateResponse - if u.MachineSets == nil || len(u.MachineSets) == 0 { - // No cluster scaling - response = s.kubeupdatecontroller.UpdateNodes(u.AllNodes, u.Events) - } else { - response = s.kubeupdatecontroller.InitMachinesNodes(u.AllNodes, u.Events, u.MachineSets, u.Machines) - } - - err = json.NewEncoder(w).Encode(response) - if err != nil { - fmt.Printf("Unable to encode response for node update, error is: %v", err) - return - } -} - -func (s *SimServer) UpdatePods(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - reqBody, _ := io.ReadAll(r.Body) - - // fmt.Println(string(reqBody)) - - var u entity.PodsUpdateRequest - err := json.Unmarshal(reqBody, &u) - if err != nil { - fmt.Printf("There was an error decoding the json. 
err = %s", err) - w.WriteHeader(500) - return - } - - fmt.Println("Update pods") - // fmt.Println(u) - - var responseChan = s.kubeupdatecontroller.UpdatePods(u.AllPods, u.Events, u.PodsToBePlaced, false) - var finalResponse entity.PodsUpdateResponse - if len(u.PodsToBePlaced.Items) > 0 { - // Immediately wait for the response from the channel - select { - case response := <-responseChan: - finalResponse = response - // Should not get stuck with the new scheduling logic - // case <-time.After(1000 * time.Second): - // fmt.Println("Scheduler took to long using default response") - // finalResponse = s.kubeupdatecontroller.CreateDefaultResponse() - } - } else { - finalResponse = s.kubeupdatecontroller.CreateDefaultResponse() - } - // s.kubeupdatecontroller.UpdatePods(u.AllPods, u.Events, u.PodsToBePlaced, true) - err = json.NewEncoder(w).Encode(finalResponse) - if err != nil { - fmt.Printf("Unable to encode response for pod update, error is: %v", err) - return - } -} - -func (s *SimServer) UpdateStatus(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - // Loop over header names - // for name, values := range r.Header { - // Loop over all values for the name. - // for _, value := range values { - // fmt.Println(name, value) - // } - // } - w.Header().Set("Content-Type", "application/json") - - reqBody, _ := io.ReadAll(r.Body) - // fmt.Println(string(reqBody)) - var u v1.PodStatusResult - err := json.Unmarshal(reqBody, &u) - // fmt.Println(u) - if err != nil { - fmt.Printf("There was an error decoding the json. 
err = %s", err) - w.WriteHeader(500) - return - } - - // Get pod name as path parameter - pathParams := mux.Vars(r) - podName := pathParams["podName"] - - // We always asume this means it's failed - finalResponse := s.kubeupdatecontroller.Failed(u.Status, podName) - - err = json.NewEncoder(w).Encode(finalResponse) - if err != nil { - fmt.Printf("Unable to encode response for pod status update, error is: %v", err) - return - } -} - -func (s *SimServer) UpdateBinding(w http.ResponseWriter, r *http.Request) { - fmt.Printf("Req: %s %s %s\n", r.Host, r.URL.Path, r.URL.RawQuery) - // Loop over header names - // for name, values := range r.Header { - // Loop over all values for the name. - // for _, value := range values { - // fmt.Println(name, value) - // } - // } - w.Header().Set("Content-Type", "application/json") - - reqBody, _ := io.ReadAll(r.Body) - // https://github.com/kubernetes/kubernetes/blob/61d455ed1173cd89a98442adf4623a29c5681c58/staging/src/k8s.io/apimachinery/pkg/test/runtime_serializer_protobuf_protobuf_test.go#L87 - scheme := runtime.NewScheme() - scheme.AddKnownTypes(schema.GroupVersion{Version: "v1"}, &v1.Binding{}) - serializer := protobuf.NewSerializer(scheme, scheme) - u := &v1.Binding{} - err := runtime.DecodeInto(serializer, reqBody, u) - if err != nil { - fmt.Printf("There was an error decoding the protobuf. 
err = %s", err) - w.WriteHeader(500) - return - } - - // Get pod name as path parameter - pathParams := mux.Vars(r) - podName := pathParams["podName"] - - // We always asume this means it's binded - s.kubeupdatecontroller.Binded(*u, podName) -} diff --git a/pkg/storage/adapterstate.go b/pkg/storage/adapterstate.go new file mode 100644 index 0000000..f0f6db6 --- /dev/null +++ b/pkg/storage/adapterstate.go @@ -0,0 +1,13 @@ +package storage + +type IdStorage interface { + GetNextId() int + StoreNextId(id int) +} + +type AdapterStateStorage interface { + StoreClusterAutoscalerActive(active bool) + IsClusterAutoscalerActive() bool + StoreClusterAutoscalingDone(done bool) + IsClusterAutoscalingDone() bool +} diff --git a/pkg/storage/buffer.go b/pkg/storage/buffer.go new file mode 100644 index 0000000..09a8224 --- /dev/null +++ b/pkg/storage/buffer.go @@ -0,0 +1,10 @@ +package storage + +type Buffer[T any] interface { + Size() int + Empty() bool + Items() []T + Clear() []T + Put(newItem T) + PutAll(newItems []T) +} diff --git a/pkg/storage/channelwrapper.go b/pkg/storage/channelwrapper.go new file mode 100644 index 0000000..205b06a --- /dev/null +++ b/pkg/storage/channelwrapper.go @@ -0,0 +1,6 @@ +package storage + +type ChannelWrapper[T any] interface { + InitChannel() chan T + Get() chan T +} diff --git a/pkg/storage/cluster.go b/pkg/storage/cluster.go deleted file mode 100644 index 78a78f8..0000000 --- a/pkg/storage/cluster.go +++ /dev/null @@ -1,44 +0,0 @@ -package storage - -import ( - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "kube-rise/internal/broadcast" - "net/http" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" -) - -type MachineSetStorage interface { - // Stores a machineset list in the storage - StoreMachineSets(ms cluster.MachineSetList, events []metav1.WatchEvent) - // Returns the current machinesets - GetMachineSets() (cluster.MachineSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) - // Returns the current scale of 
the machineset - GetMachineSetsScale(w http.ResponseWriter, r *http.Request) - // Puts the current scale of the machineset - PutMachineSetsScale(w http.ResponseWriter, r *http.Request) - // Is a upscaling possible on any MachineSet - IsUpscalingPossible() bool - // Is a dowscaling possible on any MachineSet - IsDownscalingPossible() bool -} - -type MachineStorage interface { - // Stores a machineset list in the storage - StoreMachines(ms cluster.MachineList, events []metav1.WatchEvent) - // Returns the current machinesets - GetMachines() (cluster.MachineList, *broadcast.BroadcastServer[metav1.WatchEvent]) - // Gets a single machine - GetMachine(w http.ResponseWriter, r *http.Request) - // Puts a machine - PutMachine(w http.ResponseWriter, r *http.Request) - // Scales machines - ScaleMachines(machineSet cluster.MachineSet, changedNodes []core.Node, amount int) ([]cluster.Machine, error) -} - -type StatusConfigMapStorage interface { - // Stores the status config map - StoreStatusConfigMap(w http.ResponseWriter, r *http.Request) - // Returns the current status config map - GetStatusConfigMap() core.ConfigMap -} diff --git a/pkg/storage/core.go b/pkg/storage/core.go deleted file mode 100644 index 5cc16a1..0000000 --- a/pkg/storage/core.go +++ /dev/null @@ -1,46 +0,0 @@ -package storage - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "kube-rise/internal/broadcast" - "net/http" - cluster "sigs.k8s.io/cluster-api/api/v1beta1" -) - -type PodStorage interface { - // Stores a podlist in the storage - StorePods(pods v1.PodList, events []metav1.WatchEvent) - // Retrieves the current podList from the storage - GetPods() (v1.PodList, *broadcast.BroadcastServer[metav1.WatchEvent]) - // UpdatePodStatus(pod v1.Pod) - DeletePods(events []metav1.WatchEvent) - // Binds a pod to a node - BindPod(podIndex int, nodeName string) - // Reacts on scheduling fail - FailedPod(podIndex int, status v1.PodStatus) -} - -type NodeStorage interface { - // 
Stores a nodelist in the storage - StoreNodes(nodes v1.NodeList, events []metav1.WatchEvent) - // Retrieves the current nodeList from the storage - GetNodes() (v1.NodeList, *broadcast.BroadcastServer[metav1.WatchEvent]) - // Edits a node - PutNode(w http.ResponseWriter, r *http.Request) - // Gets a single node - GetNode(w http.ResponseWriter, r *http.Request) - // Scale nodes - ScaleNodes(addedMachines []cluster.Machine, amount int) ([]v1.Node, error) - // Channel for Node Upscaling - GetNodeUpscalingChannel() *broadcast.BroadcastServer[v1.Node] - // Channel for node downscaling - GetNodeDownscalingChannel() *broadcast.BroadcastServer[v1.Node] -} - -type NamespaceStorage interface { - // Returns the current namespaces - GetNamespaces() (v1.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) - // Stores a namespace list in the storage - StoreNamespaces(ns v1.NamespaceList) -} diff --git a/pkg/storage/apps.go b/pkg/storage/daemonset.go similarity index 91% rename from pkg/storage/apps.go rename to pkg/storage/daemonset.go index 233a858..90639f2 100644 --- a/pkg/storage/apps.go +++ b/pkg/storage/daemonset.go @@ -1,9 +1,9 @@ package storage import ( + "go-kube/internal/broadcast" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "kube-rise/internal/broadcast" ) type DaemonSetStorage interface { diff --git a/pkg/storage/inmemorystorage/adapterstate.go b/pkg/storage/inmemorystorage/adapterstate.go new file mode 100644 index 0000000..3f346f4 --- /dev/null +++ b/pkg/storage/inmemorystorage/adapterstate.go @@ -0,0 +1,47 @@ +package inmemorystorage + +type IdInMemoryStorage struct { + nextId int +} + +func (s *IdInMemoryStorage) GetNextId() int { + return s.nextId +} + +func (s *IdInMemoryStorage) StoreNextId(id int) { + s.nextId = id +} + +func NewIdInMemoryStorage() IdInMemoryStorage { + return IdInMemoryStorage{ + nextId: 1, + } +} + +type AdapterStateInMemoryStorage struct { + clusterAutoscalerActive bool + clusterAutoscalingDone bool +} + 
+func (s *AdapterStateInMemoryStorage) StoreClusterAutoscalerActive(active bool) { + s.clusterAutoscalerActive = active +} + +func (s *AdapterStateInMemoryStorage) IsClusterAutoscalerActive() bool { + return s.clusterAutoscalerActive +} + +func (s *AdapterStateInMemoryStorage) StoreClusterAutoscalingDone(done bool) { + s.clusterAutoscalingDone = done +} + +func (s *AdapterStateInMemoryStorage) IsClusterAutoscalingDone() bool { + return s.clusterAutoscalingDone +} + +func NewAdapterStateInMemoryStorage() AdapterStateInMemoryStorage { + return AdapterStateInMemoryStorage{ + clusterAutoscalerActive: false, + clusterAutoscalingDone: true, + } +} diff --git a/pkg/storage/inmemorystorage/buffer.go b/pkg/storage/inmemorystorage/buffer.go new file mode 100644 index 0000000..f92c681 --- /dev/null +++ b/pkg/storage/inmemorystorage/buffer.go @@ -0,0 +1,42 @@ +package inmemorystorage + +type InMemBuffer[T any] struct { + buffer []T +} + +func (b *InMemBuffer[T]) Size() int { + return len(b.buffer) +} + +func (b *InMemBuffer[T]) Empty() bool { + return len(b.buffer) == 0 +} + +func (b *InMemBuffer[T]) Items() []T { + // Create a copy of the actual buffer + cpy := make([]T, len(b.buffer)) + copy(cpy, b.buffer) + return cpy +} + +func (b *InMemBuffer[T]) Clear() []T { + cpy := b.Items() + b.buffer = make([]T, 0) + return cpy +} + +func (b *InMemBuffer[T]) Put(newItem T) { + b.buffer = append(b.buffer, newItem) +} + +func (b *InMemBuffer[T]) PutAll(newItems []T) { + for _, item := range newItems { + b.Put(item) + } +} + +func NewInMemBuffer[T any]() InMemBuffer[T] { + return InMemBuffer[T]{ + buffer: make([]T, 0), + } +} diff --git a/pkg/storage/inmemorystorage/channelwrapper.go b/pkg/storage/inmemorystorage/channelwrapper.go new file mode 100644 index 0000000..e87af1d --- /dev/null +++ b/pkg/storage/inmemorystorage/channelwrapper.go @@ -0,0 +1,20 @@ +package inmemorystorage + +type InMemChannelWrapper[T any] struct { + channel chan T +} + +func (w *InMemChannelWrapper[T]) 
InitChannel() chan T { + w.channel = make(chan T) + return w.channel +} + +func (w *InMemChannelWrapper[T]) Get() chan T { + return w.channel +} + +func NewInMemChannelWrapper[T any]() InMemChannelWrapper[T] { + return InMemChannelWrapper[T]{ + channel: make(chan T), + } +} diff --git a/internal/inmemorystorage/apps.go b/pkg/storage/inmemorystorage/daemonset.go similarity index 97% rename from internal/inmemorystorage/apps.go rename to pkg/storage/inmemorystorage/daemonset.go index e2f262a..4a495c4 100644 --- a/internal/inmemorystorage/apps.go +++ b/pkg/storage/inmemorystorage/daemonset.go @@ -2,9 +2,9 @@ package inmemorystorage import ( "context" + "go-kube/internal/broadcast" apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "kube-rise/internal/broadcast" ) type DaemonSetInMemoryStorage struct { diff --git a/pkg/storage/inmemorystorage/machine.go b/pkg/storage/inmemorystorage/machine.go new file mode 100644 index 0000000..d733291 --- /dev/null +++ b/pkg/storage/inmemorystorage/machine.go @@ -0,0 +1,97 @@ +package inmemorystorage + +import ( + "context" + "go-kube/internal/broadcast" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachineInMemoryStorage struct { + machines cluster.MachineList + machineEventChan chan metav1.WatchEvent + machineBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] + machineCount int +} + +func (s *MachineInMemoryStorage) GetMachines() (cluster.MachineList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return s.machines, s.machineBroadcaster +} + +func (s *MachineInMemoryStorage) StoreMachines(ms cluster.MachineList, events []metav1.WatchEvent) { + s.machines = ms + for _, n := range events { + s.machineEventChan <- n + if n.Type == "ADDED" { + s.IncrementMachineCount() + } + } +} + +func (s *MachineInMemoryStorage) GetMachine(machineName string) cluster.Machine { + var machineRef 
cluster.Machine + for _, ms := range s.machines.Items { + if ms.Name == machineName { + machineRef = ms + break + } + } + return machineRef +} + +func (s *MachineInMemoryStorage) PutMachine(machineName string, u cluster.Machine) cluster.Machine { + indexForReplacement := -1 + for index, machine := range s.machines.Items { + if machine.Name == machineName { + indexForReplacement = index + break + } + } + s.machines.Items[indexForReplacement] = u + return u +} + +func (s *MachineInMemoryStorage) AddMachine(machine cluster.Machine) { + s.machines.Items = append(s.machines.Items, machine) + // Fire watch event + machineAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &machine}} + s.machineEventChan <- machineAddEvent +} + +func (s *MachineInMemoryStorage) DeleteMachine(machineName string) cluster.Machine { + var index int + var deletedMachine cluster.Machine + for i, machine := range s.machines.Items { + if machine.Name == machineName { + index = i + deletedMachine = machine + break + } + } + s.machines.Items[index] = s.machines.Items[len(s.machines.Items)-1] + s.machines.Items = s.machines.Items[:len(s.machines.Items)-1] + // Fire deleted event (use the saved copy: after the swap-remove above, + // Items[index] holds a different machine, or is out of range if the + // deleted machine was the last element) + s.machineEventChan <- metav1.WatchEvent{Type: "DELETED", Object: runtime.RawExtension{Object: &deletedMachine}} + return deletedMachine +} + +func (s *MachineInMemoryStorage) IncrementMachineCount() { + s.machineCount = s.machineCount + 1 + klog.V(4).Infof("Incremented machine count to %d", s.machineCount) +} + +func (s *MachineInMemoryStorage) GetMachineCount() int { + return s.machineCount +} + +func NewMachineInMemoryStorage() MachineInMemoryStorage { + machineEventChan := make(chan metav1.WatchEvent, 500) + return MachineInMemoryStorage{ + machines: cluster.MachineList{TypeMeta: metav1.TypeMeta{Kind: "MachineList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil}, + machineEventChan: machineEventChan, + machineBroadcaster: broadcast.NewBroadcastServer(context.TODO(), 
"MachineBroadcaster", machineEventChan), + machineCount: 0, + } +} diff --git a/pkg/storage/inmemorystorage/machineset.go b/pkg/storage/inmemorystorage/machineset.go new file mode 100644 index 0000000..76dd7a4 --- /dev/null +++ b/pkg/storage/inmemorystorage/machineset.go @@ -0,0 +1,104 @@ +package inmemorystorage + +import ( + "context" + "go-kube/internal/broadcast" + "strconv" + + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachineSetsInMemoryStorage struct { + machineSets cluster.MachineSetList + machineSetsEventChan chan metav1.WatchEvent + nodeStorage *NodeInMemoryStorage + machineStorage *MachineInMemoryStorage + machineSetBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] +} + +func (s *MachineSetsInMemoryStorage) GetMachineSets() (cluster.MachineSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return s.machineSets, s.machineSetBroadcaster +} + +func (s *MachineSetsInMemoryStorage) StoreMachineSets(ms cluster.MachineSetList, events []metav1.WatchEvent) { + s.machineSets = ms + for _, e := range events { + s.machineSetsEventChan <- e + } +} + +func (s *MachineSetsInMemoryStorage) GetMachineSet(machineSetName string) cluster.MachineSet { + var machineSet cluster.MachineSet + for _, set := range s.machineSets.Items { + if set.Name == machineSetName { + machineSet = set + break + } + } + return machineSet +} + +func (s *MachineSetsInMemoryStorage) PutMachineSet(machineSetName string, machineSet cluster.MachineSet) cluster.MachineSet { + index := -1 + for i, set := range s.machineSets.Items { + if set.Name == machineSetName { + index = i + break + } + } + s.machineSets.Items[index] = machineSet + // Fire MODIFIED event + s.machineSetsEventChan <- metav1.WatchEvent{Type: "MODIFIED", Object: runtime.RawExtension{Object: &machineSet}} + return machineSet +} + +func (s *MachineSetsInMemoryStorage) 
GetMachineSetsScale(machineSetName string) v1.Scale { + var machineSetRef cluster.MachineSet + for _, ms := range s.machineSets.Items { + if ms.Name == machineSetName { + machineSetRef = ms + break + } + } + + result := v1.Scale{TypeMeta: metav1.TypeMeta{APIVersion: "autoscaling/v1", Kind: "Scale"}, + ObjectMeta: metav1.ObjectMeta{Name: machineSetName}, + Spec: v1.ScaleSpec{Replicas: *machineSetRef.Spec.Replicas}, + Status: v1.ScaleStatus{Replicas: *machineSetRef.Spec.Replicas}} + + return result +} + +func (s *MachineSetsInMemoryStorage) IsUpscalingPossible() bool { + for _, ms := range s.machineSets.Items { + maxSize, _ := strconv.Atoi(ms.Annotations["cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size"]) + if *ms.Spec.Replicas < int32(maxSize) { + return true + } + } + return false +} + +func (s *MachineSetsInMemoryStorage) IsDownscalingPossible() bool { + for _, ms := range s.machineSets.Items { + minSize, _ := strconv.Atoi(ms.Annotations["cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size"]) + if *ms.Spec.Replicas > int32(minSize) { + return true + } + } + return false +} + +func NewMachineSetInMemoryStorage(nodeStorage *NodeInMemoryStorage, machineStorage *MachineInMemoryStorage) MachineSetsInMemoryStorage { + machineSetsEventChan := make(chan metav1.WatchEvent, 500) + return MachineSetsInMemoryStorage{ + machineSets: cluster.MachineSetList{TypeMeta: metav1.TypeMeta{Kind: "MachineSetList", APIVersion: "cluster.x-k8s.io/v1beta1"}, Items: nil}, + machineSetsEventChan: machineSetsEventChan, + nodeStorage: nodeStorage, + machineStorage: machineStorage, + machineSetBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "MachineSetBroadcaster", machineSetsEventChan), + } +} diff --git a/pkg/storage/inmemorystorage/namespace.go b/pkg/storage/inmemorystorage/namespace.go new file mode 100644 index 0000000..46513e9 --- /dev/null +++ b/pkg/storage/inmemorystorage/namespace.go @@ -0,0 +1,45 @@ +package inmemorystorage + +import ( + "context" + 
"go-kube/internal/broadcast" + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NamespaceInMemoryStorage struct { + namespaces core.NamespaceList + namespaceEventChan chan metav1.WatchEvent + namespaceBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] +} + +func (s *NamespaceInMemoryStorage) GetNamespaces() (core.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return s.namespaces, s.namespaceBroadcaster +} + +func (s *NamespaceInMemoryStorage) StoreNamespaces(namespaces core.NamespaceList) { + s.namespaces = namespaces +} + +func (s *NamespaceInMemoryStorage) GetNamespace(namespaceName string) core.Namespace { + var u core.Namespace + for _, element := range s.namespaces.Items { + if namespaceName == element.Name { + u = element + break + } + } + return u +} + +func NewNamespaceInMemoryStorage() NamespaceInMemoryStorage { + var namespace core.Namespace + namespace.SetName("default") + namespace.Status = core.NamespaceStatus{Phase: "Active"} + namespaceEventChan := make(chan metav1.WatchEvent) + return NamespaceInMemoryStorage{ + namespaces: core.NamespaceList{TypeMeta: metav1.TypeMeta{Kind: "NamespaceList", APIVersion: "v1"}, Items: []core.Namespace{namespace}}, + namespaceEventChan: namespaceEventChan, + namespaceBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NamespaceBroadcaster", namespaceEventChan), + } +} diff --git a/pkg/storage/inmemorystorage/node.go b/pkg/storage/inmemorystorage/node.go new file mode 100644 index 0000000..d1607ca --- /dev/null +++ b/pkg/storage/inmemorystorage/node.go @@ -0,0 +1,120 @@ +package inmemorystorage + +import ( + "context" + "go-kube/internal/broadcast" + "go-kube/pkg/storage" + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +type NodeInMemoryStorage struct { + nodes core.NodeList + nodeEventChan chan metav1.WatchEvent + nodeBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] + 
nodeUpscalingChan chan core.Node + nodeDownscalingChan chan core.Node + nodeUpscalingBroadcaster *broadcast.BroadcastServer[core.Node] + nodeDownscalingBroadcaster *broadcast.BroadcastServer[core.Node] + newNodes InMemBuffer[core.Node] + deletedNodes InMemBuffer[core.Node] +} + +func (s *NodeInMemoryStorage) GetNodes() (core.NodeList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return s.nodes, s.nodeBroadcaster +} + +func (s *NodeInMemoryStorage) StoreNodes(nodes core.NodeList, events []metav1.WatchEvent) { + s.nodes = nodes + for _, n := range events { + s.nodeEventChan <- n + } +} + +func (s *NodeInMemoryStorage) PutNode(nodeName string, node core.Node) core.Node { + indexForReplacement := -1 + for index, node := range s.nodes.Items { + if node.Name == nodeName { + indexForReplacement = index + break + } + } + s.nodes.Items[indexForReplacement] = node + return node +} + +func (s *NodeInMemoryStorage) GetNode(name string) core.Node { + var u core.Node + + for _, node := range s.nodes.Items { + if node.Name == name { + u = node + break + } + } + + return u +} + +func (s *NodeInMemoryStorage) AddNode(node core.Node) { + s.nodes.Items = append(s.nodes.Items, node) + // Fire added event + nodeAddEvent := metav1.WatchEvent{Type: "ADDED", Object: runtime.RawExtension{Object: &node}} + s.nodeEventChan <- nodeAddEvent + s.nodeUpscalingChan <- node +} + +func (s *NodeInMemoryStorage) DeleteNode(nodeName string) core.Node { + // Get index + var index int + var deletedNode core.Node + for i, node := range s.nodes.Items { + if node.Name == nodeName { + index = i + deletedNode = node + break + } + } + s.nodes.Items[index] = s.nodes.Items[len(s.nodes.Items)-1] + s.nodes.Items = s.nodes.Items[:len(s.nodes.Items)-1] + // Fire event + nodeDeleteEvent := metav1.WatchEvent{Type: "DELETED", Object: runtime.RawExtension{Object: &deletedNode}} + s.nodeEventChan <- nodeDeleteEvent + s.nodeDownscalingChan <- deletedNode + return deletedNode +} + +func (s *NodeInMemoryStorage) 
GetNodeUpscalingChannel() *broadcast.BroadcastServer[core.Node] { + return s.nodeUpscalingBroadcaster +} + +func (s *NodeInMemoryStorage) GetNodeDownscalingChannel() *broadcast.BroadcastServer[core.Node] { + return s.nodeDownscalingBroadcaster +} + +func (s *NodeInMemoryStorage) NewNodes() storage.Buffer[core.Node] { + return &s.newNodes +} + +func (s *NodeInMemoryStorage) DeletedNodes() storage.Buffer[core.Node] { + return &s.deletedNodes +} + +func NewNodeInMemoryStorage() NodeInMemoryStorage { + nodeEventChan := make(chan metav1.WatchEvent, 500) + nodeUpscalingChan := make(chan core.Node) + nodeDownscalingChan := make(chan core.Node) + return NodeInMemoryStorage{ + nodes: core.NodeList{TypeMeta: metav1.TypeMeta{Kind: "NodeList", APIVersion: "v1"}, Items: nil}, + nodeEventChan: nodeEventChan, + nodeBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NodeBroadcaster", nodeEventChan), + nodeUpscalingChan: nodeUpscalingChan, + nodeDownscalingChan: nodeDownscalingChan, + nodeDownscalingBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NodeDownscalingBroadcaster", nodeDownscalingChan), + nodeUpscalingBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "NodeUpscalingBroadcaster", nodeUpscalingChan), + + newNodes: NewInMemBuffer[core.Node](), + deletedNodes: NewInMemBuffer[core.Node](), + } +} diff --git a/pkg/storage/inmemorystorage/pod.go b/pkg/storage/inmemorystorage/pod.go new file mode 100644 index 0000000..df38314 --- /dev/null +++ b/pkg/storage/inmemorystorage/pod.go @@ -0,0 +1,119 @@ +package inmemorystorage + +import ( + "context" + "go-kube/internal/broadcast" + "go-kube/pkg/misim" + storage2 "go-kube/pkg/storage" + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + "sync" +) + +type PodInMemoryStorage struct { + mu sync.Mutex + + pods core.PodList + podEventChan chan metav1.WatchEvent + podBroadcaster *broadcast.BroadcastServer[metav1.WatchEvent] + 
nextResourceId int + + failedPodBuffer InMemBuffer[misim.BindingFailureInformation] + bindedPodBuffer InMemBuffer[misim.BindingInformation] + podsToBePlaced InMemBuffer[core.Pod] + podsUpdateChannel InMemChannelWrapper[misim.PodsUpdateResponse] +} + +func (s *PodInMemoryStorage) BeginTransaction() { + s.mu.Lock() +} + +func (s *PodInMemoryStorage) EndTransaction() { + s.mu.Unlock() +} + +func (s *PodInMemoryStorage) GetPods() (core.PodList, *broadcast.BroadcastServer[metav1.WatchEvent]) { + return s.pods, s.podBroadcaster +} + +func (s *PodInMemoryStorage) StorePods(pods core.PodList, events []metav1.WatchEvent) { + s.pods = pods + for _, e := range events { + s.podEventChan <- e + } +} + +func (s *PodInMemoryStorage) DeletePods(events []metav1.WatchEvent) { + s.pods = core.PodList{} + for _, e := range events { + e.Type = "DELETED" + s.podEventChan <- e + } +} + +func (s *PodInMemoryStorage) GetPod(podName string) core.Pod { + var u core.Pod + for _, element := range s.pods.Items { + if element.Name == podName { + klog.V(8).Infof("Found pod %s", podName) + u = element + } + } + return u +} + +func (s *PodInMemoryStorage) UpdatePod(podName string, newValues core.Pod) { + // Find index, and replace + var index int = -1 + for i, element := range s.pods.Items { + if element.Name == podName { + klog.V(8).Info("Found pod %s\n", podName) + index = i + break + } + } + if index != -1 { + // Found in list => update + s.pods.Items[index] = newValues + + // Fire modified watch event + s.podEventChan <- metav1.WatchEvent{ + Type: "MODIFIED", + Object: runtime.RawExtension{Object: &s.pods.Items[index]}, + } + } + // else do nothing +} + +func (s *PodInMemoryStorage) FailedPodBuffer() storage2.Buffer[misim.BindingFailureInformation] { + return &s.failedPodBuffer +} + +func (s *PodInMemoryStorage) BindedPodBuffer() storage2.Buffer[misim.BindingInformation] { + return &s.bindedPodBuffer +} + +func (s *PodInMemoryStorage) PodsToBePlaced() storage2.Buffer[core.Pod] { + return 
&s.podsToBePlaced +} + +func (s *PodInMemoryStorage) PodsUpdateChannel() storage2.ChannelWrapper[misim.PodsUpdateResponse] { + return &s.podsUpdateChannel +} + +func NewPodInMemoryStorage() PodInMemoryStorage { + podEventChan := make(chan metav1.WatchEvent, 500) + return PodInMemoryStorage{ + pods: core.PodList{TypeMeta: metav1.TypeMeta{Kind: "PodList", APIVersion: "v1"}, Items: nil}, + podEventChan: podEventChan, + podBroadcaster: broadcast.NewBroadcastServer(context.TODO(), "PodBroadcaster", podEventChan), + nextResourceId: 1, + + failedPodBuffer: NewInMemBuffer[misim.BindingFailureInformation](), + bindedPodBuffer: NewInMemBuffer[misim.BindingInformation](), + podsToBePlaced: NewInMemBuffer[core.Pod](), + podsUpdateChannel: NewInMemChannelWrapper[misim.PodsUpdateResponse](), + } +} diff --git a/pkg/storage/inmemorystorage/statusconfigmap.go b/pkg/storage/inmemorystorage/statusconfigmap.go new file mode 100644 index 0000000..619aae0 --- /dev/null +++ b/pkg/storage/inmemorystorage/statusconfigmap.go @@ -0,0 +1,25 @@ +package inmemorystorage + +import ( + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type StatusConfigMapInMemoryStorage struct { + statusConfigMap core.ConfigMap +} + +func (s *StatusConfigMapInMemoryStorage) GetStatusConfigMap() core.ConfigMap { + return s.statusConfigMap +} + +func (s *StatusConfigMapInMemoryStorage) StoreStatusConfigMap(configMap core.ConfigMap) { + s.statusConfigMap = configMap +} + +func NewStatusMapInMemoryStorage() StatusConfigMapInMemoryStorage { + return StatusConfigMapInMemoryStorage{ + statusConfigMap: core.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{Name: "cluster-autoscaler-status", Namespace: "kube-system"}}, + } +} diff --git a/pkg/storage/machine.go b/pkg/storage/machine.go new file mode 100644 index 0000000..b7622bf --- /dev/null +++ b/pkg/storage/machine.go @@ -0,0 +1,25 @@ +package storage + +import ( + 
"go-kube/internal/broadcast" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachineStorage interface { + // Stores a machineset list in the storage + StoreMachines(ms cluster.MachineList, events []metav1.WatchEvent) + // Returns the current machinesets + GetMachines() (cluster.MachineList, *broadcast.BroadcastServer[metav1.WatchEvent]) + // Gets a single machine + // GetMachine(w http.ResponseWriter, r *http.Request) + GetMachine(machineName string) cluster.Machine + // Deletes the machine + DeleteMachine(machineName string) cluster.Machine + AddMachine(cluster.Machine) + // Puts a machine + // PutMachine(w http.ResponseWriter, r *http.Request) + PutMachine(machineName string, machine cluster.Machine) cluster.Machine + IncrementMachineCount() + GetMachineCount() int +} diff --git a/pkg/storage/machineset.go b/pkg/storage/machineset.go new file mode 100644 index 0000000..3ddced3 --- /dev/null +++ b/pkg/storage/machineset.go @@ -0,0 +1,24 @@ +package storage + +import ( + "go-kube/internal/broadcast" + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cluster "sigs.k8s.io/cluster-api/api/v1beta1" +) + +type MachineSetStorage interface { + // Stores a machineset list in the storage + StoreMachineSets(ms cluster.MachineSetList, events []metav1.WatchEvent) + // Returns the current machinesets + GetMachineSets() (cluster.MachineSetList, *broadcast.BroadcastServer[metav1.WatchEvent]) + // Finds machineset by name + GetMachineSet(machineSetName string) cluster.MachineSet + PutMachineSet(machineSetName string, machineSet cluster.MachineSet) cluster.MachineSet + // Is a upscaling possible on any MachineSet + IsUpscalingPossible() bool + // Is a dowscaling possible on any MachineSet + IsDownscalingPossible() bool + // Get scale + GetMachineSetsScale(machineSetName string) v1.Scale +} diff --git a/pkg/storage/namespace.go b/pkg/storage/namespace.go new file mode 100644 index 
0000000..cc4e451 --- /dev/null +++ b/pkg/storage/namespace.go @@ -0,0 +1,16 @@ +package storage + +import ( + "go-kube/internal/broadcast" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NamespaceStorage interface { + // Returns the current namespaces + GetNamespaces() (v1.NamespaceList, *broadcast.BroadcastServer[metav1.WatchEvent]) + // Stores a namespace list in the storage + StoreNamespaces(ns v1.NamespaceList) + // Returns a single namespace by name + GetNamespace(name string) v1.Namespace +} diff --git a/pkg/storage/node.go b/pkg/storage/node.go new file mode 100644 index 0000000..2fe4903 --- /dev/null +++ b/pkg/storage/node.go @@ -0,0 +1,30 @@ +package storage + +import ( + "go-kube/internal/broadcast" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NodeStorage interface { + // Stores a nodelist in the storage + StoreNodes(nodes v1.NodeList, events []metav1.WatchEvent) + // Retrieves the current nodeList from the storage + GetNodes() (v1.NodeList, *broadcast.BroadcastServer[metav1.WatchEvent]) + // Edits a node + PutNode(name string, node v1.Node) v1.Node + // Gets a single node + GetNode(name string) v1.Node + // Deletes a node from the node list + DeleteNode(name string) v1.Node + // Adds a node + AddNode(v1.Node) + // Channel for Node Upscaling + GetNodeUpscalingChannel() *broadcast.BroadcastServer[v1.Node] + // Channel for node downscaling + GetNodeDownscalingChannel() *broadcast.BroadcastServer[v1.Node] + // New nodes that should be created because of updates + NewNodes() Buffer[v1.Node] + // Nodes that should be deleted + DeletedNodes() Buffer[v1.Node] +} diff --git a/pkg/storage/pod.go b/pkg/storage/pod.go new file mode 100644 index 0000000..e93cc15 --- /dev/null +++ b/pkg/storage/pod.go @@ -0,0 +1,34 @@ +package storage + +import ( + "go-kube/internal/broadcast" + "go-kube/pkg/misim" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type PodStorage 
interface { + BeginTransaction() + EndTransaction() + // Stores a podlist in the storage + StorePods(pods v1.PodList, events []metav1.WatchEvent) + // Retrieves the current podList from the storage + GetPods() (v1.PodList, *broadcast.BroadcastServer[metav1.WatchEvent]) + // UpdatePodStatus(pod v1.Pod) + DeletePods(events []metav1.WatchEvent) + // Get Pod by name + GetPod(podName string) v1.Pod + // Updates the pod with the passed name + // and triggers watch event + UpdatePod(podName string, newValues v1.Pod) + + // Buffer for failed pods + FailedPodBuffer() Buffer[misim.BindingFailureInformation] + // Buffer for binded pods + BindedPodBuffer() Buffer[misim.BindingInformation] + // Buffer for pods that should be placed + PodsToBePlaced() Buffer[v1.Pod] + + // Channel to return the pod update request when all pods are placed (or failed) + PodsUpdateChannel() ChannelWrapper[misim.PodsUpdateResponse] +} diff --git a/pkg/storage/statusconfigmap.go b/pkg/storage/statusconfigmap.go new file mode 100644 index 0000000..2b726ef --- /dev/null +++ b/pkg/storage/statusconfigmap.go @@ -0,0 +1,12 @@ +package storage + +import ( + core "k8s.io/api/core/v1" +) + +type StatusConfigMapStorage interface { + // Stores the status config map + StoreStatusConfigMap(core.ConfigMap) + // Returns the current status config map + GetStatusConfigMap() core.ConfigMap +} diff --git a/pkg/storage/container.go b/pkg/storage/storagecontainer.go similarity index 75% rename from pkg/storage/container.go rename to pkg/storage/storagecontainer.go index 95da40f..bc6cf9b 100644 --- a/pkg/storage/container.go +++ b/pkg/storage/storagecontainer.go @@ -8,4 +8,7 @@ type StorageContainer struct { Machines MachineStorage MachineSets MachineSetStorage StatusConfigMap StatusConfigMapStorage + PodIds IdStorage + MachineIds IdStorage + AdapterState AdapterStateStorage }