From 4e3c4c74c2ffff80fe7def5e258ca3b00c8aea8f Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 15 Aug 2023 14:13:56 -0600 Subject: [PATCH 1/4] Add resource getters --- .../internal/kubelet/metadata.go | 174 +++++++++- .../internal/kubelet/metadata_test.go | 302 ++++++++++++++++++ 2 files changed, 475 insertions(+), 1 deletion(-) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index ab22853b22bc..4de0a00d177d 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "regexp" + "strconv" "strings" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" @@ -50,15 +51,98 @@ type Metadata struct { Labels map[MetadataLabel]bool PodsMetadata *v1.PodList DetailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error + PodResources map[string]resources + ContainerResources map[string]resources +} + +type resources struct { + cpuLimit float64 + cpuRequest float64 + memoryLimit float64 + memoryRequest float64 +} + +func getContainerResources(r *v1.ResourceRequirements) resources { + if r == nil { + return resources{} + } + var containerResource resources + cpuLimit, err := strconv.ParseFloat(r.Limits.Cpu().AsDec().String(), 64) + if err == nil { + containerResource.cpuLimit = cpuLimit + } + cpuRequest, err := strconv.ParseFloat(r.Requests.Cpu().AsDec().String(), 64) + if err == nil { + containerResource.cpuRequest = cpuRequest + } + memoryLimit, err := strconv.ParseFloat(r.Limits.Memory().AsDec().String(), 64) + if err == nil { + containerResource.memoryLimit = memoryLimit + } + memoryRequest, err := strconv.ParseFloat(r.Requests.Memory().AsDec().String(), 64) + if err == nil { + containerResource.memoryRequest = memoryRequest + } + return containerResource } func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, detailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error) Metadata { - return Metadata{ + m := Metadata{ Labels: getLabelsMap(labels), PodsMetadata: podsMetadata, DetailedPVCResourceSetter: detailedPVCResourceSetter, + PodResources: make(map[string]resources, 0), + ContainerResources: make(map[string]resources, 0), } + + if podsMetadata != nil { + for _, pod := range podsMetadata.Items { + var podResource resources + allContainersCPULimitsDefined := true + allContainersCPURequestsDefined := true + allContainersMemoryLimitsDefined := true + allContainersMemoryRequestsDefined := true + for _, container := range pod.Spec.Containers { + containerResource := getContainerResources(&container.Resources) + + if allContainersCPULimitsDefined && containerResource.cpuLimit == 0 { + allContainersCPULimitsDefined = false + podResource.cpuLimit = 0 + } + if allContainersCPURequestsDefined && containerResource.cpuRequest == 0 { + allContainersCPURequestsDefined = false + podResource.cpuRequest = 0 + } + if allContainersMemoryLimitsDefined && containerResource.memoryLimit == 0 { + allContainersMemoryLimitsDefined = false + podResource.memoryLimit = 0 + } + if allContainersMemoryRequestsDefined && containerResource.memoryRequest == 0 { + allContainersMemoryRequestsDefined = false + podResource.memoryRequest = 0 + } + + if allContainersCPULimitsDefined { + podResource.cpuLimit += containerResource.cpuLimit + } + if 
allContainersCPURequestsDefined { + podResource.cpuRequest += containerResource.cpuRequest + } + if allContainersMemoryLimitsDefined { + podResource.memoryLimit += containerResource.memoryLimit + } + if allContainersMemoryRequestsDefined { + podResource.memoryRequest += containerResource.memoryRequest + } + + m.ContainerResources[string(pod.UID)+container.Name] = containerResource + } + m.PodResources[string(pod.UID)] = podResource + } + } + + return m } func getLabelsMap(metadataLabels []MetadataLabel) map[MetadataLabel]bool { @@ -151,3 +235,91 @@ func (m *Metadata) getPodVolume(podUID string, volumeName string) (v1.Volume, er return v1.Volume{}, fmt.Errorf("pod %q with volume %q not found in the fetched metadata", podUID, volumeName) } + +func (m *Metadata) getPodCPULimit(uid string) *float64 { + podResource, ok := m.PodResources[uid] + if !ok { + return nil + } + if podResource.cpuLimit > 0 { + return &podResource.cpuLimit + } + return nil +} + +func (m *Metadata) getContainerCPULimit(podUID string, containerName string) *float64 { + containerResource, ok := m.ContainerResources[podUID+containerName] + if !ok { + return nil + } + if containerResource.cpuLimit > 0 { + return &containerResource.cpuLimit + } + return nil +} + +func (m *Metadata) getPodCPURequest(uid string) *float64 { + podResource, ok := m.PodResources[uid] + if !ok { + return nil + } + if podResource.cpuRequest > 0 { + return &podResource.cpuRequest + } + return nil +} + +func (m *Metadata) getContainerCPURequest(podUID string, containerName string) *float64 { + containerResource, ok := m.ContainerResources[podUID+containerName] + if !ok { + return nil + } + if containerResource.cpuRequest > 0 { + return &containerResource.cpuRequest + } + return nil +} + +func (m *Metadata) getPodMemoryLimit(uid string) *float64 { + podResource, ok := m.PodResources[uid] + if !ok { + return nil + } + if podResource.memoryLimit > 0 { + return &podResource.memoryLimit + } + return nil +} + +func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) *float64 { + containerResource, ok := m.ContainerResources[podUID+containerName] + if !ok { + return nil + } + if containerResource.memoryLimit > 0 { + return &containerResource.memoryLimit + } + return nil +} + +func (m *Metadata) getPodMemoryRequest(uid string) *float64 { + podResource, ok := m.PodResources[uid] + if !ok { + return nil + } + if podResource.memoryRequest > 0 { + return &podResource.memoryRequest + } + return nil +} + +func (m *Metadata) getContainerMemoryRequest(podUID string, containerName string) *float64 { + containerResource, ok := m.ContainerResources[podUID+containerName] + if !ok { + return nil + } + if containerResource.memoryRequest > 0 { + return &containerResource.memoryRequest + } + return nil +} diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index 396c4a32c73e..d9e5db204ab9 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" + k8sresource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" @@ -386,3 +387,304 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { }) } } + +func floatp(f float64) *float64 { + return &f +} + +// Test happy paths for volume 
type metadata. +func TestCpuAndMemoryGetters(t *testing.T) { + + tests := []struct { + name string + metadata Metadata + podUID string + containerName string + wantPodCPULimit *float64 + wantPodCPURequest *float64 + wantContainerCPULimit *float64 + wantContainerCPURequest *float64 + wantPodMemoryLimit *float64 + wantPodMemoryRequest *float64 + wantContainerMemoryLimit *float64 + wantContainerMemoryRequest *float64 + }{ + { + name: "no metadata", + metadata: NewMetadata([]MetadataLabel{}, nil, nil), + }, + { + name: "pod happy path", + metadata: NewMetadata([]MetadataLabel{}, &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-1234", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + { + Name: "container-2", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceMemory: k8sresource.MustParse("3G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceMemory: k8sresource.MustParse("3G"), + }, + }, + }, + }, + }, + }, + }, + }, nil), + podUID: "uid-1234", + containerName: "container-2", + wantPodCPULimit: floatp(3), + wantPodCPURequest: floatp(3), + wantContainerCPULimit: floatp(2), + wantContainerCPURequest: floatp(2), + wantPodMemoryLimit: floatp(4000000000), + wantPodMemoryRequest: floatp(4000000000), + wantContainerMemoryLimit: floatp(3000000000), + wantContainerMemoryRequest: floatp(3000000000), + }, + { + name: "unknown pod", + metadata: NewMetadata([]MetadataLabel{}, &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-1234", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + { + Name: "container-2", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceMemory: k8sresource.MustParse("3G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceMemory: k8sresource.MustParse("3G"), + }, + }, + }, + }, + }, + }, + }, + }, nil), + podUID: "uid-12345", + }, + { + name: "unknown container", + metadata: NewMetadata([]MetadataLabel{}, &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-1234", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + { + Name: "container-2", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceMemory: k8sresource.MustParse("3G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: 
k8sresource.MustParse("2"), + v1.ResourceMemory: k8sresource.MustParse("3G"), + }, + }, + }, + }, + }, + }, + }, + }, nil), + podUID: "uid-1234", + containerName: "container-3", + wantPodCPULimit: floatp(3), + wantPodCPURequest: floatp(3), + wantPodMemoryLimit: floatp(4000000000), + wantPodMemoryRequest: floatp(4000000000), + }, + { + name: "container limit not set", + metadata: NewMetadata([]MetadataLabel{}, &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-1234", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + { + Name: "container-2", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + }, + }, + }, + }, + }, nil), + podUID: "uid-1234", + containerName: "container-2", + wantPodCPURequest: floatp(2), + wantContainerCPURequest: floatp(1), + wantPodMemoryRequest: floatp(2000000000), + wantContainerMemoryRequest: floatp(1000000000), + }, + { + name: "container request not set", + metadata: NewMetadata([]MetadataLabel{}, &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-1234", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + { + Name: "container-2", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + }, + }, + }, + }, + }, nil), + podUID: "uid-1234", + containerName: "container-2", + wantPodCPULimit: floatp(2), + wantContainerCPULimit: floatp(1), + wantPodMemoryLimit: floatp(2000000000), + wantContainerMemoryLimit: floatp(1000000000), + }, + { + name: "container limit not set but other is", + metadata: NewMetadata([]MetadataLabel{}, &v1.PodList{ + Items: []v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + UID: "uid-1234", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceMemory: k8sresource.MustParse("1G"), + }, + }, + }, + { + Name: "container-2", + }, + }, + }, + }, + }, + }, nil), + podUID: "uid-1234", + containerName: "container-1", + wantContainerCPULimit: floatp(1), + wantContainerCPURequest: floatp(1), + wantContainerMemoryLimit: floatp(1000000000), + wantContainerMemoryRequest: floatp(1000000000), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.wantPodCPULimit, tt.metadata.getPodCPULimit(tt.podUID)) + require.Equal(t, tt.wantPodCPURequest, tt.metadata.getPodCPURequest(tt.podUID)) + require.Equal(t, tt.wantContainerCPULimit, tt.metadata.getContainerCPULimit(tt.podUID, tt.containerName)) + require.Equal(t, tt.wantContainerCPURequest, tt.metadata.getContainerCPURequest(tt.podUID, tt.containerName)) + require.Equal(t, tt.wantPodMemoryLimit, tt.metadata.getPodMemoryLimit(tt.podUID)) + require.Equal(t, tt.wantPodMemoryRequest, 
tt.metadata.getPodMemoryRequest(tt.podUID)) + require.Equal(t, tt.wantContainerMemoryLimit, tt.metadata.getContainerMemoryLimit(tt.podUID, tt.containerName)) + require.Equal(t, tt.wantContainerMemoryRequest, tt.metadata.getContainerMemoryRequest(tt.podUID, tt.containerName)) + }) + } +} From 6d29614b8410cf50cd4bcc4abc68ae81fe1d2aa9 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:37:30 -0600 Subject: [PATCH 2/4] Use better methods --- .../internal/kubelet/metadata.go | 38 +++++-------- .../internal/kubelet/metadata_test.go | 56 ++++++++++--------- 2 files changed, 43 insertions(+), 51 deletions(-) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index 4de0a00d177d..5dc15906d381 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "regexp" - "strconv" "strings" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" @@ -56,34 +55,23 @@ type Metadata struct { } type resources struct { - cpuLimit float64 cpuRequest float64 - memoryLimit float64 - memoryRequest float64 + cpuLimit float64 + memoryRequest int64 + memoryLimit int64 } func getContainerResources(r *v1.ResourceRequirements) resources { if r == nil { return resources{} } - var containerResource resources - cpuLimit, err := strconv.ParseFloat(r.Limits.Cpu().AsDec().String(), 64) - if err == nil { - containerResource.cpuLimit = cpuLimit - } - cpuRequest, err := strconv.ParseFloat(r.Requests.Cpu().AsDec().String(), 64) - if err == nil { - containerResource.cpuRequest = cpuRequest - } - memoryLimit, err := strconv.ParseFloat(r.Limits.Memory().AsDec().String(), 64) - if err == nil { - containerResource.memoryLimit = memoryLimit - } - memoryRequest, err := strconv.ParseFloat(r.Requests.Memory().AsDec().String(), 64) - if err == nil { - containerResource.memoryRequest = memoryRequest + + return resources{ + cpuRequest: r.Requests.Cpu().AsApproximateFloat64(), + cpuLimit: r.Limits.Cpu().AsApproximateFloat64(), + memoryRequest: r.Requests.Memory().Value(), + memoryLimit: r.Limits.Memory().Value(), } - return containerResource } func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, @@ -280,7 +268,7 @@ func (m *Metadata) getContainerCPURequest(podUID string, containerName string) * return nil } -func (m *Metadata) getPodMemoryLimit(uid string) *float64 { +func (m *Metadata) getPodMemoryLimit(uid string) *int64 { podResource, ok := m.PodResources[uid] if !ok { return nil @@ -291,7 +279,7 @@ func (m *Metadata) getPodMemoryLimit(uid string) *float64 { return nil } -func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) *float64 { +func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) *int64 { containerResource, ok := m.ContainerResources[podUID+containerName] if !ok { return nil @@ -302,7 +290,7 @@ func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) return nil } -func (m *Metadata) getPodMemoryRequest(uid string) *float64 { +func (m *Metadata) getPodMemoryRequest(uid string) *int64 { podResource, ok := m.PodResources[uid] if !ok { return nil @@ -313,7 +301,7 @@ func (m *Metadata) getPodMemoryRequest(uid string) *float64 { return nil } -func (m *Metadata) getContainerMemoryRequest(podUID string, containerName string) *float64 { +func (m *Metadata) getContainerMemoryRequest(podUID 
string, containerName string) *int64 { containerResource, ok := m.ContainerResources[podUID+containerName] if !ok { return nil diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index d9e5db204ab9..9d20079b131b 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -388,6 +388,10 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { } } +func intp(i int64) *int64 { + return &i +} + func floatp(f float64) *float64 { return &f } @@ -404,10 +408,10 @@ func TestCpuAndMemoryGetters(t *testing.T) { wantPodCPURequest *float64 wantContainerCPULimit *float64 wantContainerCPURequest *float64 - wantPodMemoryLimit *float64 - wantPodMemoryRequest *float64 - wantContainerMemoryLimit *float64 - wantContainerMemoryRequest *float64 + wantPodMemoryLimit *int64 + wantPodMemoryRequest *int64 + wantContainerMemoryLimit *int64 + wantContainerMemoryRequest *int64 }{ { name: "no metadata", @@ -427,11 +431,11 @@ func TestCpuAndMemoryGetters(t *testing.T) { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceCPU: k8sresource.MustParse("100m"), v1.ResourceMemory: k8sresource.MustParse("1G"), }, Limits: v1.ResourceList{ - v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceCPU: k8sresource.MustParse("100m"), v1.ResourceMemory: k8sresource.MustParse("1G"), }, }, @@ -456,14 +460,14 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-2", - wantPodCPULimit: floatp(3), - wantPodCPURequest: floatp(3), + wantPodCPULimit: floatp(2.1), + wantPodCPURequest: floatp(2.1), wantContainerCPULimit: floatp(2), wantContainerCPURequest: floatp(2), - wantPodMemoryLimit: floatp(4000000000), - wantPodMemoryRequest: floatp(4000000000), - wantContainerMemoryLimit: floatp(3000000000), - wantContainerMemoryRequest: floatp(3000000000), + wantPodMemoryLimit: intp(4000000000), + wantPodMemoryRequest: intp(4000000000), + wantContainerMemoryLimit: intp(3000000000), + wantContainerMemoryRequest: intp(3000000000), }, { name: "unknown pod", @@ -522,11 +526,11 @@ func TestCpuAndMemoryGetters(t *testing.T) { Name: "container-1", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceCPU: k8sresource.MustParse("300m"), v1.ResourceMemory: k8sresource.MustParse("1G"), }, Limits: v1.ResourceList{ - v1.ResourceCPU: k8sresource.MustParse("1"), + v1.ResourceCPU: k8sresource.MustParse("300m"), v1.ResourceMemory: k8sresource.MustParse("1G"), }, }, @@ -535,11 +539,11 @@ func TestCpuAndMemoryGetters(t *testing.T) { Name: "container-2", Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceCPU: k8sresource.MustParse("400m"), v1.ResourceMemory: k8sresource.MustParse("3G"), }, Limits: v1.ResourceList{ - v1.ResourceCPU: k8sresource.MustParse("2"), + v1.ResourceCPU: k8sresource.MustParse("400m"), v1.ResourceMemory: k8sresource.MustParse("3G"), }, }, @@ -551,10 +555,10 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-3", - wantPodCPULimit: floatp(3), - wantPodCPURequest: floatp(3), - wantPodMemoryLimit: floatp(4000000000), - wantPodMemoryRequest: floatp(4000000000), + wantPodCPULimit: floatp(0.7), + wantPodCPURequest: floatp(0.7), + wantPodMemoryLimit: 
intp(4000000000), + wantPodMemoryRequest: intp(4000000000), }, { name: "container limit not set", @@ -593,8 +597,8 @@ func TestCpuAndMemoryGetters(t *testing.T) { containerName: "container-2", wantPodCPURequest: floatp(2), wantContainerCPURequest: floatp(1), - wantPodMemoryRequest: floatp(2000000000), - wantContainerMemoryRequest: floatp(1000000000), + wantPodMemoryRequest: intp(2000000000), + wantContainerMemoryRequest: intp(1000000000), }, { name: "container request not set", @@ -633,8 +637,8 @@ func TestCpuAndMemoryGetters(t *testing.T) { containerName: "container-2", wantPodCPULimit: floatp(2), wantContainerCPULimit: floatp(1), - wantPodMemoryLimit: floatp(2000000000), - wantContainerMemoryLimit: floatp(1000000000), + wantPodMemoryLimit: intp(2000000000), + wantContainerMemoryLimit: intp(1000000000), }, { name: "container limit not set but other is", @@ -671,8 +675,8 @@ func TestCpuAndMemoryGetters(t *testing.T) { containerName: "container-1", wantContainerCPULimit: floatp(1), wantContainerCPURequest: floatp(1), - wantContainerMemoryLimit: floatp(1000000000), - wantContainerMemoryRequest: floatp(1000000000), + wantContainerMemoryLimit: intp(1000000000), + wantContainerMemoryRequest: intp(1000000000), }, } for _, tt := range tests { From 8cacce72da8a46063279c31d0dcafdcb1c3df9e3 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Mon, 25 Sep 2023 10:39:41 -0600 Subject: [PATCH 3/4] Use 0 to represent missing value --- .../internal/kubelet/metadata.go | 72 +++++++------------ .../internal/kubelet/metadata_test.go | 72 +++++++++---------- 2 files changed, 56 insertions(+), 88 deletions(-) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index 5dc15906d381..9d9eb4ea2b40 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -224,90 +224,66 @@ func (m *Metadata) getPodVolume(podUID string, volumeName string) (v1.Volume, er return v1.Volume{}, fmt.Errorf("pod %q with volume %q not found in the fetched metadata", podUID, volumeName) } -func (m *Metadata) getPodCPULimit(uid string) *float64 { +func (m *Metadata) getPodCPULimit(uid string) float64 { podResource, ok := m.PodResources[uid] if !ok { - return nil - } - if podResource.cpuLimit > 0 { - return &podResource.cpuLimit + return 0 } - return nil + return podResource.cpuLimit } -func (m *Metadata) getContainerCPULimit(podUID string, containerName string) *float64 { +func (m *Metadata) getContainerCPULimit(podUID string, containerName string) float64 { containerResource, ok := m.ContainerResources[podUID+containerName] if !ok { - return nil - } - if containerResource.cpuLimit > 0 { - return &containerResource.cpuLimit + return 0 } - return nil + return containerResource.cpuLimit } -func (m *Metadata) getPodCPURequest(uid string) *float64 { +func (m *Metadata) getPodCPURequest(uid string) float64 { podResource, ok := m.PodResources[uid] if !ok { - return nil - } - if podResource.cpuRequest > 0 { - return &podResource.cpuRequest + return 0 } - return nil + return podResource.cpuRequest } -func (m *Metadata) getContainerCPURequest(podUID string, containerName string) *float64 { +func (m *Metadata) getContainerCPURequest(podUID string, containerName string) float64 { containerResource, ok := m.ContainerResources[podUID+containerName] if !ok { - return nil - } - if containerResource.cpuRequest > 0 { - return 
&containerResource.cpuRequest + return 0 } - return nil + return containerResource.cpuRequest } -func (m *Metadata) getPodMemoryLimit(uid string) *int64 { +func (m *Metadata) getPodMemoryLimit(uid string) int64 { podResource, ok := m.PodResources[uid] if !ok { - return nil - } - if podResource.memoryLimit > 0 { - return &podResource.memoryLimit + return 0 } - return nil + return podResource.memoryLimit } -func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) *int64 { +func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) int64 { containerResource, ok := m.ContainerResources[podUID+containerName] if !ok { - return nil - } - if containerResource.memoryLimit > 0 { - return &containerResource.memoryLimit + return 0 } - return nil + return containerResource.memoryLimit } -func (m *Metadata) getPodMemoryRequest(uid string) *int64 { +func (m *Metadata) getPodMemoryRequest(uid string) int64 { podResource, ok := m.PodResources[uid] if !ok { - return nil - } - if podResource.memoryRequest > 0 { - return &podResource.memoryRequest + return 0 } - return nil + return podResource.memoryRequest } -func (m *Metadata) getContainerMemoryRequest(podUID string, containerName string) *int64 { +func (m *Metadata) getContainerMemoryRequest(podUID string, containerName string) int64 { containerResource, ok := m.ContainerResources[podUID+containerName] if !ok { - return nil - } - if containerResource.memoryRequest > 0 { - return &containerResource.memoryRequest + return 0 } - return nil + return containerResource.memoryRequest } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index 9d20079b131b..9c4cd8be3745 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -388,14 +388,6 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { } } -func intp(i int64) *int64 { - return &i -} - -func floatp(f float64) *float64 { - return &f -} - // Test happy paths for volume type metadata. 
func TestCpuAndMemoryGetters(t *testing.T) { @@ -404,14 +396,14 @@ func TestCpuAndMemoryGetters(t *testing.T) { metadata Metadata podUID string containerName string - wantPodCPULimit *float64 - wantPodCPURequest *float64 - wantContainerCPULimit *float64 - wantContainerCPURequest *float64 - wantPodMemoryLimit *int64 - wantPodMemoryRequest *int64 - wantContainerMemoryLimit *int64 - wantContainerMemoryRequest *int64 + wantPodCPULimit float64 + wantPodCPURequest float64 + wantContainerCPULimit float64 + wantContainerCPURequest float64 + wantPodMemoryLimit int64 + wantPodMemoryRequest int64 + wantContainerMemoryLimit int64 + wantContainerMemoryRequest int64 }{ { name: "no metadata", @@ -460,14 +452,14 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-2", - wantPodCPULimit: floatp(2.1), - wantPodCPURequest: floatp(2.1), - wantContainerCPULimit: floatp(2), - wantContainerCPURequest: floatp(2), - wantPodMemoryLimit: intp(4000000000), - wantPodMemoryRequest: intp(4000000000), - wantContainerMemoryLimit: intp(3000000000), - wantContainerMemoryRequest: intp(3000000000), + wantPodCPULimit: 2.1, + wantPodCPURequest: 2.1, + wantContainerCPULimit: 2, + wantContainerCPURequest: 2, + wantPodMemoryLimit: 4000000000, + wantPodMemoryRequest: 4000000000, + wantContainerMemoryLimit: 3000000000, + wantContainerMemoryRequest: 3000000000, }, { name: "unknown pod", @@ -555,10 +547,10 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-3", - wantPodCPULimit: floatp(0.7), - wantPodCPURequest: floatp(0.7), - wantPodMemoryLimit: intp(4000000000), - wantPodMemoryRequest: intp(4000000000), + wantPodCPULimit: 0.7, + wantPodCPURequest: 0.7, + wantPodMemoryLimit: 4000000000, + wantPodMemoryRequest: 4000000000, }, { name: "container limit not set", @@ -595,10 +587,10 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-2", - wantPodCPURequest: floatp(2), - wantContainerCPURequest: floatp(1), - wantPodMemoryRequest: intp(2000000000), - wantContainerMemoryRequest: intp(1000000000), + wantPodCPURequest: 2, + wantContainerCPURequest: 1, + wantPodMemoryRequest: 2000000000, + wantContainerMemoryRequest: 1000000000, }, { name: "container request not set", @@ -635,10 +627,10 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-2", - wantPodCPULimit: floatp(2), - wantContainerCPULimit: floatp(1), - wantPodMemoryLimit: intp(2000000000), - wantContainerMemoryLimit: intp(1000000000), + wantPodCPULimit: 2, + wantContainerCPULimit: 1, + wantPodMemoryLimit: 2000000000, + wantContainerMemoryLimit: 1000000000, }, { name: "container limit not set but other is", @@ -673,10 +665,10 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, nil), podUID: "uid-1234", containerName: "container-1", - wantContainerCPULimit: floatp(1), - wantContainerCPURequest: floatp(1), - wantContainerMemoryLimit: intp(1000000000), - wantContainerMemoryRequest: intp(1000000000), + wantContainerCPULimit: 1, + wantContainerCPURequest: 1, + wantContainerMemoryLimit: 1000000000, + wantContainerMemoryRequest: 1000000000, }, } for _, tt := range tests { From 26cb5f80aa98047a425cd2ed0345711cfbe74e62 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 26 Sep 2023 12:21:09 -0600 Subject: [PATCH 4/4] use maps directly --- .../internal/kubelet/metadata.go | 76 ++----------------- .../internal/kubelet/metadata_test.go | 16 ++-- 2 
files changed, 14 insertions(+), 78 deletions(-) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index 9d9eb4ea2b40..0bf8b385ff34 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -50,8 +50,8 @@ type Metadata struct { Labels map[MetadataLabel]bool PodsMetadata *v1.PodList DetailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error - PodResources map[string]resources - ContainerResources map[string]resources + podResources map[string]resources + containerResources map[string]resources } type resources struct { @@ -80,8 +80,8 @@ func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, Labels: getLabelsMap(labels), PodsMetadata: podsMetadata, DetailedPVCResourceSetter: detailedPVCResourceSetter, - PodResources: make(map[string]resources, 0), - ContainerResources: make(map[string]resources, 0), + podResources: make(map[string]resources, 0), + containerResources: make(map[string]resources, 0), } if podsMetadata != nil { @@ -124,9 +124,9 @@ func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, podResource.memoryRequest += containerResource.memoryRequest } - m.ContainerResources[string(pod.UID)+container.Name] = containerResource + m.containerResources[string(pod.UID)+container.Name] = containerResource } - m.PodResources[string(pod.UID)] = podResource + m.podResources[string(pod.UID)] = podResource } } @@ -223,67 +223,3 @@ func (m *Metadata) getPodVolume(podUID string, volumeName string) (v1.Volume, er return v1.Volume{}, fmt.Errorf("pod %q with volume %q not found in the fetched metadata", podUID, volumeName) } - -func (m *Metadata) getPodCPULimit(uid string) float64 { - podResource, ok := m.PodResources[uid] - if !ok { - return 0 - } - return podResource.cpuLimit -} - -func (m *Metadata) getContainerCPULimit(podUID string, containerName string) float64 { - containerResource, ok := m.ContainerResources[podUID+containerName] - if !ok { - return 0 - } - return containerResource.cpuLimit -} - -func (m *Metadata) getPodCPURequest(uid string) float64 { - podResource, ok := m.PodResources[uid] - if !ok { - return 0 - } - return podResource.cpuRequest -} - -func (m *Metadata) getContainerCPURequest(podUID string, containerName string) float64 { - containerResource, ok := m.ContainerResources[podUID+containerName] - if !ok { - return 0 - } - return containerResource.cpuRequest -} - -func (m *Metadata) getPodMemoryLimit(uid string) int64 { - podResource, ok := m.PodResources[uid] - if !ok { - return 0 - } - return podResource.memoryLimit -} - -func (m *Metadata) getContainerMemoryLimit(podUID string, containerName string) int64 { - containerResource, ok := m.ContainerResources[podUID+containerName] - if !ok { - return 0 - } - return containerResource.memoryLimit -} - -func (m *Metadata) getPodMemoryRequest(uid string) int64 { - podResource, ok := m.PodResources[uid] - if !ok { - return 0 - } - return podResource.memoryRequest -} - -func (m *Metadata) getContainerMemoryRequest(podUID string, containerName string) int64 { - containerResource, ok := m.ContainerResources[podUID+containerName] - if !ok { - return 0 - } - return containerResource.memoryRequest -} diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index 9c4cd8be3745..ae9309a375c0 100644 --- 
a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -673,14 +673,14 @@ func TestCpuAndMemoryGetters(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.wantPodCPULimit, tt.metadata.getPodCPULimit(tt.podUID)) - require.Equal(t, tt.wantPodCPURequest, tt.metadata.getPodCPURequest(tt.podUID)) - require.Equal(t, tt.wantContainerCPULimit, tt.metadata.getContainerCPULimit(tt.podUID, tt.containerName)) - require.Equal(t, tt.wantContainerCPURequest, tt.metadata.getContainerCPURequest(tt.podUID, tt.containerName)) - require.Equal(t, tt.wantPodMemoryLimit, tt.metadata.getPodMemoryLimit(tt.podUID)) - require.Equal(t, tt.wantPodMemoryRequest, tt.metadata.getPodMemoryRequest(tt.podUID)) - require.Equal(t, tt.wantContainerMemoryLimit, tt.metadata.getContainerMemoryLimit(tt.podUID, tt.containerName)) - require.Equal(t, tt.wantContainerMemoryRequest, tt.metadata.getContainerMemoryRequest(tt.podUID, tt.containerName)) + require.Equal(t, tt.wantPodCPULimit, tt.metadata.podResources[tt.podUID].cpuLimit) + require.Equal(t, tt.wantPodCPURequest, tt.metadata.podResources[tt.podUID].cpuRequest) + require.Equal(t, tt.wantContainerCPULimit, tt.metadata.containerResources[tt.podUID+tt.containerName].cpuLimit) + require.Equal(t, tt.wantContainerCPURequest, tt.metadata.containerResources[tt.podUID+tt.containerName].cpuRequest) + require.Equal(t, tt.wantPodMemoryLimit, tt.metadata.podResources[tt.podUID].memoryLimit) + require.Equal(t, tt.wantPodMemoryRequest, tt.metadata.podResources[tt.podUID].memoryRequest) + require.Equal(t, tt.wantContainerMemoryLimit, tt.metadata.containerResources[tt.podUID+tt.containerName].memoryLimit) + require.Equal(t, tt.wantContainerMemoryRequest, tt.metadata.containerResources[tt.podUID+tt.containerName].memoryRequest) }) } }
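
The end state of the series keeps podResources and containerResources as unexported maps on Metadata, keyed by pod UID and by pod UID + container name, with the zero value standing in for "request/limit not set" (per PATCH 3/4). As a rough illustration only — the helper name and the record callback below are hypothetical and not part of these patches — code inside the kubelet package might consume that metadata roughly like this:

	// Hypothetical sketch, assuming it lives in the same kubelet package as Metadata.
	// A missing map key yields the zero-value resources struct, so a single check
	// covers both "pod not in the fetched metadata" and "CPU limit not defined on
	// every container" (the pod-level limit is left at 0 in that case).
	func recordPodCPULimitIfSet(md Metadata, podUID string, record func(float64)) {
		if limit := md.podResources[podUID].cpuLimit; limit > 0 {
			record(limit)
		}
	}

Returning the zero value instead of the earlier *float64/*int64 pointers (PATCH 1/4 and 2/4) shifts the "is this set?" decision to the caller, which only needs the comparison shown above rather than a nil check.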