Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

(feat): Add configurable ConfigMap config to otelcollector config #1067

Merged
merged 1 commit into from
Nov 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 19 additions & 3 deletions controllers/cloud.redhat.com/providers/sidecar/default.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
cronjobProvider "github.com/RedHatInsights/clowder/controllers/cloud.redhat.com/providers/cronjob"
deployProvider "github.com/RedHatInsights/clowder/controllers/cloud.redhat.com/providers/deployment"
provutils "github.com/RedHatInsights/clowder/controllers/cloud.redhat.com/providers/utils"
"github.com/RedHatInsights/rhc-osdk-utils/utils"

apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
Expand Down Expand Up @@ -47,9 +48,20 @@ func (sc *sidecarProvider) Provide(app *crd.ClowdApp) error {
}
case "otel-collector":
if sidecar.Enabled && sc.Env.Spec.Providers.Sidecars.OtelCollector.Enabled {
cont := getOtelCollector()
cont := getOtelCollector(app.Name)
if cont != nil {
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, *cont)
innerDeployment.PodSpec.Volumes = append(innerDeployment.PodSpec.Volumes, core.Volume{
Name: fmt.Sprintf("%s-otel-config", app.Name),
VolumeSource: core.VolumeSource{
ConfigMap: &core.ConfigMapVolumeSource{
LocalObjectReference: core.LocalObjectReference{
Name: fmt.Sprintf("%s-otel-config", app.Name),
},
Optional: utils.TruePtr(),
},
},
})
}
}
default:
Expand Down Expand Up @@ -84,7 +96,7 @@ func (sc *sidecarProvider) Provide(app *crd.ClowdApp) error {
}
case "otel-collector":
if sidecar.Enabled && sc.Env.Spec.Providers.Sidecars.OtelCollector.Enabled {
cont := getOtelCollector()
cont := getOtelCollector(app.Name)
if cont != nil {
cj.Spec.JobTemplate.Spec.Template.Spec.Containers = append(cj.Spec.JobTemplate.Spec.Template.Spec.Containers, *cont)
}
Expand Down Expand Up @@ -145,7 +157,7 @@ func getTokenRefresher(appName string) *core.Container {
return &cont
}

func getOtelCollector() *core.Container {
func getOtelCollector(appName string) *core.Container {
cont := core.Container{}

cont.Name = "otel-collector"
Expand All @@ -164,5 +176,9 @@ func getOtelCollector() *core.Container {
"memory": resource.MustParse("1024Mi"),
},
}
cont.VolumeMounts = []core.VolumeMount{{
Name: fmt.Sprintf("%s-otel-config", appName),
MountPath: "/etc/otelcol/config.yaml",
}}
return &cont
}
228 changes: 228 additions & 0 deletions tests/kuttl/test-sidecars/01-pods.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,14 @@ spec:
deployments:
- name: processor
podSpec:
volumeMounts:
- name: puptoo-otel-config
mountPath: "/etc/otelcol/config.yaml"
readOnly: true
volumes:
- name: puptoo-otel-config
configMap:
name: puptoo-otel-config
image: quay.io/psav/clowder-hello
sidecars:
- name: token-refresher
Expand All @@ -57,6 +65,14 @@ spec:
schedule: "*/1 * * * *"
podSpec:
image: quay.io/psav/clowder-hello
volumeMounts:
- name: puptoo-otel-config
mountPath: "/etc/otelcol/config.yaml"
readOnly: true
volumes:
- name: puptoo-otel-config
configMap:
name: puptoo-otel-config
sidecars:
- name: token-refresher
enabled: true
Expand All @@ -75,3 +91,215 @@ metadata:
name: puptoo-token-refresher
namespace: test-sidecars
type: Opaque
---
apiVersion: v1
data:
relay: |
exporters:
sapm:
access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
endpoint: https://ingest.us1.signalfx.com/v2/trace
sending_queue:
num_consumers: 32
signalfx:
access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN}
api_url: https://api.us1.signalfx.com
ingest_url: https://ingest.us1.signalfx.com
sending_queue:
num_consumers: 32
extensions:
health_check:
endpoint: 0.0.0.0:13133
http_forwarder:
egress:
endpoint: https://api.us1.signalfx.com
zpages: null
processors:
batch: null
filter/logs:
logs:
exclude:
match_type: strict
resource_attributes:
- key: splunk.com/exclude
value: "true"
k8sattributes:
extract:
annotations:
- from: pod
key: splunk.com/sourcetype
- from: namespace
key: splunk.com/exclude
tag_name: splunk.com/exclude
- from: pod
key: splunk.com/exclude
tag_name: splunk.com/exclude
- from: namespace
key: splunk.com/index
tag_name: com.splunk.index
- from: pod
key: splunk.com/index
tag_name: com.splunk.index
labels:
- key: app
metadata:
- k8s.namespace.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: ip
- sources:
- from: connection
- sources:
- from: resource_attribute
name: host.name
memory_limiter:
check_interval: 2s
limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
resource/add_cluster_name:
attributes:
- action: upsert
key: k8s.cluster.name
value: ${K8S_CLUSTER_NAME}
resource/add_collector_k8s:
attributes:
- action: insert
key: k8s.node.name
value: ${K8S_NODE_NAME}
- action: insert
key: k8s.pod.name
value: ${K8S_POD_NAME}
- action: insert
key: k8s.pod.uid
value: ${K8S_POD_UID}
- action: insert
key: k8s.namespace.name
value: ${K8S_NAMESPACE}
resource/add_env_name:
attributes:
- action: insert
key: deployment.environment
value: ${ENV_NAME}
resource/logs:
attributes:
- action: upsert
from_attribute: k8s.pod.annotations.splunk.com/sourcetype
key: com.splunk.sourcetype
- action: delete
key: k8s.pod.annotations.splunk.com/sourcetype
- action: delete
key: splunk.com/exclude
resourcedetection:
detectors:
- env
- system
override: true
timeout: 10s
receivers:
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus/collector:
config:
scrape_configs:
- job_name: otel-collector
metric_relabel_configs:
- action: drop
regex: otelcol_rpc_.*
source_labels:
- __name__
- action: drop
regex: otelcol_http_.*
source_labels:
- __name__
- action: drop
regex: otelcol_processor_batch_.*
source_labels:
- __name__
scrape_interval: 10s
static_configs:
- targets:
- ${K8S_POD_IP}:8889
signalfx:
access_token_passthrough: true
endpoint: 0.0.0.0:9943
zipkin:
endpoint: 0.0.0.0:9411
service:
extensions:
- health_check
- zpages
- http_forwarder
pipelines:
logs/signalfx-events:
exporters:
- signalfx
processors:
- memory_limiter
- batch
receivers:
- signalfx
metrics:
exporters:
- signalfx
processors:
- memory_limiter
- batch
- resource/add_cluster_name
- resource/add_env_name
receivers:
- otlp
- signalfx
metrics/collector:
exporters:
- signalfx
processors:
- memory_limiter
- batch
- resource/add_collector_k8s
- resourcedetection
- resource/add_cluster_name
- resource/add_env_name
receivers:
- prometheus/collector
traces:
exporters:
- sapm
processors:
- memory_limiter
- k8sattributes
- batch
- resource/add_cluster_name
- resource/add_env_name
receivers:
- otlp
- jaeger
- zipkin
telemetry:
logs:
level: "debug"
metrics:
address: 0.0.0.0:8889
kind: ConfigMap
metadata:
name: puptoo-otel-config
namespace: test-sidecars
---
Loading