forked from flant/loghouse
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathvalues.yaml
204 lines (190 loc) · 4.69 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
# Override image tags with --set version=...
# Chart.Version is used if no --set version is given.
# Chart.Version is set to "latest" in the master branch; helm package rewrites it to the tag value for releases.
# version: latest|0.2.3|0.2.2
# Basic-auth string (base64-encoded htpasswd entry). Generated by:
# echo "PASSWORD" | htpasswd -ni USERNAME | base64 -w0
# Example:
# command: echo "PASSWORD" | htpasswd -ni admin | base64 -w0
# result: YWRtaW46JGFwcjEkMzdxSEwvTVIkcEFvdzEzZDUwMkd5VFc2VDNlQmJiMAoK
auth: YWRtaW46JGFwcjEkelhESkU5YTkkRkU0OFdnZlBMZlJJQjk0bVhXZVprMAoK
# Settings for ingress
ingress:
  enable: true
  enable_https: true
  clickhouse:
    host: clickhouse.domain.com
    path: "/"
    tls_secret_name: clickhouse
    # ingressClass: nginx
    # tls_issuer: letsencrypt
    # tls_issuer_kind: ClusterIssuer
    # annotations:
    #   traefik.frontend.passHostHeader: "true"
  loghouse:
    host: loghouse.domain.com
    path: "/"
    tls_secret_name: loghouse
    # ingressClass: nginx
    # tls_issuer: letsencrypt
    # tls_issuer_kind: ClusterIssuer
    # annotations:
    #   traefik.frontend.passHostHeader: "true"
  tabix:
    host: tabix.domain.com
    path: "/"
    tls_secret_name: tabix
    # ingressClass: traefik
    # tls_issuer: letsencrypt
    # tls_issuer_kind: ClusterIssuer
    # annotations:
    #   traefik.frontend.passHostHeader: "true"
# Enable the tabix UI
enable_tabix: true
# Custom Docker data path for fluentd to read logs from. If Docker is
# installed on a separate disk, add --set docker_path=... to mount this path.
# docker_path: /data/docker
# Select storage for loghouse data.
# If neither the hostpath nor the pvc section is defined, emptyDir is used.
# pvc takes precedence over hostpath.
storage:
  # hostpath: /mnt/loghouse
  # pvc:
  #   name: clickhouse
  #   accessMode: ReadWriteOnce
  #   size: 20Gi
  #   storageClassName: slow
# Clickhouse TTL via the LOGS_TABLES_RETENTION_PERIOD variable, in days.
# Logs older than LOGS_TABLES_RETENTION_PERIOD are deleted automatically.
# NOTE(review): the original comment says the default is 14, but this fork
# sets 7 — confirm which retention is intended.
retention_period: 7
# Create the db and the default table structure in clickhouse
doDbDeploy: true
# Migrate data from the old table format to the new one
doDbMigrate: true
# Set to false if you do not want to install fluentd on master nodes
install_master: true
# Default imagePullPolicy
imagePullPolicy: Always
clickhouse:
  # tolerations:
  # - key: dedicated.flant.com
  #   operator: Equal
  #   value: logging
  #   effect: NoExecute
  # nodeSelector:
  #   node-role.flant.com/logging: ""
  # Setup clickhouse password
  # password: supersecret
  # Use an external clickhouse
  # externalEndpoints:
  # - 10.0.0.1
  # - 10.0.0.2
  # - 10.0.0.3
  image: "yandex/clickhouse-server"
  version: "20.1"
  external: false
  server: clickhouse
  port: 9000
  httpPort: 8123
  exporterPort: 9116
  prometheusEnabled: true
  user: default
  db: logs
  table: logs
  hasBuffer: true
  svcLabels: {}
  svcAnnotations: {}
  podAnnotations: {}
  # You should create a registry secret if you install images from a private registry.
  # NOTE(review): bare key parses as null — presumably the chart templates treat
  # null as "no secrets"; confirm before changing to [].
  imagePullSecrets:
  # The memory limit is also used to derive the max memory limit (bytes) for
  # clickhouse: "max memory limit" = limits.memory - reserveMemory (128Mi),
  # because the clickhouse process itself needs 128Mi to run.
  # So specify at least 2Gi for limits.memory.
  reserveMemory: 128Mi
  resources:
    limits:
      cpu: 4
      memory: 8Gi
    requests:
      cpu: 1
      memory: 8Gi
  exporter:
    image: "flant/clickhouse-exporter"
    version: "0.1.0"
    resources:
      limits:
        cpu: 100m
        memory: 32Mi
      requests:
        cpu: 100m
        memory: 32Mi
fluentd:
  image: "flant/loghouse-fluentd"
  # tolerations:
  # - key: "node-role.kubernetes.io/master"
  #   operator: "Exists"
  # nodeSelector:
  #   node-role.flant.com/logging: ""
  tcpPort: 5170
  udpPort: 5160
  exporterPort: 24231
  forwardPort: 24224
  prometheusEnabled: true
  # Registry secret for private-registry pulls (bare key => null = none).
  imagePullSecrets:
  svcLabels: {}
  svcAnnotations: {}
  podAnnotations: {}
  resources:
    limits:
      cpu: 1
      memory: 512Mi
    requests:
      # 100m is exactly 0.1 CPU; millicore form matches the rest of the file
      cpu: 100m
      memory: 256Mi
loghouse:
  image: "flant/loghouse-dashboard"
  # tolerations:
  # - key: dedicated.flant.com
  #   operator: Equal
  #   value: logging
  #   effect: NoExecute
  # nodeSelector:
  #   node-role.flant.com/logging: ""
  # Per-user namespace filters: user name -> list of allowed-namespace regexes.
  users:
    admin:
      - ".*"
  # Registry secret for private-registry pulls (bare key => null = none).
  imagePullSecrets:
  resources:
    limits:
      cpu: 1
      memory: 2Gi
    requests:
      cpu: 1
      memory: 2Gi
  frontend:
    resources:
      limits:
        cpu: 1
        memory: 64Mi
      requests:
        cpu: 100m
        memory: 64Mi
tabix:
  image: "flant/loghouse-tabix"
  # tolerations:
  # - key: dedicated.flant.com
  #   operator: Equal
  #   value: logging
  #   effect: NoExecute
  # nodeSelector:
  #   node-role.flant.com/logging: ""
  # Registry secret for private-registry pulls (bare key => null = none).
  imagePullSecrets:
  resources:
    limits:
      # 500m/100m are exactly 0.5/0.1 CPU; millicore form and cpu-before-memory
      # key order match the other resources stanzas in this file
      cpu: 500m
      memory: 64Mi
    requests:
      cpu: 100m
      memory: 64Mi