# This is a static docker-compose file intended for local installations of Hay Say.
# In the future, a docker-compose file will be dynamically generated according to user selections in a launcher UI.
services:
# Hay Say used to download character models via Docker by downloading special, data-only images called "model packs".
# Model packs proved to be inefficient with disk space usage, so Hay Say was updated to allow users to download
# individual characters directly from Mega, Google Drive, and Hugging Face Hub instead. The model packs should still
# work, however, and they are included below (but commented out) as a fallback in case there is an issue with
# downloading models individually. See the Readme file for the list of characters included in each model pack.
# Support for model packs will be removed in the future.
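# As a rough guide (not a tested procedure), falling back to a model pack should only require uncommenting its
# service block below: each pack's named volume is already declared in the top-level "volumes" section and is
# already mounted into limited_user_migration, hay_say_ui, and the relevant architecture's server, so a subsequent
# "docker compose up -d" should pull the pack image and populate its volume with the models.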
# All current models for Controllable TalkNet
# controllable_talknet_model_pack_0:
# image: hydrusbeta/hay_say:controllable_talknet_model_pack_0
# volumes:
# - controllable_talknet_model_pack_0:/home/luna/hay_say/controllable_talknet_model_pack_0
# Talking models for so-vits-svc 3.0
# so_vits_svc_3_model_pack_0:
# image: hydrusbeta/hay_say:so_vits_svc_3_model_pack_0
# volumes:
# - so_vits_svc_3_model_pack_0:/home/luna/hay_say/so_vits_svc_3_model_pack_0
# Singing models for so-vits-svc 3.0.
# so_vits_svc_3_model_pack_1:
# image: hydrusbeta/hay_say:so_vits_svc_3_model_pack_1
# volumes:
# - so_vits_svc_3_model_pack_1:/home/luna/hay_say/so_vits_svc_3_model_pack_1
# Talking models for so-vits-svc 4.0.
# so_vits_svc_4_model_pack_0:
# image: hydrusbeta/hay_say:so_vits_svc_4_model_pack_0
# volumes:
# - so_vits_svc_4_model_pack_0:/home/luna/hay_say/so_vits_svc_4_model_pack_0
# Singing models for so-vits-svc 4.0.
# so_vits_svc_4_model_pack_1:
# image: hydrusbeta/hay_say:so_vits_svc_4_model_pack_1
# volumes:
# - so_vits_svc_4_model_pack_1:/home/luna/hay_say/so_vits_svc_4_model_pack_1
# Multi-speaker so-vits-svc 4.0 model for Pinkie Pie's various emotions.
# so_vits_svc_4_model_pack_2:
# image: hydrusbeta/hay_say:so_vits_svc_4_model_pack_2
# volumes:
# - so_vits_svc_4_model_pack_2:/home/luna/hay_say/so_vits_svc_4_model_pack_2
# Singing models of the Mane Six for so-vits-svc 5.0
# so_vits_svc_5_model_pack_0:
# image: hydrusbeta/hay_say:so_vits_svc_5_model_pack_0
# volumes:
# - so_vits_svc_5_model_pack_0:/home/luna/hay_say/so_vits_svc_5_model_pack_0
# First group of pony models that were available for RVC
# rvc_model_pack_0:
# image: hydrusbeta/hay_say:rvc_model_pack_0
# volumes:
# - rvc_model_pack_0:/home/luna/hay_say/rvc_model_pack_0
# Mane Six models for RVC
# rvc_model_pack_1:
# image: hydrusbeta/hay_say:rvc_model_pack_1
# volumes:
# - rvc_model_pack_1:/home/luna/hay_say/rvc_model_pack_1
# limited_user_migration is a service that runs on startup and ensures that all files in all
# Hay Say volumes are owned by the limited user (luna).
limited_user_migration:
image: hydrusbeta/hay_say:hay_say_ui
user: root
volumes:
- so_vits_svc_3_model_pack_0:/home/luna/hay_say/so_vits_svc_3_model_pack_0
- so_vits_svc_3_model_pack_1:/home/luna/hay_say/so_vits_svc_3_model_pack_1
- so_vits_svc_4_model_pack_0:/home/luna/hay_say/so_vits_svc_4_model_pack_0
- so_vits_svc_4_model_pack_1:/home/luna/hay_say/so_vits_svc_4_model_pack_1
- so_vits_svc_4_model_pack_2:/home/luna/hay_say/so_vits_svc_4_model_pack_2
- so_vits_svc_5_model_pack_0:/home/luna/hay_say/so_vits_svc_5_model_pack_0
- rvc_model_pack_0:/home/luna/hay_say/rvc_model_pack_0
- rvc_model_pack_1:/home/luna/hay_say/rvc_model_pack_1
- controllable_talknet_model_pack_0:/home/luna/hay_say/controllable_talknet_model_pack_0
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["chown", "-R", "luna:luna", "/home/luna/hay_say/"]
networks:
- internal
# The Redis container provides an in-memory data store that can be shared between applications.
# This allows Plotly to pass data to the background Celery workers.
redis:
image: redis
command: redis-server
healthcheck:
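# The test below increments a throwaway counter key named "ping"; any successful reply means Redis is accepting
# commands, which is all this healthcheck needs to confirm.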
test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
start_period: 15s
# start_interval is not available in versions of Docker Engine earlier than 25. For backwards
# compatibility, set the interval property instead for now. Replace it with start_interval sometime in the future,
# once everyone is on version 25+.
# start_interval: 1s
interval: 1s
networks:
- internal
# This container runs the main UI.
hay_say_ui:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:hay_say_ui
ports:
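# Gunicorn binds to port 6573 inside the container (see the command below), so once this service is up the UI is
# reachable from the host at http://localhost:6573.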
- 6573:6573
working_dir: /home/luna/hay_say/hay_say_ui
volumes:
# The container needs access to any model pack volumes so it can migrate their models to the models volume.
- so_vits_svc_3_model_pack_0:/home/luna/hay_say/so_vits_svc_3_model_pack_0
- so_vits_svc_3_model_pack_1:/home/luna/hay_say/so_vits_svc_3_model_pack_1
- so_vits_svc_4_model_pack_0:/home/luna/hay_say/so_vits_svc_4_model_pack_0
- so_vits_svc_4_model_pack_1:/home/luna/hay_say/so_vits_svc_4_model_pack_1
- so_vits_svc_4_model_pack_2:/home/luna/hay_say/so_vits_svc_4_model_pack_2
- so_vits_svc_5_model_pack_0:/home/luna/hay_say/so_vits_svc_5_model_pack_0
- rvc_model_pack_0:/home/luna/hay_say/rvc_model_pack_0
- rvc_model_pack_1:/home/luna/hay_say/rvc_model_pack_1
- controllable_talknet_model_pack_0:/home/luna/hay_say/controllable_talknet_model_pack_0
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
# Override the CMD in the Dockerfile to enable model management, skip updating the model lists on startup, and
# automatically migrate all models to the models folder. Also spin up 3 instances of Celery (one for generating
# with the CPU, one for generating with the GPU, and one for downloading models), with 5 workers for downloading
# models and a single worker each for generating output with the GPU and CPU.
command: ["/bin/sh", "-c", "
celery --workdir ~/hay_say/hay_say_ui/ -A celery_download:celery_app worker --loglevel=INFO --concurrency 5 --include_architecture ControllableTalkNet --include_architecture SoVitsSvc3 --include_architecture SoVitsSvc4 --include_architecture SoVitsSvc5 --include_architecture Rvc --include_architecture StyleTTS2 --include_architecture GPTSoVITS &
celery --workdir ~/hay_say/hay_say_ui/ -A celery_generate_gpu:celery_app worker --loglevel=INFO --concurrency 1 --cache_implementation file --include_architecture ControllableTalkNet --include_architecture SoVitsSvc3 --include_architecture SoVitsSvc4 --include_architecture SoVitsSvc5 --include_architecture Rvc --include_architecture StyleTTS2 --include_architecture GPTSoVITS &
celery --workdir ~/hay_say/hay_say_ui/ -A celery_generate_cpu:celery_app worker --loglevel=INFO --concurrency 1 --cache_implementation file --include_architecture ControllableTalkNet --include_architecture SoVitsSvc3 --include_architecture SoVitsSvc4 --include_architecture SoVitsSvc5 --include_architecture Rvc --include_architecture StyleTTS2 --include_architecture GPTSoVITS &
gunicorn --config=server_initialization.py --workers 6 --bind 0.0.0.0:6573 'wsgi:get_server(enable_model_management=True, update_model_lists_on_startup=False, enable_session_caches=False, migrate_models=True, cache_implementation=\"file\", architectures=[\"ControllableTalkNet\", \"SoVitsSvc3\", \"SoVitsSvc4\", \"SoVitsSvc5\", \"Rvc\", \"StyleTTS2\", \"GPTSoVITS\"])'
"]
networks:
- internal
- external
# This container provides a web service interface to so-vits-svc 3.0.
so_vits_svc_3_server:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:so_vits_svc_3_server
working_dir: /home/luna/hay_say/so_vits_svc_3
volumes:
- so_vits_svc_3_model_pack_0:/home/luna/hay_say/so_vits_svc_3_model_pack_0
- so_vits_svc_3_model_pack_1:/home/luna/hay_say/so_vits_svc_3_model_pack_1
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/so_vits_svc_3_server/bin/python /home/luna/hay_say/so_vits_svc_3_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for so-vits-svc 3.0 if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
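# Note: the same commented-out deploy block appears under each of the remaining inference services below. GPU
# passthrough with these device reservations generally also requires the NVIDIA Container Toolkit to be installed
# on the host.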
# This container provides a web service interface to so-vits-svc 4.0.
so_vits_svc_4_server:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:so_vits_svc_4_server
working_dir: /home/luna/hay_say/so_vits_svc_4
volumes:
- so_vits_svc_4_model_pack_0:/home/luna/hay_say/so_vits_svc_4_model_pack_0
- so_vits_svc_4_model_pack_1:/home/luna/hay_say/so_vits_svc_4_model_pack_1
- so_vits_svc_4_model_pack_2:/home/luna/hay_say/so_vits_svc_4_model_pack_2
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/so_vits_svc_4_server/bin/python /home/luna/hay_say/so_vits_svc_4_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for so-vits-svc 4.0 if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
# This container provides a web service interface to so-vits-svc 5.0.
so_vits_svc_5_server:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:so_vits_svc_5_server
working_dir: /home/luna/hay_say/so_vits_svc_5
volumes:
- so_vits_svc_5_model_pack_0:/home/luna/hay_say/so_vits_svc_5_model_pack_0
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/so_vits_svc_5_server/bin/python /home/luna/hay_say/so_vits_svc_5_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for so-vits-svc 5.0 if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
# This container provides a web service interface to Retrieval-based Voice Conversion (RVC).
rvc_server:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:rvc_server
ports:
# Map port 7865 in case someone wants to see the original RVC UI. It's not really usable because it won't see the
# model files or reference audio files.
# Note: The original UI does not start up automatically. It can be manually started by executing the following command:
# docker exec hay_say_ui-rvc_server-1 /home/luna/hay_say/.venvs/rvc/bin/python /home/luna/hay_say/rvc/infer-web.py
- 7865:7865
working_dir: /home/luna/hay_say/rvc
volumes:
- rvc_model_pack_0:/home/luna/hay_say/rvc_model_pack_0
- rvc_model_pack_1:/home/luna/hay_say/rvc_model_pack_1
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/rvc_server/bin/python /home/luna/hay_say/rvc_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for RVC if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
# This container provides a web service interface to Controllable TalkNet.
controllable_talknet_server:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:controllable_talknet_server
ports:
# Map port 8050 in case someone wants to use the original Controllable TalkNet UI.
# Note: The original UI does not start up automatically. It can be manually started by executing 2 commands:
# docker exec hay_say_ui-controllable_talknet_server-1 mkdir -p /talknet/is_docker
# docker exec hay_say_ui-controllable_talknet_server-1 /home/luna/hay_say/.venvs/controllable_talknet/bin/python /home/luna/hay_say/controllable_talknet/talknet_offline.py
- 8050:8050
working_dir: /home/luna/hay_say/controllable_talknet
volumes:
- controllable_talknet_model_pack_0:/home/luna/hay_say/controllable_talknet_model_pack_0
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/controllable_talknet_server/bin/python /home/luna/hay_say/controllable_talknet_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for Controllable TalkNet if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
# This container provides a web service interface to StyleTTS2.
styletts_2_server:
depends_on:
limited_user_migration:
condition: service_completed_successfully
redis:
condition: service_healthy
image: hydrusbeta/hay_say:styletts_2_server
working_dir: /home/luna/hay_say/styletts_2
volumes:
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/styletts_2_server/bin/python /home/luna/hay_say/styletts_2_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for StyleTTS2 if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
# This container provides a web service interface to GPT-SoVITS-v2.
gpt_so_vits_server:
depends_on:
redis:
condition: service_healthy
image: hydrusbeta/hay_say:gpt_so_vits_server
working_dir: /home/luna/hay_say/gpt_so_vits
volumes:
- models:/home/luna/hay_say/models
- audio_cache:/home/luna/hay_say/audio_cache
command: ["/bin/sh", "-c", "/home/luna/hay_say/.venvs/gpt_so_vits_server/bin/python /home/luna/hay_say/gpt_so_vits_server/main.py --cache_implementation file"]
networks:
- internal
# GPU integration is disabled by default to prevent an error on machines that do not have a CUDA-capable GPU.
# Uncomment the lines below to enable it for GPT-SoVITS-v2 if you wish.
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
volumes:
so_vits_svc_3_model_pack_0:
so_vits_svc_3_model_pack_1:
so_vits_svc_4_model_pack_0:
so_vits_svc_4_model_pack_1:
so_vits_svc_4_model_pack_2:
so_vits_svc_5_model_pack_0:
rvc_model_pack_0:
rvc_model_pack_1:
controllable_talknet_model_pack_0:
models:
external: true
audio_cache:
external: true
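# Because "models" and "audio_cache" are declared external, Compose will not create them automatically; they must
# already exist before the first "docker compose up", e.g. via:
# docker volume create models
# docker volume create audio_cache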
# Only containers which need to be connected to the outside world should be on the external network.
# Place all other containers on the internal network to restrict internet access. This is a security
# measure to prevent malicious modules from phoning home.
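# Note that the network named "external" below is just an ordinary Compose-managed bridge network (the name is a
# label, not Docker's "external:" keyword); only the "internal" network uses internal mode to block outside access.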
networks:
internal:
internal: true
external: