-
Notifications
You must be signed in to change notification settings - Fork 80
/
.env.example
86 lines (65 loc) · 2.36 KB
/
.env.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# ======================
# Core Configuration
# ======================
# Environment
ENVIRONMENT=development
# ENVIRONMENT=production
# Version tag for Docker images in production
VERSION=latest
# GitHub repository (username/repo-name)
GITHUB_REPOSITORY=pyspur-dev/pyspur
# Backend Configuration
BACKEND_PORT=8000
BACKEND_HOST=0.0.0.0
DEBUG=False
# Frontend Configuration
FRONTEND_PORT=3000
FRONTEND_HOST=0.0.0.0
# Nginx Configuration
# This is the port that will be used to access the application
NGINX_PORT=6080
# ======================
# Database Settings
# ======================
# PySpur uses PostgreSQL as the database. By default, the database is hosted in a separate container.
# If you want to use an external database, you can provide the connection details here.
# PostgreSQL Configuration
POSTGRES_DB=pyspur
POSTGRES_USER=pyspur
POSTGRES_PASSWORD=pyspur
POSTGRES_HOST=db
POSTGRES_PORT=5432
# ======================
# Model Provider API Keys
# ======================
# OPENAI_API_KEY=your_openai_api_key
# GEMINI_API_KEY=your_gemini_api_key
# ANTHROPIC_API_KEY=your_anthropic_api_key
# ======================
# OpenAI API URL Configuration
# ======================
# In case you are using OpenAI-compatible API service, you can specify the base URL of the API here
# OPENAI_API_BASE=https://api.openai.com/v1
# ======================
# Ollama Configuration
# ======================
# NOTE:
# If the Ollama service is running on port 11434 of the host machine,
# then use http://host.docker.internal:11434 as the base URL.
# If the Ollama service is running on a different host, use the IP address or domain name of that host.
# Also make sure the ollama service is configured to accept requests.
# This can be done by setting the OLLAMA_HOST=0.0.0.0 environment variable before launching the Ollama service.
# OLLAMA_BASE_URL=http://host.docker.internal:11434
# ======================
# Azure OpenAI Configuration
# ======================
# AZURE_OPENAI_API_KEY=your_azure_openai_api_key
# AZURE_OPENAI_API_BASE=https://your-resource-name.openai.azure.com
# AZURE_OPENAI_API_VERSION=your_azure_openai_api_version
# AZURE_OPENAI_DEPLOYMENT_NAME=your_azure_openai_deployment_name
# ======================
# Slack Configuration
# ======================
# SLACK_BOT_TOKEN=your_slack_bot_token # starts with xoxb
# SLACK_USER_TOKEN=your_slack_user_token # starts with xoxp