# .env file for Cascade of Semantically Integrated Layers (CSIL)
# Debug Mode
DEBUG_MODE=true
# Use external knowledge base
USE_EXTERNAL_KNOWLEDGE=true
# Semantic Processing Thresholds
MIN_KEYWORDS=2
MAX_KEYWORDS=20
KEYWORD_WEIGHT_THRESHOLD=0.1
SIMILARITY_THRESHOLD=0.1
# Thresholds for each processing layer in CSIL
INITIAL_UNDERSTANDING_THRESHOLD=0.7
RELATIONSHIP_ANALYSIS_THRESHOLD=0.7
CONTEXTUAL_INTEGRATION_THRESHOLD=0.9
SYNTHESIS_THRESHOLD=0.8
# Adaptive Processing Configuration
ADAPTIVE_THRESHOLDS=true
MIN_THRESHOLD=0.1
MAX_THRESHOLD=0.9
THRESHOLD_STEP=0.05
# Knowledge Graph Update Settings
# Set frequency of knowledge graph updates
KG_UPDATE_FREQUENCY=1
# LLM Configuration
# API endpoint for the LLM service
LLM_URL=http://0.0.0.0:11434/v1/chat/completions
# Language model path
LLM_MODEL=hf.co/arcee-ai/SuperNova-Medius-GGUF:f16
# Maximum tokens allowed in the context window
LLM_CONTEXT_WINDOW=8192
# Temperature setting for response variability
LLM_TEMPERATURE=0.6
# Max tokens in the generated response
LLM_MAX_TOKENS=4096
# Enable streaming responses
LLM_STREAM=true
# Nucleus sampling probability threshold
LLM_TOP_P=0.9
# Penalty for frequent tokens
LLM_FREQUENCY_PENALTY=0.0
# Penalty for repeating content
LLM_PRESENCE_PENALTY=0.0
# Additional penalty for repetition
LLM_REPEAT_PENALTY=1.1
# Seed for reproducibility (optional; leave empty for nondeterministic output)
LLM_SEED=