-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconfig-sample.toml
More file actions
99 lines (92 loc) · 3.97 KB
/
config-sample.toml
File metadata and controls
99 lines (92 loc) · 3.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# ===== General Configuration =====
# Core bot identity/ownership settings. All 18-digit values below are
# placeholder Discord snowflake IDs — replace them with your own.
[config]
# Enables debug behavior — TODO confirm exact effect in the bot code.
debug = true
# Discord user IDs granted developer/owner privileges (placeholders).
dev_id = [123456789012345678, 123456789012345678]
# Guild (server) ID used for admin operations (placeholder).
admin_server = 123456789012345678
# Public invite URL of the support server.
support_server = "https://discord.com/invite/your_invite_link"
# ===== Logs Configuration =====
# Logging destinations: a Discord log channel plus optional
# Loki/Grafana backends.
[logs]
# Minimum log level emitted (e.g. "DEBUG", "INFO", "WARNING").
logging_level = "INFO"
# Name of the application logger.
logger_name = "AlphaLLM"
# Discord channel that receives log messages (placeholder snowflake).
channel_id = 123456789012345678
# Role associated with log access/pings — presumably; confirm in bot code.
log_role_id = 123456789012345678
# Display name of the log channel.
channel_name = "📄-logs"
# Category the log channel lives under (placeholder snowflake).
category_id = 123456789012345678
# Loki push endpoint for shipping logs.
loki_url = "https://your-loki-url/loki/api/v1/push"
# Grafana base URL for dashboards.
grafana_url = "https://your-grafana-url"
# NOTE(review): much shorter than the Discord snowflakes above — presumably
# a Grafana/Loki account id rather than a Discord user id; confirm.
user_id = 123456
# ===== Memory Configuration =====
# Models and cache paths backing the bot's short-term (STM) and
# long-term (LTM) conversation memory.
[memory]
# Sentence-embedding model used to vectorize messages.
embedder_model = "sentence-transformers/all-MiniLM-L6-v2"
# Local cache directory for the embedder weights.
embedder_cache_dir = "./data/embedder"
# Model used for function/tool-call handling.
function_calling_model = "google/functiongemma-270m-it"
# Local cache directory for the function-calling model.
function_calling_cache_dir = "./data/function_calling"
# Cross-encoder used to rerank retrieved memories.
reranker_model = "cross-encoder/ms-marco-MiniLM-L-6-v2"
# Local cache directory for the reranker model.
reranker_cache_dir = "./data/reranker"
# Maximum age of short-term memory entries — presumably seconds
# (14400 s = 4 h); TODO confirm the unit in the consuming code.
stm_max_age = 14400
# Minimum similarity score for a long-term memory hit — presumably a
# 0.0–1.0 score from the embedder/reranker; confirm.
ltm_min_similarity = 0.3
# ===== Models Configuration =====
# Catalog of chat models the bot can route to. `available_models` lists
# the selectable model ids; `models` carries display metadata for each.
# NOTE: keep the two arrays in sync — every id in `available_models`
# should have a matching record (same `name`) in `models`.
[models]
available_models = [
    "llama",
    "openai",
    "mistral",
    "qwen",
    "gemini",
    "sonar",
    "evilgpt",
    "grok",
    "claude",
    "kimi",
    "deepseek",
    "glm",
    "phi",
    "cohere",
    "minimax",
    "nemotron",
    "yi",
    "hermes",
    "longcat",
    "granite",
]
models = [
    { name = "llama", owner = "Meta", description = "Reliable and versatile for general conversation." },
    { name = "openai", owner = "OpenAI", description = "Most advanced GPT model, ideal for complex tasks." },
    { name = "mistral", owner = "Mistral", description = "Balanced for a wide range of tasks." },
    { name = "qwen", owner = "Alibaba", description = "Optimized for logical reasoning and programming." },
    { name = "gemini", owner = "Google", description = "Very fast and flexible, excellent for short responses." },
    { name = "sonar", owner = "Perplexity", description = "Specialized in information retrieval." },
    { name = "evilgpt", owner = "Mistral using a custom system prompt", description = "Unfiltered and creative." },
    { name = "grok", owner = "xAI", description = "Humorous and casual." },
    { name = "claude", owner = "Anthropic", description = "Poetic and creative." },
    { name = "kimi", owner = "Moonshot", description = "Clear and pedagogical." },
    { name = "deepseek", owner = "DeepSeek", description = "Strong for generating long content." },
    { name = "glm", owner = "zAI", description = "Excellent multilingual support." },
    { name = "phi", owner = "Microsoft", description = "Lightweight and efficient." },
    { name = "cohere", owner = "Cohere", description = "Powerful in logic and analytical reasoning." },
    { name = "minimax", owner = "Minimax", description = "Advanced multi-modal model." },
    { name = "nemotron", owner = "Nvidia", description = "Open-source vision language model." },
    { name = "yi", owner = "01-AI", description = "Strong bilingual multimodal model." },
    { name = "hermes", owner = "NousResearch", description = "Open-source series with strong reasoning." },
    { name = "longcat", owner = "Meituan", description = "Efficient Mixture-of-Experts model." },
    { name = "granite", owner = "IBM", description = "Efficient open model from IBM." },
]
# ===== Database Configuration =====
[database]
# Tables copied when the database is cloned — TODO confirm whether this
# drives backups or dev-environment seeding.
tables_to_clone = ["blacklist", "server_settings_new", "users_settings", "users"]
# ===== Bots Configuration =====
# Presence/activity strings displayed by each bot account.
[bots]
# Activity text for the main (user-facing) bot.
main_bot_activity = "🤖 Try @AlphaLLM or /commands"
# Activity text for the logging bot.
logger_bot_activity = "🎛️ Monitoring AlphaLLM"
# Activity text for the admin bot.
admin_bot_activity = "⚙️ Administrate AlphaLLM"
# ===== Unified Text Configuration =====
# Settings for prompt templates and attachment ingestion.
[unified_text]
# Directory containing prompt template files.
prompt_dir = "configs/prompts"
# Maximum accepted file size in bytes (10 MiB). Underscore grouping is
# standard TOML and parses to the same integer.
max_file_size = 10_485_760
# MIME types accepted for uploaded files.
allowed_mime_types = ["text/plain", "application/pdf", "image/jpeg", "image/png"]
# ===== Limits Configuration =====
# Hard caps protecting memory usage and external API quotas.
[limits]
max_stm_messages = 50 # Maximum short-term memory messages per conversation
max_ltm_results = 5 # Maximum long-term memory results to fetch
max_conversation_history = 10 # Maximum messages to include in prompt history
discord_rate_limit = 30 # Messages per minute for Discord