-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathconfig.yaml.example
More file actions
209 lines (182 loc) · 9.4 KB
/
config.yaml.example
File metadata and controls
209 lines (182 loc) · 9.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
# PulseBot Configuration
# Copy this file to config.yaml and fill in your values.
# Environment variables can be substituted with ${VAR_NAME} or ${VAR_NAME:-default} syntax.

# ---------------------------------------------------------------------------
# Agent — core LLM identity
# ---------------------------------------------------------------------------
agent:
  name: "PulseBot"                   # Display name shown in responses
  provider: "anthropic"              # Active LLM provider: anthropic | openai | openrouter | gemini | ollama | nvidia
  model: "claude-sonnet-4-20250514"  # Model ID for the chosen provider
  temperature: 0.7                   # 0.0 = deterministic, 1.0 = creative
  max_tokens: 4096                   # Max tokens in a single LLM response
  max_iterations: 15                 # Max tool-call iterations per user message
  verbose_tools: false               # Log full tool arguments/results to console

# ---------------------------------------------------------------------------
# Timeplus — streaming database backend (required)
# ---------------------------------------------------------------------------
timeplus:
  host: "${TIMEPLUS_HOST:-localhost}"  # Timeplus/Proton server hostname
  port: 8463                           # Native protocol port (default 8463)
  username: "${TIMEPLUS_USER:-default}"
  password: "${TIMEPLUS_PASSWORD:-}"

# ---------------------------------------------------------------------------
# Providers — API keys and default models for each LLM provider
# Only configure the provider(s) you intend to use.
# ---------------------------------------------------------------------------
providers:
  # Anthropic Claude
  anthropic:
    api_key: "${ANTHROPIC_API_KEY}"
    default_model: "claude-sonnet-4-20250514"  # claude-opus-4-6, claude-haiku-4-5-20251001

  # OpenAI — also works for any OpenAI-compatible API
  openai:
    api_key: "${OPENAI_API_KEY}"
    default_model: "gpt-4o"
    # Uncomment to point at a different OpenAI-compatible endpoint:
    # base_url: "https://dashscope.aliyuncs.com/compatible-mode/v1"  # Alibaba Qwen
    # base_url: "https://api.deepseek.com/v1"                        # DeepSeek
    # provider_name: "deepseek"  # optional display name override

  # OpenRouter — unified gateway to many models
  openrouter:
    api_key: "${OPENROUTER_API_KEY}"
    default_model: "anthropic/claude-sonnet-4-20250514"
    base_url: "https://openrouter.ai/api/v1"

  # Google Gemini
  gemini:
    api_key: "${GEMINI_API_KEY}"
    default_model: "gemini-2.5-flash"  # gemini-2.5-pro, gemini-2.0-flash
    timeout_seconds: 120               # Abort hung API calls after this many seconds

  # Ollama — local LLM, no API key required
  ollama:
    host: "${OLLAMA_HOST:-http://localhost:11434}"
    default_model: "llama3"  # llama3, mistral, phi3, gemma2, qwen2.5
    timeout_seconds: 120

  # NVIDIA NIM
  nvidia:
    api_key: "${NVIDIA_API_KEY}"
    default_model: "moonshotai/kimi-k2.5"
    timeout_seconds: 120
    enable_thinking: false  # Enable extended thinking (supported models only)

# ---------------------------------------------------------------------------
# Channels — input sources the agent listens on
# ---------------------------------------------------------------------------
channels:
  # Telegram bot integration (webchat is always on via the API server)
  telegram:
    enabled: false
    token: "${TELEGRAM_BOT_TOKEN}"
    allow_from: []  # Restrict to these Telegram user IDs; empty = allow all

# ---------------------------------------------------------------------------
# Skills — tools available to the agent
# ---------------------------------------------------------------------------
skills:
  # Built-in skills to load. Remove any you don't need.
  builtin:
    - file_ops         # Read/write files in the workspace
    - shell            # Run shell commands
    - workspace        # Manage isolated session workspaces
    - scheduler        # Create and manage scheduled tasks
    - skill_manager    # Install/list skills from ClawHub registry
    - project_manager  # Spawn and coordinate multi-agent projects

  # Directories to scan for agentskills.io skill packages (.zip or unpacked)
  skill_dirs:
    - "./skills"  # Relative to the working directory where pulsebot runs

  # Skill names (from SKILL.md frontmatter) to skip even if found on disk
  disabled_skills: []

  # ClawHub skill registry integration
  clawhub:
    # Authentication token — supports env var substitution like other API keys
    auth_token: "${CLAWHUB_AUTH_TOKEN:-}"
    # Alternative: read token from a file instead of config
    # auth_token_path: "~/.clawhub/token"
    # site_url: "https://clawhub.ai"  # Override registry site (default: clawhub.ai)
    # registry_url:   # Auto-discovered from .well-known; override only if needed
    # install_dir:    # Defaults to first entry in skill_dirs

# ---------------------------------------------------------------------------
# API Server — FastAPI REST + WebSocket server
# ---------------------------------------------------------------------------
api:
  host: "0.0.0.0"
  port: 8000
  cors_origins:  # Allowed CORS origins for browser clients
    - "http://localhost:3000"
    - "http://localhost:5173"

# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------
logging:
  level: "INFO"   # DEBUG | INFO | WARNING | ERROR | CRITICAL
  format: "json"  # "json" (structured, for log aggregators) or "text" (human-readable)

# ---------------------------------------------------------------------------
# Memory — vector-indexed long-term memory
# ---------------------------------------------------------------------------
memory:
  enabled: true
  similarity_threshold: 0.95  # Cosine similarity floor for duplicate suppression (0.0–1.0)
  # Embedding provider for vector memory.
  #   "local"  — sentence-transformers, no API key, runs fully offline (default)
  #   "openai" — cloud embeddings, higher quality, requires OPENAI_API_KEY
  #   "ollama" — local via Ollama server
  embedding_provider: "local"
  embedding_model: "all-MiniLM-L6-v2"
  # Common model options by provider:
  #   local:  all-MiniLM-L6-v2 (384-dim, ~100 MB, CPU-friendly)
  #   openai: text-embedding-3-small (1536-dim), text-embedding-3-large (3072-dim)
  #   ollama: mxbai-embed-large (1024-dim), nomic-embed-text (768-dim)
  # embedding_api_key: "${OPENAI_API_KEY}"  # Override OpenAI key for embeddings
  # embedding_host: "${OLLAMA_HOST}"        # Override Ollama host for embeddings
  # embedding_dimensions: 384               # Override vector dimensions (auto-detected if unset)
  embedding_timeout_seconds: 30  # Abort slow embedding requests after N seconds

# ---------------------------------------------------------------------------
# Workspace — isolated per-session file workspaces
# ---------------------------------------------------------------------------
workspace:
  # Root directory on the agent machine where session workspaces are created
  base_dir: "${WORKSPACE_DIR:-./workspaces}"
  # Port the agent's embedded WorkspaceServer listens on (for artifact delivery)
  workspace_port: "${WORKSPACE_PORT:-8001}"
  # Hostname or Docker service name the API server uses to reach the agent
  agent_host: "${AGENT_HOST:-localhost}"
  # Base URL the agent uses to register artifacts with the API server
  api_server_url: "${API_SERVER_URL:-http://localhost:8000}"
  # Shared secret for /internal/workspace/* endpoints.
  # Generate with: python -c "import secrets; print(secrets.token_hex(32))"
  # Must be identical in both agent and API server config.
  internal_api_key: "${WORKSPACE_INTERNAL_KEY:-}"
  # Seconds to wait after spawning a workspace backend before health-checking
  backend_boot_timeout: 3.0

# ---------------------------------------------------------------------------
# Hooks — intercept tool calls for policy enforcement or auditing
# ---------------------------------------------------------------------------
hooks:
  tool_call:
    pre_call: []
    # Example: block specific tools
    # pre_call:
    #   - type: policy
    #     config:
    #       deny_tools: ["shell"]
    #       allow_tools: ["read_file", "write_file"]
    #
    # Example: log every tool call to a webhook before execution
    # pre_call:
    #   - type: webhook
    #     config:
    #       url: "https://my-audit-service.example.com/hook"
    #       auth_header: "Bearer ${WEBHOOK_SECRET}"
    #       timeout: 5.0
    #       fail_open: true  # approve on network error; false = deny on error

# ---------------------------------------------------------------------------
# Multi-Agent — parallel and pipeline sub-agent projects
# ---------------------------------------------------------------------------
multi_agent:
  max_agents_per_project: 10   # Hard cap on sub-agents spawned by a single project
  max_concurrent_projects: 5   # Max simultaneously active projects across all sessions

# ---------------------------------------------------------------------------
# Observability — event stream emitted to the `events` Timeplus stream
# ---------------------------------------------------------------------------
observability:
  events:
    enabled: true
    min_severity: "info"  # Minimum severity to emit: debug | info | warning | error | critical