Documentation
¶
Overview ¶
Package config provides configuration loading and management.
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func DefaultAPIKeyEnv ¶
DefaultAPIKeyEnv returns the default environment variable name for a provider.
Types ¶
type AgentConfig ¶
AgentConfig contains agent identification settings.
type Config ¶
type Config struct {
Agent AgentConfig `toml:"agent"`
LLM LLMConfig `toml:"llm"` // Default LLM settings
SmallLLM LLMConfig `toml:"small_llm"` // Fast/cheap model for summarization
Profiles map[string]Profile `toml:"profiles"` // Capability profiles
Web WebConfig `toml:"web"`
Telemetry TelemetryConfig `toml:"telemetry"`
Storage StorageConfig `toml:"storage"` // Persistent storage settings
MCP MCPConfig `toml:"mcp"` // MCP tool servers
Skills SkillsConfig `toml:"skills"` // Agent Skills
Security SecurityConfig `toml:"security"` // Security framework
Timeouts TimeoutsConfig `toml:"timeouts"` // Network operation timeouts
Embedding EmbeddingConfig `toml:"embedding"` // Embedding provider for resume vectors
Service ServiceConfig `toml:"service"` // Service agent settings (for `agent serve`)
}
Config represents the agent configuration.
func LoadDefault ¶
LoadDefault loads configuration from agent.toml in the current directory.
func (*Config) GetAPIKey ¶
GetAPIKey returns the API key from the configured environment variable. If api_key_env is not set, it uses the default environment variable for the provider.
func (*Config) GetGatewayToken ¶
GetGatewayToken returns the gateway token from the configured environment variable.
func (*Config) GetProfile ¶
GetProfile returns the LLM config for a capability profile. It falls back to the default LLM config if the profile is not found.
func (*Config) GetProfileAPIKey ¶
GetProfileAPIKey returns the API key for a specific profile.
type EmbeddingConfig ¶
type EmbeddingConfig struct {
// Provider name: "openai", "google", "openai-compat", "litellm", "none"
Provider string `toml:"provider"`
// Model name (e.g., "text-embedding-3-small", "text-embedding-004")
Model string `toml:"model"`
// APIKey for the embedding provider (or use credentials.toml)
APIKey string `toml:"api_key"`
// BaseURL for OpenAI-compatible endpoints (Ollama, LiteLLM, etc.)
BaseURL string `toml:"base_url"`
}
EmbeddingConfig holds embedding provider settings for resume vectors.
type LLMConfig ¶
type LLMConfig struct {
Provider string `toml:"provider"`
Model string `toml:"model"`
APIKeyEnv string `toml:"api_key_env"`
MaxTokens int `toml:"max_tokens"`
BaseURL string `toml:"base_url"` // Custom API endpoint (OpenRouter, LiteLLM, Ollama, LMStudio)
Thinking string `toml:"thinking"` // Thinking level: auto|off|low|medium|high
MaxRetries int `toml:"max_retries"` // Max retry attempts (default 5)
RetryBackoff string `toml:"retry_backoff"` // Max backoff duration (default "60s")
}
LLMConfig contains LLM provider settings.
type MCPConfig ¶
type MCPConfig struct {
Servers map[string]MCPServerConfig `toml:"servers"`
}
MCPConfig contains MCP tool server configuration.
type MCPServerConfig ¶
type MCPServerConfig struct {
Command string `toml:"command"`
Args []string `toml:"args,omitempty"`
Env map[string]string `toml:"env,omitempty"`
DeniedTools []string `toml:"denied_tools,omitempty"` // Tools to exclude from LLM
}
MCPServerConfig configures an MCP server connection.
type Profile ¶
type Profile struct {
Provider string `toml:"provider"`
Model string `toml:"model"`
APIKeyEnv string `toml:"api_key_env"`
MaxTokens int `toml:"max_tokens"`
BaseURL string `toml:"base_url"` // Custom API endpoint
Thinking string `toml:"thinking"` // Thinking level: auto|off|low|medium|high
}
Profile represents a capability profile mapping to a specific LLM configuration.
type SecurityConfig ¶
type SecurityConfig struct {
Mode string `toml:"mode"` // "default" or "paranoid"
UserTrust string `toml:"user_trust"` // Trust level for user messages: "trusted", "vetted", "untrusted"
TriageLLM string `toml:"triage_llm"` // Profile name for Tier 2 triage (cheap/fast model)
}
SecurityConfig contains security framework configuration.
type ServiceConfig ¶
type ServiceConfig struct {
// BusURL is the message bus URL for swarm mode (e.g., "nats://localhost:4222").
// If empty, agent runs in local HTTP mode.
BusURL string `toml:"bus_url"`
// HTTPAddr is the HTTP server address for local mode (e.g., ":8080").
// Only used if BusURL is empty.
HTTPAddr string `toml:"http_addr"`
// QueueGroup for load balancing across multiple instances.
// Defaults to capability name if not set.
QueueGroup string `toml:"queue_group"`
// HeartbeatInterval between heartbeat messages.
// Default: "5s"
HeartbeatInterval string `toml:"heartbeat_interval"`
// DrainTimeout is how long to wait for current task during shutdown.
// Default: "30s"
DrainTimeout string `toml:"drain_timeout"`
// Capability override. If empty, capabilities are inferred from Agentfile.
Capability string `toml:"capability"`
}
ServiceConfig contains settings for service agent mode (`agent serve`).
type SkillsConfig ¶
type SkillsConfig struct {
Paths []string `toml:"paths"` // Directories to search for skills
}
SkillsConfig contains Agent Skills configuration.
type StorageConfig ¶
type StorageConfig struct {
Path string `toml:"path"` // Base directory for persistent data (BM25 memory)
}
StorageConfig contains storage settings.
type TelemetryConfig ¶
type TelemetryConfig struct {
Enabled bool `toml:"enabled"`
Endpoint string `toml:"endpoint"` // OTLP endpoint (e.g., localhost:4317)
Protocol string `toml:"protocol"` // grpc (default) or http
Insecure bool `toml:"insecure"` // Disable TLS (default false)
Headers map[string]string `toml:"headers"` // Auth headers (e.g., DD-API-KEY, x-honeycomb-team)
}
TelemetryConfig contains telemetry settings.
type TimeoutsConfig ¶
type TimeoutsConfig struct {
MCP int `toml:"mcp"` // MCP tool call timeout in seconds (default 60)
WebSearch int `toml:"web_search"` // web_search timeout in seconds (default 30)
WebFetch int `toml:"web_fetch"` // web_fetch timeout in seconds (default 60)
}
TimeoutsConfig contains timeout settings for network operations.