Documentation
¶
Index ¶
- Constants
- Variables
- func AddModelPrefix(backend ModelBackend, modelID string) string
- func AppendCacheTTLTimestamp(meta *PortalMetadata)
- func BuildDebounceKey(roomID id.RoomID, sender id.UserID) string
- func CombineDebounceEntries(entries []DebounceEntry) (string, int)
- func EnqueueReactionFeedback(roomID id.RoomID, feedback ReactionFeedback)
- func EstimateTokens(messages []openai.ChatCompletionMessageParamUnion, model string) (int, error)
- func ExtractBeeperPreviews(previews []*PreviewWithImage) []*event.BeeperLinkPreview
- func ExtractURLs(text string, maxURLs int) []string
- func FallbackReasoningLevel(current string) string
- func FormatPreviewsForContext(previews []*event.BeeperLinkPreview, maxChars int) string
- func FormatProxyError(proxyErr *ProxyError) string
- func FormatReactionFeedback(feedback []ReactionFeedback) string
- func FormatUserFacingError(err error) string
- func FromAgentDefinitionContent(content *AgentDefinitionContent) *agents.AgentDefinition
- func GetModelDisplayName(modelID string) string
- func GetPDFEngineFromContext(ctx context.Context) string
- func HasValidPrefix(modelID string) bool
- func IsAuthError(err error) bool
- func IsBillingError(err error) bool
- func IsCacheTTLEligibleProvider(model string) bool
- func IsCompactionFailureError(err error) bool
- func IsGoogleModel(modelID string) bool
- func IsImageError(err error) bool
- func IsMissingToolCallInputError(err error) bool
- func IsModelNotFound(err error) bool
- func IsNoResponseChunksError(err error) bool
- func IsOverloadedError(err error) bool
- func IsRateLimitError(err error) bool
- func IsReasoningError(err error) bool
- func IsRoleOrderingError(err error) bool
- func IsServerError(err error) bool
- func IsTimeoutError(err error) bool
- func IsToolSchemaError(err error) bool
- func IsToolUniquenessError(err error) bool
- func IsToolUseIDFormatError(err error) bool
- func LimitHistoryTurns(prompt []openai.ChatCompletionMessageParamUnion, limit int) []openai.ChatCompletionMessageParamUnion
- func MakeMessageID(eventID id.EventID) networkid.MessageID
- func MakePDFPluginMiddleware(defaultEngine string) option.Middleware
- func MakeToolDedupMiddleware(log zerolog.Logger) option.Middleware
- func NewCallID() string
- func NewTurnID() string
- func ParseExistingLinkPreviews(rawContent map[string]any) []*event.BeeperLinkPreview
- func PreviewsToMapSlice(previews []*event.BeeperLinkPreview) []map[string]any
- func PruneContext(prompt []openai.ChatCompletionMessageParamUnion, config *PruningConfig, ...) []openai.ChatCompletionMessageParamUnion
- func RegisterBeforeCompactionHook(hook CompactionBeforeHook)
- func ResolveAlias(modelID string) string
- func SanitizeGoogleTurnOrdering(prompt []openai.ChatCompletionMessageParamUnion) []openai.ChatCompletionMessageParamUnion
- func SanitizeToolCallID(id string, mode string) string
- func ShouldDebounce(evt *event.Event, body string) bool
- func ShouldRefreshCacheTTL(meta *PortalMetadata) bool
- func StripEnvelope(text string) string
- func ToOpenAIChatTools(tools []ToolDefinition, log *zerolog.Logger) []openai.ChatCompletionToolUnionParam
- func ToOpenAIResponsesInput(messages []UnifiedMessage) responses.ResponseInputParam
- func ToOpenAITools(tools []ToolDefinition, strictMode ToolStrictMode, log *zerolog.Logger) []responses.ToolUnionParam
- func UploadPreviewImages(ctx context.Context, previews []*PreviewWithImage, intent bridgev2.MatrixAPI, ...) []*event.BeeperLinkPreview
- func ValidateGeminiTurns(prompt []openai.ChatCompletionMessageParamUnion) bool
- func WithBridgeToolContext(ctx context.Context, btc *BridgeToolContext) context.Context
- func WithPDFEngine(ctx context.Context, engine string) context.Context
- func WithTypingContext(ctx context.Context, typing *TypingContext) context.Context
- type AIClient
- func (oc *AIClient) BackgroundContext(ctx context.Context) context.Context
- func (oc *AIClient) BroadcastRoomState(ctx context.Context, portal *bridgev2.Portal) error
- func (oc *AIClient) CleanupPortal(ctx context.Context, portal *bridgev2.Portal, reason string)
- func (oc *AIClient) Connect(ctx context.Context)
- func (oc *AIClient) DefaultAgentID() string
- func (oc *AIClient) Disconnect()
- func (oc *AIClient) DownloadAndEncodeMedia(ctx context.Context, mediaURL string, file *event.EncryptedFileInfo, maxMB int) (string, string, error)
- func (oc *AIClient) EmitOpenCodeStreamEvent(ctx context.Context, portal *bridgev2.Portal, ...)
- func (oc *AIClient) FetchMessages(ctx context.Context, params bridgev2.FetchMessagesParams) (*bridgev2.FetchMessagesResponse, error)
- func (oc *AIClient) FinishOpenCodeStream(turnID string)
- func (oc *AIClient) GetCapabilities(ctx context.Context, portal *bridgev2.Portal) *event.RoomFeatures
- func (oc *AIClient) GetChatInfo(ctx context.Context, portal *bridgev2.Portal) (*bridgev2.ChatInfo, error)
- func (oc *AIClient) GetContactList(ctx context.Context) ([]*bridgev2.ResolveIdentifierResponse, error)
- func (oc *AIClient) GetUserInfo(ctx context.Context, ghost *bridgev2.Ghost) (*bridgev2.UserInfo, error)
- func (oc *AIClient) HandleMatrixDeleteChat(ctx context.Context, msg *bridgev2.MatrixDeleteChat) error
- func (oc *AIClient) HandleMatrixDisappearingTimer(ctx context.Context, msg *bridgev2.MatrixDisappearingTimer) (bool, error)
- func (oc *AIClient) HandleMatrixEdit(ctx context.Context, edit *bridgev2.MatrixEdit) error
- func (oc *AIClient) HandleMatrixMessage(ctx context.Context, msg *bridgev2.MatrixMessage) (*bridgev2.MatrixMessageResponse, error)
- func (oc *AIClient) HandleMatrixMessageRemove(ctx context.Context, msg *bridgev2.MatrixMessageRemove) error
- func (oc *AIClient) HandleMatrixReaction(ctx context.Context, msg *bridgev2.MatrixReaction) (*database.Reaction, error)
- func (oc *AIClient) HandleMatrixReactionRemove(ctx context.Context, msg *bridgev2.MatrixReactionRemove) error
- func (oc *AIClient) HandleMatrixTyping(ctx context.Context, typing *bridgev2.MatrixTyping) error
- func (oc *AIClient) HumanUserID(loginID networkid.UserLoginID) networkid.UserID
- func (oc *AIClient) IsLoggedIn() bool
- func (oc *AIClient) IsThisUser(ctx context.Context, userID networkid.UserID) bool
- func (oc *AIClient) Log() *zerolog.Logger
- func (oc *AIClient) Login() *bridgev2.UserLogin
- func (oc *AIClient) LogoutRemote(ctx context.Context)
- func (oc *AIClient) OpenCodeInstances() map[string]*opencodebridge.OpenCodeInstance
- func (oc *AIClient) PortalMeta(portal *bridgev2.Portal) *opencodebridge.PortalMeta
- func (oc *AIClient) PreHandleMatrixReaction(ctx context.Context, msg *bridgev2.MatrixReaction) (bridgev2.MatrixReactionPreResponse, error)
- func (oc *AIClient) ResolveIdentifier(ctx context.Context, identifier string, createChat bool) (*bridgev2.ResolveIdentifierResponse, error)
- func (oc *AIClient) RoomCapabilitiesEventType() event.Type
- func (oc *AIClient) RoomSettingsEventType() event.Type
- func (oc *AIClient) SaveOpenCodeInstances(ctx context.Context, instances map[string]*opencodebridge.OpenCodeInstance) error
- func (oc *AIClient) SavePortal(ctx context.Context, portal *bridgev2.Portal) error
- func (oc *AIClient) SearchUsers(ctx context.Context, query string) ([]*bridgev2.ResolveIdentifierResponse, error)
- func (oc *AIClient) SendPendingStatus(ctx context.Context, portal *bridgev2.Portal, evt *event.Event, msg string)
- func (oc *AIClient) SendSuccessStatus(ctx context.Context, portal *bridgev2.Portal, evt *event.Event)
- func (oc *AIClient) SendSystemNotice(ctx context.Context, portal *bridgev2.Portal, msg string)
- func (oc *AIClient) SenderForOpenCode(instanceID string, fromMe bool) bridgev2.EventSender
- func (oc *AIClient) SetPortalMeta(portal *bridgev2.Portal, meta *opencodebridge.PortalMeta)
- func (oc *AIClient) SetRoomName(ctx context.Context, portal *bridgev2.Portal, name string) error
- type AIErrorContent
- type AIErrorData
- type AIProvider
- type AckReactionGateParams
- type AckReactionScope
- type AgentConfig
- type AgentDefaultsConfig
- type AgentDefinitionContent
- type AgentEntryConfig
- type AgentHandoffContent
- type AgentHandoffData
- type AgentMemberContent
- type AgentState
- type AgentStoreAdapter
- func (s *AgentStoreAdapter) DeleteAgent(ctx context.Context, agentID string) error
- func (s *AgentStoreAdapter) GetAgentByID(ctx context.Context, agentID string) (*agents.AgentDefinition, error)
- func (s *AgentStoreAdapter) GetAgentForRoom(ctx context.Context, meta *PortalMetadata) (*agents.AgentDefinition, error)
- func (s *AgentStoreAdapter) ListAvailableTools(_ context.Context) ([]tools.ToolInfo, error)
- func (s *AgentStoreAdapter) ListModels(ctx context.Context) ([]agents.ModelInfo, error)
- func (s *AgentStoreAdapter) LoadAgents(ctx context.Context) (map[string]*agents.AgentDefinition, error)
- func (s *AgentStoreAdapter) SaveAgent(ctx context.Context, agent *agents.AgentDefinition) error
- type AgentsConfig
- type AgentsEventContent
- type Annotation
- type AnnotationSource
- type ApplyPatchToolsConfig
- type ApprovalInfo
- type AssistantTurnAI
- type AssistantTurnContent
- type AttachmentMetadata
- type BeeperConfig
- type BossStoreAdapter
- func (b *BossStoreAdapter) CreateRoom(ctx context.Context, room tools.RoomData) (string, error)
- func (b *BossStoreAdapter) DeleteAgent(ctx context.Context, agentID string) error
- func (b *BossStoreAdapter) ListAvailableTools(ctx context.Context) ([]tools.ToolInfo, error)
- func (b *BossStoreAdapter) ListModels(ctx context.Context) ([]tools.ModelData, error)
- func (b *BossStoreAdapter) ListRooms(ctx context.Context) ([]tools.RoomData, error)
- func (b *BossStoreAdapter) LoadAgents(ctx context.Context) (map[string]tools.AgentData, error)
- func (b *BossStoreAdapter) ModifyRoom(ctx context.Context, roomID string, updates tools.RoomData) error
- func (b *BossStoreAdapter) RunInternalCommand(ctx context.Context, roomID string, command string) (string, error)
- func (b *BossStoreAdapter) SaveAgent(ctx context.Context, agent tools.AgentData) error
- type BridgeConfig
- type BridgeToolContext
- type BuiltinAlwaysAllowRule
- type ChannelConfig
- type ChannelDefaultsConfig
- type ChannelHeartbeatVisibilityConfig
- type ChannelsConfig
- type CodexClient
- func (cc *CodexClient) Connect(ctx context.Context)
- func (cc *CodexClient) Disconnect()
- func (cc *CodexClient) GetCapabilities(ctx context.Context, portal *bridgev2.Portal) *event.RoomFeatures
- func (cc *CodexClient) GetChatInfo(ctx context.Context, portal *bridgev2.Portal) (*bridgev2.ChatInfo, error)
- func (cc *CodexClient) GetUserInfo(ctx context.Context, ghost *bridgev2.Ghost) (*bridgev2.UserInfo, error)
- func (cc *CodexClient) HandleMatrixDeleteChat(ctx context.Context, msg *bridgev2.MatrixDeleteChat) error
- func (cc *CodexClient) HandleMatrixMessage(ctx context.Context, msg *bridgev2.MatrixMessage) (*bridgev2.MatrixMessageResponse, error)
- func (cc *CodexClient) IsLoggedIn() bool
- func (cc *CodexClient) IsThisUser(ctx context.Context, userID networkid.UserID) bool
- func (cc *CodexClient) LogoutRemote(ctx context.Context)
- type CodexClientInfo
- type CodexConfig
- type CodexLogin
- type CollaborationInfo
- type CollaborationParticipant
- type CommandsConfig
- type CompactionAfterHook
- type CompactionBeforeHook
- type CompactionConfig
- type CompactionEvent
- type CompactionEventEmitter
- type CompactionEventType
- type CompactionHookContext
- type CompactionHookResult
- type CompactionHooks
- type CompactionResult
- type Compactor
- func (c *Compactor) CompactContext(ctx context.Context, sessionID string, ...) (*CompactionResult, []openai.ChatCompletionMessageParamUnion)
- func (c *Compactor) CompactOnOverflow(ctx context.Context, sessionID string, ...) (*CompactionResult, []openai.ChatCompletionMessageParamUnion, bool)
- func (c *Compactor) SetEventEmitter(emitter CompactionEventEmitter)
- func (c *Compactor) SetSummarizationModel(model string)
- type Config
- type ContentPart
- type ContentPartType
- type ContextLengthError
- type CronConfig
- type DebounceBuffer
- type DebounceEntry
- type Debouncer
- type DedupeCache
- type DesktopAPIInstance
- type DirectChatConfig
- type EffectiveSettings
- type EnvelopeFormatOptions
- type EventUsageInfo
- type FailoverReason
- type FetchConfig
- type FileAnnotation
- type GenerateParams
- type GenerateResponse
- type GeneratedFileRef
- type GenerationDetails
- type GenerationProgress
- type GenerationStatusContent
- type GhostMetadata
- type GravatarProfile
- type GravatarState
- type GroupChatConfig
- type HeartbeatActiveHoursConfig
- type HeartbeatConfig
- type HeartbeatEventPayload
- type HeartbeatIndicatorType
- type HeartbeatRunConfig
- type HeartbeatRunOutcome
- type HeartbeatRunner
- type HeartbeatState
- type HeartbeatWake
- type HeartbeatWakeHandler
- type ImageDimensionError
- type ImageGenerationMetadata
- type ImageSizeError
- type InboundConfig
- type InboundDebounceConfig
- type LinkPreviewConfig
- type LinkPreviewer
- func (lp *LinkPreviewer) FetchPreview(ctx context.Context, urlStr string) (*PreviewWithImage, error)
- func (lp *LinkPreviewer) FetchPreviews(ctx context.Context, urls []string) []*PreviewWithImage
- func (lp *LinkPreviewer) FetchPreviewsWithCitations(ctx context.Context, urls []string, citations []sourceCitation) []*PreviewWithImage
- func (lp *LinkPreviewer) PreviewFromCitation(ctx context.Context, urlStr string, c sourceCitation) *PreviewWithImage
- type MCPAlwaysAllowRule
- type MCPServerConfig
- type MCPToolsConfig
- type MatrixReactionSummary
- type MatrixRoomInfo
- type MatrixUserProfile
- type MediaToolsConfig
- type MediaUnderstandingAttachmentDecision
- type MediaUnderstandingAttachmentsConfig
- type MediaUnderstandingCapability
- type MediaUnderstandingConfig
- type MediaUnderstandingDecision
- type MediaUnderstandingDeepgramConfig
- type MediaUnderstandingKind
- type MediaUnderstandingModelConfig
- type MediaUnderstandingModelDecision
- type MediaUnderstandingOutput
- type MediaUnderstandingScopeConfig
- type MediaUnderstandingScopeMatch
- type MediaUnderstandingScopeRule
- type MemoryConfig
- type MemoryFactContent
- type MemoryFlushConfig
- type MemoryIndexEntry
- type MemorySearchBatchConfig
- type MemorySearchBatchStatus
- type MemorySearchCacheConfig
- type MemorySearchCacheStatus
- type MemorySearchChunkingConfig
- type MemorySearchConfig
- type MemorySearchExperimentalConfig
- type MemorySearchFTSStatus
- type MemorySearchHybridConfig
- type MemorySearchManager
- func (m *MemorySearchManager) Close()
- func (m *MemorySearchManager) ProbeEmbeddingAvailability(ctx context.Context) (bool, string)
- func (m *MemorySearchManager) ProbeVectorAvailability(ctx context.Context) bool
- func (m *MemorySearchManager) ReadFile(ctx context.Context, relPath string, from, lines *int) (map[string]any, error)
- func (m *MemorySearchManager) Search(ctx context.Context, query string, opts memory.SearchOptions) ([]memory.SearchResult, error)
- func (m *MemorySearchManager) Status() memory.ProviderStatus
- func (m *MemorySearchManager) StatusDetails(ctx context.Context) (*MemorySearchStatus, error)
- type MemorySearchQueryConfig
- type MemorySearchRemoteConfig
- type MemorySearchSessionSyncConfig
- type MemorySearchSourceCount
- type MemorySearchStatus
- type MemorySearchStoreConfig
- type MemorySearchSyncConfig
- type MemorySearchVectorConfig
- type MemorySearchVectorStatus
- type MessageMetadata
- type MessageRole
- type MessagesConfig
- type ModelAPI
- type ModelBackend
- type ModelCache
- type ModelCapabilities
- type ModelCapabilitiesEventContent
- type ModelCatalogEntry
- type ModelDefinitionConfig
- type ModelInfo
- type ModelProviderConfig
- type ModelsConfig
- type NexusToolsConfig
- type NonFallbackError
- type NormalizedLocation
- type OpenAIConnector
- func (oc *OpenAIConnector) CreateLogin(ctx context.Context, user *bridgev2.User, flowID string) (bridgev2.LoginProcess, error)
- func (oc *OpenAIConnector) FillPortalBridgeInfo(portal *bridgev2.Portal, content *event.BridgeEventContent)
- func (oc *OpenAIConnector) GetBridgeInfoVersion() (info, capabilities int)
- func (oc *OpenAIConnector) GetCapabilities() *bridgev2.NetworkGeneralCapabilities
- func (oc *OpenAIConnector) GetConfig() (example string, data any, upgrader configupgrade.Upgrader)
- func (oc *OpenAIConnector) GetDBMetaTypes() database.MetaTypes
- func (oc *OpenAIConnector) GetLoginFlows() []bridgev2.LoginFlow
- func (oc *OpenAIConnector) GetName() bridgev2.BridgeName
- func (oc *OpenAIConnector) Init(bridge *bridgev2.Bridge)
- func (oc *OpenAIConnector) LoadUserLogin(ctx context.Context, login *bridgev2.UserLogin) error
- func (oc *OpenAIConnector) SetMatrixCredentials(accessToken, homeserver string)
- func (oc *OpenAIConnector) Start(ctx context.Context) error
- func (oc *OpenAIConnector) Stop(ctx context.Context)
- type OpenAILogin
- type OpenAIProvider
- func NewOpenAIProviderWithBaseURL(apiKey, baseURL string, log zerolog.Logger) (*OpenAIProvider, error)
- func NewOpenAIProviderWithPDFPlugin(apiKey, baseURL, userID, pdfEngine string, headers map[string]string, ...) (*OpenAIProvider, error)
- func NewOpenAIProviderWithUserID(apiKey, baseURL, userID string, log zerolog.Logger) (*OpenAIProvider, error)
- func (o *OpenAIProvider) Client() openai.Client
- func (o *OpenAIProvider) Generate(ctx context.Context, params GenerateParams) (*GenerateResponse, error)
- func (o *OpenAIProvider) GenerateStream(ctx context.Context, params GenerateParams) (<-chan StreamEvent, error)
- func (o *OpenAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
- func (o *OpenAIProvider) Name() string
- type OpenAIRemoteMessage
- func (m *OpenAIRemoteMessage) AddLogContext(c zerolog.Context) zerolog.Context
- func (m *OpenAIRemoteMessage) ConvertMessage(ctx context.Context, portal *bridgev2.Portal, intent bridgev2.MatrixAPI) (*bridgev2.ConvertedMessage, error)
- func (m *OpenAIRemoteMessage) GetID() networkid.MessageID
- func (m *OpenAIRemoteMessage) GetPortalKey() networkid.PortalKey
- func (m *OpenAIRemoteMessage) GetSender() bridgev2.EventSender
- func (m *OpenAIRemoteMessage) GetStreamOrder() int64
- func (m *OpenAIRemoteMessage) GetTimestamp() time.Time
- func (m *OpenAIRemoteMessage) GetTransactionID() networkid.TransactionID
- func (m *OpenAIRemoteMessage) GetType() bridgev2.RemoteEventType
- type OpenCodeConfig
- type OrchestrationConfig
- type PDFConfig
- type PDFPluginConfig
- type PortalInitOpts
- type PortalMetadata
- type PreDeltaError
- type PreviewWithImage
- type ProviderBraveConfig
- type ProviderConfig
- type ProviderDirectConfig
- type ProviderExaConfig
- type ProviderOpenRouterConfig
- type ProviderPerplexityConfig
- type ProvidersConfig
- type ProvisioningAPI
- type ProxyError
- type ProxyErrorResponse
- type PruningConfig
- type QueueConfig
- type QueueDirective
- type QueueDropPolicy
- type QueueInlineOptions
- type QueueMode
- type QueueSettings
- type ReactionFeedback
- type ReactionQueue
- type ReasoningEffortOption
- type ReplyTarget
- type ReqSetDefaults
- type ResolvedHeartbeatVisibility
- type ResponseDirectives
- type ResponsePrefixContext
- type ResultStatus
- type RoomCapabilitiesEventContent
- type RoomSettingsEventContent
- type SearchConfig
- type ServiceConfig
- type ServiceConfigMap
- type ServiceTokens
- type SessionConfig
- type SettingExplanation
- type SettingSource
- type StatusDisplay
- type StepBoundaryContent
- type StepDisplay
- type StreamContentType
- type StreamCursor
- type StreamDeltaContent
- type StreamEvent
- type StreamEventType
- type StreamingConfig
- type SystemEvent
- type ThinkingContent
- type TimingInfo
- type ToolApprovalDecision
- type ToolApprovalDecisionCodex
- type ToolApprovalKind
- type ToolApprovalsConfig
- type ToolApprovalsRuntimeConfig
- type ToolArtifact
- type ToolCallContent
- type ToolCallData
- type ToolCallMetadata
- type ToolCallResult
- type ToolDefinition
- type ToolDisplay
- type ToolInfo
- type ToolOutputPreview
- type ToolProgressContent
- type ToolProgressDetails
- type ToolProvidersConfig
- type ToolResultContent
- type ToolResultData
- type ToolResultDisplay
- type ToolStatus
- type ToolStrictMode
- type ToolType
- type TurnCancelledContent
- type TurnStatus
- type TypingContext
- type TypingController
- type TypingControllerOptions
- type TypingMode
- type TypingSignaler
- type UnifiedMessage
- type UsageInfo
- type UserDefaults
- type UserLoginMetadata
- type VFSToolsConfig
Constants ¶
const ( BuilderRoomSlug = "builder" BuilderRoomName = "Manage AI Chats" )
Builder room constants
const ( ToolNameCalculator = toolspec.CalculatorName ToolNameWebSearch = toolspec.WebSearchName )
Tool name constants
const ( AIMaxTextLength = 100000 AIEditMaxAge = 24 * time.Hour )
AI bridge capability constants
const ( // DefaultDedupeTTL is the time-to-live for deduplication entries (20 minutes like clawdbot) DefaultDedupeTTL = 20 * time.Minute // DefaultDedupeMaxSize is the maximum number of entries in the cache DefaultDedupeMaxSize = 5000 )
const ( AIRateLimited status.BridgeStateErrorCode = "ai-rate-limited" AIAuthFailed status.BridgeStateErrorCode = "ai-auth-failed" AIContextTooLong status.BridgeStateErrorCode = "ai-context-too-long" AIModelNotFound status.BridgeStateErrorCode = "ai-model-not-found" AIProviderError status.BridgeStateErrorCode = "ai-provider-error" AIBillingError status.BridgeStateErrorCode = "ai-billing-error" AIOverloaded status.BridgeStateErrorCode = "ai-overloaded" AITimeout status.BridgeStateErrorCode = "ai-timeout" AIImageError status.BridgeStateErrorCode = "ai-image-error" )
Bridge state error codes for AI-specific errors
const ( // Retryable errors ErrorContextTooLong = "context_too_long" ErrorContentFilter = "content_filter" ErrorToolFailed = "tool_failed" ErrorToolTimeout = "tool_timeout" // Non-retryable errors ErrorCancelled = "cancelled" ErrorInvalidInput = "invalid_input" )
const ( ToolWebSearch = "web_search" ToolFunctionCalling = "function_calling" )
Tool constants for model capabilities
const ( RelReplace = matrixevents.RelReplace RelReference = matrixevents.RelReference RelThread = matrixevents.RelThread RelInReplyTo = matrixevents.RelInReplyTo )
Relation types
const ( BeeperAIKey = matrixevents.BeeperAIKey BeeperAIToolCallKey = matrixevents.BeeperAIToolCallKey BeeperAIToolResultKey = matrixevents.BeeperAIToolResultKey )
Content field keys
const ( ProviderBeeper = "beeper" // Beeper's OpenRouter proxy ProviderOpenAI = "openai" // Direct OpenAI API ProviderOpenRouter = "openrouter" // Direct OpenRouter API ProviderMagicProxy = "magic_proxy" // Magic Proxy (OpenRouter-compatible) ProviderCodex = "codex" // Local Codex app-server (stdio JSON-RPC) FlowCustom = "custom" // Custom login flow (provider resolved during login) )
Provider constants - all use OpenAI SDK with different base URLs
const ( DefaultModelOpenAI = "openai/gpt-5.2" // OpenRouter-compatible backends (OpenRouter + Magic Proxy) should default to Opus. DefaultModelOpenRouter = "anthropic/claude-opus-4.6" DefaultModelBeeper = "anthropic/claude-opus-4.6" )
Default models for each provider
const ( DefaultQueueDebounceMs = 1000 DefaultQueueCap = 20 )
const ( ToolNameMemorySearch = toolspec.MemorySearchName ToolNameMemoryGet = toolspec.MemoryGetName ToolNameGravatarFetch = toolspec.GravatarFetchName ToolNameGravatarSet = toolspec.GravatarSetName ToolNameBeeperDocs = toolspec.BeeperDocsName ToolNameBeeperSendFeedback = toolspec.BeeperSendFeedbackName ToolNameRead = toolspec.ReadName ToolNameApplyPatch = toolspec.ApplyPatchName ToolNameWrite = toolspec.WriteName ToolNameEdit = toolspec.EditName )
Memory tool names (matching OpenClaw interface)
const DefaultDebounceMs = 0
DefaultDebounceMs is the default debounce delay in milliseconds.
const DefaultGeminiImageModel = "gemini-3-pro-image-preview"
DefaultGeminiImageModel is the default direct Gemini image model.
const DefaultImageModel = "google/gemini-3-pro-image-preview"
DefaultImageModel is the default model for image generation.
const DefaultOpenAIImageModel = "gpt-image-1"
DefaultOpenAIImageModel is the default direct OpenAI image model.
const DefaultQueueDrop = QueueDropSummarize
const DefaultQueueMode = QueueModeCollect
const ImageResultPrefix = "IMAGE:"
ImageResultPrefix is the prefix used to identify image results that need media sending.
const ImagesResultPrefix = "IMAGES:"
ImagesResultPrefix is the prefix used to identify multi-image results.
const SilentReplyToken = "NO_REPLY"
SilentReplyToken is the token the agent uses to indicate no response is needed. Matches clawdbot/OpenClaw's SILENT_REPLY_TOKEN.
const TTSResultPrefix = "AUDIO:"
TTSResultPrefix is the prefix used to identify TTS results that need audio sending.
const ToolNameBetterWebSearch = "better_web_search"
const ToolNameCron = toolspec.CronName
ToolNameCron is the name of the cron tool.
const ToolNameImage = toolspec.ImageName
ToolNameImage is the OpenClaw-compatible image analysis tool.
const ToolNameImageGenerate = toolspec.ImageGenerateName
ToolNameImageGenerate is the image generation tool (non-OpenClaw).
const ToolNameMessage = toolspec.MessageName
ToolNameMessage is the name of the message tool.
const ToolNameSessionStatus = toolspec.SessionStatusName
ToolNameSessionStatus is the name of the session status tool.
const ToolNameTTS = toolspec.TTSName
ToolNameTTS is the name of the text-to-speech tool.
const ToolNameWebFetch = toolspec.WebFetchName
ToolNameWebFetch is the name of the web fetch tool.
Variables ¶
var ( ErrApprovalMissingID = errors.New("missing approval id") ErrApprovalMissingRoom = errors.New("missing room id") ErrApprovalOnlyOwner = errors.New("only the owner can approve") ErrApprovalUnknown = errors.New("unknown or expired approval id") ErrApprovalWrongRoom = errors.New("approval id does not belong to this room") ErrApprovalExpired = errors.New("approval expired") ErrApprovalAlreadyHandled = errors.New("approval already resolved") )
Sentinel errors for approval resolution, so callers can map them to UI/toasts without parsing error strings.
var ( ErrAPIKeyRequired = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.API_KEY_REQUIRED", Err: "Enter an API key.", StatusCode: http.StatusBadRequest, } ErrBaseURLRequired = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.BASE_URL_REQUIRED", Err: "Enter a base URL.", StatusCode: http.StatusBadRequest, } ErrOpenAIOrOpenRouterRequired = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.OPENAI_OR_OPENROUTER_REQUIRED", Err: "Enter an OpenAI or OpenRouter API key.", StatusCode: http.StatusBadRequest, } ErrAPIKeyInvalid = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.INVALID_API_KEY", Err: "That API key is invalid.", StatusCode: http.StatusUnauthorized, } ErrProviderUnavailable = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.PROVIDER_UNAVAILABLE", Err: "The AI provider is unavailable.", StatusCode: http.StatusServiceUnavailable, } ErrContextLengthExceeded = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.CONTEXT_LENGTH_EXCEEDED", Err: "Message is too long. Some earlier messages were trimmed.", StatusCode: http.StatusRequestEntityTooLarge, } ErrUnsupportedMediaType = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.UNSUPPORTED_MEDIA_TYPE", Err: "This file type isn't supported by the current model.", StatusCode: http.StatusUnsupportedMediaType, } ErrModelNotFound = bridgev2.RespError{ ErrCode: "IO.AI_BRIDGE.MODEL_NOT_FOUND", Err: "That model isn't available.", StatusCode: http.StatusNotFound, } )
Pre-defined bridgev2.RespError constants for consistent error responses
var AIErrorEventType = matrixevents.AIErrorEventType
AIErrorEventType represents AI generation errors that are part of conversation
var AgentHandoffEventType = matrixevents.AgentHandoffEventType
AgentHandoffEventType represents a handoff between agents
var AgentsEventType = matrixevents.AgentsEventType
AgentsEventType configures active agents in a room
var AssistantTurnEventType = matrixevents.AssistantTurnEventType
AssistantTurnEventType is the container event for an assistant's response
var BridgeStateHumanErrors = map[status.BridgeStateErrorCode]string{ AIRateLimited: "You're sending requests too quickly. Wait a moment, then try again.", AIAuthFailed: "Authentication failed. Check your API key or sign in again.", AIContextTooLong: "This conversation is too long for this model.", AIModelNotFound: "That model isn't available.", AIProviderError: "The AI provider returned an error. Try again later.", AIBillingError: "There's a billing issue with the AI provider. Check your account or credits.", AIOverloaded: "The AI service is busy right now. Try again in a moment.", AITimeout: "The request timed out. Try again.", AIImageError: "That image is too large or has unsupported dimensions for this model.", }
BridgeStateHumanErrors provides human-readable messages for AI bridge error codes
var CommandActivation = registerAICommand(commandregistry.Definition{ Name: "activation", Description: "Set group activation policy (mention|always)", Args: "<mention|always>", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnActivation, })
CommandActivation handles the !ai activation command.
var CommandAgent = registerAICommand(commandregistry.Definition{ Name: "agent", Description: "Get or set the agent for this chat", Args: "[_agent id_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnAgent, })
CommandAgent handles the !ai agent command
var CommandAgents = registerAICommand(commandregistry.Definition{ Name: "agents", Description: "List available agents", Section: HelpSectionAI, RequiresLogin: true, Handler: fnAgents, })
CommandAgents handles the !ai agents command
var CommandApprove = registerAICommand(commandregistry.Definition{ Name: "approve", Description: "Resolve a pending approval request", Args: "<approvalId> <allow|always|deny> [reason]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnApprove, })
CommandApprove handles the !ai approve command.
var CommandClay = registerAICommand(commandregistry.Definition{ Name: "clay", Description: "Quick setup for Clay MCP (Nexus)", Args: "[connect|status|disconnect|remove|token]", Section: HelpSectionAI, RequiresLogin: true, Handler: fnClayCommand, })
CommandClay provides a shortcut for Clay/Nexus MCP bootstrap.
var CommandCommands = registerAICommand(commandregistry.Definition{ Name: "commands", Aliases: []string{"cmds"}, Description: "Show AI command groups and recommended command forms", Section: HelpSectionAI, RequiresPortal: false, RequiresLogin: true, Handler: fnCommands, })
var CommandConfig = registerAICommand(commandregistry.Definition{ Name: "config", Description: "Show current chat configuration", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnConfig, })
CommandConfig handles the !ai config command
var CommandContext = registerAICommand(commandregistry.Definition{ Name: "context", Description: "Get or set context message limit (1-100)", Args: "[_count_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnContext, })
CommandContext handles the !ai context command
var CommandCreateAgent = registerAICommand(commandregistry.Definition{ Name: "create-agent", Description: "Create a new custom agent", Args: "<id> <name> [model] [system prompt...]", Section: HelpSectionAI, RequiresLogin: true, Handler: fnCreateAgent, })
CommandCreateAgent handles the !ai create-agent command
var CommandCron = registerAICommand(commandregistry.Definition{ Name: "cron", Description: "Inspect/manage cron jobs", Args: "[status|list|runs|run|remove] ...", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnCron, })
CommandCron handles the !ai cron command.
var CommandDebounce = registerAICommand(commandregistry.Definition{ Name: "debounce", Description: "Get or set message debounce delay (ms), 'off' to disable, 'default' to reset", Args: "[_delay_|off|default]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnDebounce, })
CommandDebounce handles the !ai debounce command
var CommandDeleteAgent = registerAICommand(commandregistry.Definition{ Name: "delete-agent", Description: "Delete a custom agent", Args: "<id>", Section: HelpSectionAI, RequiresLogin: true, Handler: fnDeleteAgent, })
CommandDeleteAgent handles the !ai delete-agent command.
var CommandDesktopAPI = registerAICommand(commandregistry.Definition{ Name: "desktop-api", Description: "Manage Beeper Desktop API instances", Args: "<add|list|remove> [args]", Section: HelpSectionAI, RequiresPortal: false, RequiresLogin: true, Handler: fnDesktopAPI, })
CommandDesktopAPI handles the !ai desktop-api command.
var CommandElevated = registerAICommand(commandregistry.Definition{ Name: "elevated", Aliases: []string{"elev"}, Description: "Get or set elevated access (off|on|ask|full)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnElevated, })
CommandElevated handles the !ai elevated command.
var CommandFork = registerAICommand(commandregistry.Definition{ Name: "fork", Description: "Fork conversation to a new chat", Args: "[_event_id_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnFork, })
CommandFork handles the !ai fork command.
var CommandGravatar = registerAICommand(commandregistry.Definition{ Name: "gravatar", Description: "Fetch or set the Gravatar profile for this login", Args: "[fetch|set] [email]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnGravatar, })
CommandGravatar handles the !ai gravatar command.
var CommandLastHeartbeat = registerAICommand(commandregistry.Definition{ Name: "last-heartbeat", Description: "Show the last heartbeat event for this login", Section: HelpSectionAI, RequiresPortal: false, RequiresLogin: true, Handler: fnLastHeartbeat, })
CommandLastHeartbeat handles the !ai last-heartbeat command.
var CommandMCP = registerAICommand(commandregistry.Definition{ Name: "mcp", Description: "Manage MCP servers for this login", Args: "<add|remove|connect|disconnect|list> [args]", Section: HelpSectionAI, RequiresLogin: true, Handler: fnMCPCommand, })
CommandMCP handles the !ai mcp command.
var CommandManage = registerAICommand(commandregistry.Definition{ Name: "manage", Description: "Open the agent management room (for creating custom agents)", Section: HelpSectionAI, RequiresLogin: true, Handler: fnManage, })
CommandManage handles the !ai manage command. This creates or opens the Builder room for advanced users to manage custom agents.
var CommandMemory = registerAICommand(commandregistry.Definition{ Name: "memory", Description: "Inspect and edit memory files/index", Args: "<status|reindex|search|get|set|append> [...]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnMemory, })
CommandMemory handles the !ai memory command.
var CommandMode = registerAICommand(commandregistry.Definition{ Name: "mode", Description: "Set conversation mode (messages|responses)", Args: "[_mode_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnMode, })
CommandMode handles the !ai mode command.
var CommandModel = registerAICommand(commandregistry.Definition{ Name: "model", Description: "Get or set the AI model for this chat", Args: "[_model name_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnModel, })
CommandModel handles the !ai model command.
var CommandModels = registerAICommand(commandregistry.Definition{ Name: "models", Description: "List all available models", Section: HelpSectionAI, RequiresLogin: true, Handler: fnModels, })
CommandModels handles the !ai models command.
var CommandNew = registerAICommand(commandregistry.Definition{ Name: "new", Description: "Create a new chat of the same type (agent or model)", Args: "[<directory>] | [agent <agent_id>]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnNew, })
CommandNew handles the !ai new command.
var CommandOpenCode = registerAICommand(commandregistry.Definition{ Name: "opencode", Aliases: []string{"openconnect"}, Description: "Manage OpenCode connections", Args: "<add|list|new|remove> [args]", Section: HelpSectionAI, RequiresLogin: true, Handler: fnOpenCodeCommand, })
CommandOpenCode handles the !ai opencode command.
var CommandPlayground = registerAICommand(commandregistry.Definition{ Name: "playground", Aliases: []string{"sandbox"}, Description: "Manage AI chat rooms (new, list)", Args: "<new [model] | list>", Section: HelpSectionAI, RequiresLogin: true, Handler: fnPlayground, })
CommandPlayground handles the !ai playground command with sub-commands.
var CommandQueue = registerAICommand(commandregistry.Definition{ Name: "queue", Description: "Inspect or configure the message queue", Args: "[status|reset|<mode>] [debounce:<dur>] [cap:<n>] [drop:<old|new|summarize>]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnQueue, })
CommandQueue handles the !ai queue command.
var CommandReasoning = registerAICommand(commandregistry.Definition{ Name: "reasoning", Description: "Get or set reasoning visibility/effort (off|on|low|medium|high|xhigh)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnReasoning, })
CommandReasoning handles the !ai reasoning command.
var CommandRegenerate = registerAICommand(commandregistry.Definition{ Name: "regenerate", Description: "Regenerate the last AI response", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnRegenerate, })
CommandRegenerate handles the !ai regenerate command.
var CommandReset = registerAICommand(commandregistry.Definition{ Name: "reset", Description: "Start a new session/thread in this room", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnReset, })
CommandReset handles the !ai reset command.
var CommandSend = registerAICommand(commandregistry.Definition{ Name: "send", Description: "Allow/deny sending messages (on|off|inherit)", Args: "<on|off|inherit>", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnSend, })
CommandSend handles the !ai send command.
var CommandStatus = registerAICommand(commandregistry.Definition{ Name: "status", Description: "Show current session status", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnStatus, })
CommandStatus handles the !ai status command.
var CommandStop = registerAICommand(commandregistry.Definition{ Name: "stop", Aliases: []string{"abort", "interrupt"}, Description: "Abort the current run and clear the pending queue", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnStop, })
CommandStop handles the !ai stop command.
var CommandSystemPrompt = registerAICommand(commandregistry.Definition{ Name: "system-prompt", Aliases: []string{"prompt", "system"}, Description: "Get or set the system prompt (shows full constructed prompt)", Args: "[_text_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnSystemPrompt, })
CommandSystemPrompt handles the !ai system-prompt command.
var CommandTemp = registerAICommand(commandregistry.Definition{ Name: "temp", Aliases: []string{"temperature"}, Description: "Get or set the temperature (0-2)", Args: "[_value_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTemp, })
CommandTemp handles the !ai temp command.
var CommandThink = registerAICommand(commandregistry.Definition{ Name: "think", Description: "Get or set thinking level (off|minimal|low|medium|high|xhigh)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnThink, })
CommandThink handles the !ai think command.
var CommandTimezone = registerAICommand(commandregistry.Definition{ Name: "timezone", Aliases: []string{"tz"}, Description: "Get or set your timezone for all chats (IANA name)", Args: "[_timezone_|reset]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTimezone, })
CommandTimezone handles the !ai timezone command.
var CommandTitle = registerAICommand(commandregistry.Definition{ Name: "title", Aliases: []string{"retitle"}, Description: "Regenerate the chat room title", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTitle, })
CommandTitle handles the !ai title command.
var CommandTokens = registerAICommand(commandregistry.Definition{ Name: "tokens", Aliases: []string{"maxtokens"}, Description: "Get or set max completion tokens (1-16384)", Args: "[_count_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTokens, })
CommandTokens handles the !ai tokens command.
var CommandTools = registerAICommand(commandregistry.Definition{ Name: "tools", Description: "Enable/disable tools", Args: "[on|off] [_tool_]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTools, })
CommandTools handles the !ai tools command.
var CommandTyping = registerAICommand(commandregistry.Definition{ Name: "typing", Description: "Get or set typing indicator behavior for this chat", Args: "[never|instant|thinking|message|off|reset|interval <seconds>]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnTyping, })
CommandTyping handles the !ai typing command.
var CommandVerbose = registerAICommand(commandregistry.Definition{ Name: "verbose", Aliases: []string{"v"}, Description: "Get or set verbosity (off|on|full)", Args: "[level]", Section: HelpSectionAI, RequiresPortal: true, RequiresLogin: true, Handler: fnVerbose, })
CommandVerbose handles the !ai verbose command.
var CommandWhoami = registerAICommand(commandregistry.Definition{ Name: "whoami", Aliases: []string{"id"}, Description: "Show your Matrix user ID", Section: HelpSectionAI, RequiresPortal: false, RequiresLogin: false, Handler: fnWhoami, })
CommandWhoami handles the !ai whoami command.
var CompactionStatusEventType = matrixevents.CompactionStatusEventType
CompactionStatusEventType notifies clients about context compaction.
var GenerationStatusEventType = matrixevents.GenerationStatusEventType
GenerationStatusEventType provides rich status updates during generation.
var HelpSectionAI = commands.HelpSection{
Name: "AI Chat",
Order: 30,
}
HelpSectionAI is the help section for AI-related commands.
var ModelCapabilitiesEventType = matrixevents.ModelCapabilitiesEventType
ModelCapabilitiesEventType is the Matrix state event type for broadcasting available models.
var ModelManifest = struct { Models map[string]ModelInfo Aliases map[string]string }{ Models: map[string]ModelInfo{ "anthropic/claude-3.7-sonnet": { ID: "anthropic/claude-3.7-sonnet", Name: "Claude 3.7 Sonnet", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 64000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-3.7-sonnet:thinking": { ID: "anthropic/claude-3.7-sonnet:thinking", Name: "Claude 3.7 Sonnet (Reasoning)", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 64000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-haiku-4.5": { ID: "anthropic/claude-haiku-4.5", Name: "Claude Haiku 4.5", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 64000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-opus-4": { ID: "anthropic/claude-opus-4", Name: "Claude Opus 4", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 32000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-opus-4.1": { ID: "anthropic/claude-opus-4.1", Name: "Claude 4.1 Opus", 
Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 32000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-opus-4.5": { ID: "anthropic/claude-opus-4.5", Name: "Claude Opus 4.5", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 64000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-opus-4.6": { ID: "anthropic/claude-opus-4.6", Name: "Claude Opus 4.6", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 1000000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-sonnet-4": { ID: "anthropic/claude-sonnet-4", Name: "Claude 4 Sonnet", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 1000000, MaxOutputTokens: 64000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "anthropic/claude-sonnet-4.5": { ID: "anthropic/claude-sonnet-4.5", Name: "Claude Sonnet 4.5", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: 
true, ContextWindow: 1000000, MaxOutputTokens: 64000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "deepseek/deepseek-chat-v3-0324": { ID: "deepseek/deepseek-chat-v3-0324", Name: "DeepSeek v3 (0324)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 163840, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "deepseek/deepseek-chat-v3.1": { ID: "deepseek/deepseek-chat-v3.1", Name: "DeepSeek v3.1", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 32768, MaxOutputTokens: 7168, AvailableTools: []string{ToolFunctionCalling}, }, "deepseek/deepseek-r1": { ID: "deepseek/deepseek-r1", Name: "DeepSeek R1 (Original)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 64000, MaxOutputTokens: 16000, AvailableTools: []string{ToolFunctionCalling}, }, "deepseek/deepseek-r1-0528": { ID: "deepseek/deepseek-r1-0528", Name: "DeepSeek R1 (0528)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 163840, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "deepseek/deepseek-r1-distill-qwen-32b": { ID: "deepseek/deepseek-r1-distill-qwen-32b", Name: "DeepSeek R1 (Qwen Distilled)", Provider: "openrouter", API: 
"openai-completions", SupportsVision: false, SupportsToolCalling: false, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 32768, MaxOutputTokens: 32768, AvailableTools: []string{}, }, "deepseek/deepseek-v3.1-terminus": { ID: "deepseek/deepseek-v3.1-terminus", Name: "DeepSeek v3.1 Terminus", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 163840, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "deepseek/deepseek-v3.2": { ID: "deepseek/deepseek-v3.2", Name: "DeepSeek v3.2", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 163840, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "google/gemini-2.0-flash-001": { ID: "google/gemini-2.0-flash-001", Name: "Gemini 2.0 Flash", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 8192, AvailableTools: []string{ToolFunctionCalling}, }, "google/gemini-2.0-flash-lite-001": { ID: "google/gemini-2.0-flash-lite-001", Name: "Gemini 2.0 Flash Lite", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 8192, AvailableTools: 
[]string{ToolFunctionCalling}, }, "google/gemini-2.5-flash": { ID: "google/gemini-2.5-flash", Name: "Gemini 2.5 Flash", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 65535, AvailableTools: []string{ToolFunctionCalling}, }, "google/gemini-2.5-flash-image": { ID: "google/gemini-2.5-flash-image", Name: "Nano Banana", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: false, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: true, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 32768, MaxOutputTokens: 32768, AvailableTools: []string{}, }, "google/gemini-2.5-flash-lite": { ID: "google/gemini-2.5-flash-lite", Name: "Gemini 2.5 Flash Lite", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 65535, AvailableTools: []string{ToolFunctionCalling}, }, "google/gemini-2.5-pro": { ID: "google/gemini-2.5-pro", Name: "Gemini 2.5 Pro", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "google/gemini-3-flash-preview": { ID: "google/gemini-3-flash-preview", Name: "Gemini 3 Flash", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, 
SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 65535, AvailableTools: []string{ToolFunctionCalling}, }, "google/gemini-3-pro-image-preview": { ID: "google/gemini-3-pro-image-preview", Name: "Nano Banana Pro", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: false, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: true, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 65536, MaxOutputTokens: 32768, AvailableTools: []string{}, }, "google/gemini-3-pro-preview": { ID: "google/gemini-3-pro-preview", Name: "Gemini 3 Pro", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: true, SupportsVideo: true, SupportsPDF: true, ContextWindow: 1048576, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "meta-llama/llama-3.3-70b-instruct": { ID: "meta-llama/llama-3.3-70b-instruct", Name: "Llama 3.3 70B", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 16384, AvailableTools: []string{ToolFunctionCalling}, }, "meta-llama/llama-4-maverick": { ID: "meta-llama/llama-4-maverick", Name: "Llama 4 Maverick", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 1048576, MaxOutputTokens: 16384, AvailableTools: []string{ToolFunctionCalling}, }, "meta-llama/llama-4-scout": { ID: "meta-llama/llama-4-scout", Name: "Llama 4 Scout", Provider: "openrouter", API: 
"openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 327680, MaxOutputTokens: 16384, AvailableTools: []string{ToolFunctionCalling}, }, "minimax/minimax-m2": { ID: "minimax/minimax-m2", Name: "MiniMax M2", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 196608, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "minimax/minimax-m2.1": { ID: "minimax/minimax-m2.1", Name: "MiniMax M2.1", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 196608, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "minimax/minimax-m2.5": { ID: "minimax/minimax-m2.5", Name: "MiniMax M2.5", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 196608, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "moonshotai/kimi-k2": { ID: "moonshotai/kimi-k2", Name: "Kimi K2 (0711)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "moonshotai/kimi-k2-0905": { ID: 
"moonshotai/kimi-k2-0905", Name: "Kimi K2 (0905)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "moonshotai/kimi-k2-thinking": { ID: "moonshotai/kimi-k2-thinking", Name: "Kimi K2 (Thinking)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 262144, MaxOutputTokens: 65535, AvailableTools: []string{ToolFunctionCalling}, }, "moonshotai/kimi-k2.5": { ID: "moonshotai/kimi-k2.5", Name: "Kimi K2.5", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 262144, MaxOutputTokens: 262144, AvailableTools: []string{ToolFunctionCalling}, }, "openai/gpt-4.1": { ID: "openai/gpt-4.1", Name: "GPT 4.1", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 1047576, MaxOutputTokens: 32768, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-4.1-mini": { ID: "openai/gpt-4.1-mini", Name: "GPT 4.1 Mini", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 1047576, MaxOutputTokens: 32768, 
AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-4.1-nano": { ID: "openai/gpt-4.1-nano", Name: "GPT 4.1 Nano", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 1047576, MaxOutputTokens: 32768, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-4o-mini": { ID: "openai/gpt-4o-mini", Name: "GPT 4o Mini", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 128000, MaxOutputTokens: 16384, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5": { ID: "openai/gpt-5", Name: "GPT-5", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5-image": { ID: "openai/gpt-5-image", Name: "GPT ImageGen 1.5", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: true, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5-image-mini": { ID: "openai/gpt-5-image-mini", Name: "GPT ImageGen", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: true, 
SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5-mini": { ID: "openai/gpt-5-mini", Name: "GPT 5 Mini", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5-nano": { ID: "openai/gpt-5-nano", Name: "GPT 5 Nano", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5.1": { ID: "openai/gpt-5.1", Name: "GPT 5.1", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5.2": { ID: "openai/gpt-5.2", Name: "GPT 5.2", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-5.2-pro": { ID: "openai/gpt-5.2-pro", Name: "GPT 5.2 Pro", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, 
SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 400000, MaxOutputTokens: 128000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/gpt-oss-120b": { ID: "openai/gpt-oss-120b", Name: "OSS 120B", Provider: "openrouter", API: "openai-responses", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "openai/gpt-oss-20b": { ID: "openai/gpt-oss-20b", Name: "OSS 20B", Provider: "openrouter", API: "openai-responses", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "openai/o3": { ID: "openai/o3", Name: "O3", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 100000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/o3-mini": { ID: "openai/o3-mini", Name: "O3 Mini", Provider: "openrouter", API: "openai-responses", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 100000, AvailableTools: []string{ToolFunctionCalling}, }, "openai/o3-pro": { ID: "openai/o3-pro", Name: "O3 Pro", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, 
SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 100000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "openai/o4-mini": { ID: "openai/o4-mini", Name: "O4 Mini", Provider: "openrouter", API: "openai-responses", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: true, ContextWindow: 200000, MaxOutputTokens: 100000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "qwen/qwen-2.5-coder-32b-instruct": { ID: "qwen/qwen-2.5-coder-32b-instruct", Name: "Qwen 2.5 32B", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: false, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 32768, MaxOutputTokens: 32768, AvailableTools: []string{}, }, "qwen/qwen3-235b-a22b": { ID: "qwen/qwen3-235b-a22b", Name: "Qwen 3 235B", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 40960, MaxOutputTokens: 40960, AvailableTools: []string{ToolFunctionCalling}, }, "qwen/qwen3-235b-a22b-thinking-2507": { ID: "qwen/qwen3-235b-a22b-thinking-2507", Name: "Qwen 3 235B (Thinking)", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "qwen/qwen3-32b": { ID: "qwen/qwen3-32b", 
Name: "Qwen 3 32B", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 40960, MaxOutputTokens: 40960, AvailableTools: []string{ToolFunctionCalling}, }, "qwen/qwen3-coder": { ID: "qwen/qwen3-coder", Name: "Qwen 3 Coder", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 262144, MaxOutputTokens: 0, AvailableTools: []string{ToolFunctionCalling}, }, "x-ai/grok-3": { ID: "x-ai/grok-3", Name: "Grok 3", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: false, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "x-ai/grok-3-mini": { ID: "x-ai/grok-3-mini", Name: "Grok 3 Mini", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 0, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "x-ai/grok-4": { ID: "x-ai/grok-4", Name: "Grok 4", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 256000, MaxOutputTokens: 0, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "x-ai/grok-4-fast": { 
ID: "x-ai/grok-4-fast", Name: "Grok 4 Fast", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 2000000, MaxOutputTokens: 30000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "x-ai/grok-4.1-fast": { ID: "x-ai/grok-4.1-fast", Name: "Grok 4.1 Fast", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: true, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 2000000, MaxOutputTokens: 30000, AvailableTools: []string{ToolWebSearch, ToolFunctionCalling}, }, "z-ai/glm-4.5": { ID: "z-ai/glm-4.5", Name: "GLM 4.5", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "z-ai/glm-4.5-air": { ID: "z-ai/glm-4.5-air", Name: "GLM 4.5 Air", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 98304, AvailableTools: []string{ToolFunctionCalling}, }, "z-ai/glm-4.5v": { ID: "z-ai/glm-4.5v", Name: "GLM 4.5V", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 65536, MaxOutputTokens: 16384, AvailableTools: 
[]string{ToolFunctionCalling}, }, "z-ai/glm-4.6": { ID: "z-ai/glm-4.6", Name: "GLM 4.6", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 202752, MaxOutputTokens: 65536, AvailableTools: []string{ToolFunctionCalling}, }, "z-ai/glm-4.6v": { ID: "z-ai/glm-4.6v", Name: "GLM 4.6V", Provider: "openrouter", API: "openai-completions", SupportsVision: true, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: true, SupportsPDF: false, ContextWindow: 131072, MaxOutputTokens: 131072, AvailableTools: []string{ToolFunctionCalling}, }, "z-ai/glm-4.7": { ID: "z-ai/glm-4.7", Name: "GLM 4.7", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 202752, MaxOutputTokens: 65535, AvailableTools: []string{ToolFunctionCalling}, }, "z-ai/glm-5": { ID: "z-ai/glm-5", Name: "GLM 5", Provider: "openrouter", API: "openai-completions", SupportsVision: false, SupportsToolCalling: true, SupportsReasoning: true, SupportsWebSearch: false, SupportsImageGen: false, SupportsAudio: false, SupportsVideo: false, SupportsPDF: false, ContextWindow: 204800, MaxOutputTokens: 131072, AvailableTools: []string{ToolFunctionCalling}, }, }, Aliases: map[string]string{ "beeper/default": "anthropic/claude-opus-4.6", "beeper/fast": "openai/gpt-5-mini", "beeper/reasoning": "openai/gpt-5.2", "beeper/smart": "openai/gpt-5.2", }, }
ModelManifest contains all model definitions and aliases. Models are fetched from OpenRouter API, aliases are defined in the generator config.
var OpenClawAliases = map[string]string{
"opus": "anthropic/claude-opus-4.6",
"sonnet": "anthropic/claude-sonnet-4.5",
"haiku": "anthropic/claude-haiku-4.5",
"gpt": "openai/gpt-5.2",
"gpt-mini": "openai/gpt-5-mini",
"gemini": "google/gemini-3-pro-preview",
"gemini-flash": "google/gemini-3-flash-preview",
"anthropic/claude-opus-4": "anthropic/claude-opus-4.6",
"anthropic/claude-sonnet-4": "anthropic/claude-sonnet-4.5",
"anthropic/claude-haiku-4": "anthropic/claude-haiku-4.5",
"anthropic/claude-opus-4-5": "anthropic/claude-opus-4.5",
"anthropic/claude-sonnet-4-5": "anthropic/claude-sonnet-4.5",
"anthropic/claude-haiku-4-5": "anthropic/claude-haiku-4.5",
"zai/glm-4.7": "z-ai/glm-4.7",
"minimax/MiniMax-M2.1": "minimax/minimax-m2.1",
"minimax/MiniMax-M2": "minimax/minimax-m2",
"moonshot/kimi-k2.5": "moonshotai/kimi-k2.5",
"moonshot/kimi-k2-0905": "moonshotai/kimi-k2-0905",
"moonshot/kimi-k2-0905-preview": "moonshotai/kimi-k2-0905",
"moonshot/kimi-k2-thinking": "moonshotai/kimi-k2-thinking",
}
OpenClawAliases provides OpenClaw-compatible shorthands and model ID aliases. These resolve to canonical model IDs in the local manifest.
var RoomCapabilitiesEventType = matrixevents.RoomCapabilitiesEventType
RoomCapabilitiesEventType is the Matrix state event type for bridge-controlled capabilities. Protected by power levels (100) so only the bridge bot can modify it.
var RoomSettingsEventType = matrixevents.RoomSettingsEventType
RoomSettingsEventType is the Matrix state event type for user-editable settings. Normal power level (0) so users can modify it.
var StepBoundaryEventType = matrixevents.StepBoundaryEventType
StepBoundaryEventType represents multi-step boundaries within a turn
var StreamDeltaEventType = matrixevents.StreamDeltaEventType
StreamDeltaEventType is the custom event type for streaming token updates (ephemeral).
var StreamEventMessageType = matrixevents.StreamEventMessageType
StreamEventMessageType is the unified event type for AI streaming updates (ephemeral).
var ToolCallEventType = matrixevents.ToolCallEventType
ToolCallEventType represents a tool invocation
var ToolProgressEventType = matrixevents.ToolProgressEventType
ToolProgressEventType provides tool execution progress updates
var ToolResultEventType = matrixevents.ToolResultEventType
ToolResultEventType represents a tool execution result
var TurnCancelledEventType = matrixevents.TurnCancelledEventType
TurnCancelledEventType represents a cancelled turn
Functions ¶
func AddModelPrefix ¶
func AddModelPrefix(backend ModelBackend, modelID string) string
AddModelPrefix adds a prefix to a model ID if it doesn't have one
func AppendCacheTTLTimestamp ¶
func AppendCacheTTLTimestamp(meta *PortalMetadata)
AppendCacheTTLTimestamp records the current time as the last cache-eligible request timestamp on the portal metadata.
func BuildDebounceKey ¶
BuildDebounceKey creates a key for debouncing: room+sender.
func CombineDebounceEntries ¶
func CombineDebounceEntries(entries []DebounceEntry) (string, int)
CombineDebounceEntries combines multiple entries into a single body. Returns the combined body and the count of combined messages.
func EnqueueReactionFeedback ¶
func EnqueueReactionFeedback(roomID id.RoomID, feedback ReactionFeedback)
EnqueueReactionFeedback adds reaction feedback for a room.
func EstimateTokens ¶
func EstimateTokens(messages []openai.ChatCompletionMessageParamUnion, model string) (int, error)
EstimateTokens counts tokens for a list of chat messages. Based on OpenAI's cookbook: https://github.com/openai/openai-cookbook
func ExtractBeeperPreviews ¶
func ExtractBeeperPreviews(previews []*PreviewWithImage) []*event.BeeperLinkPreview
ExtractBeeperPreviews extracts just the BeeperLinkPreview from PreviewWithImage slice.
func ExtractURLs ¶
ExtractURLs extracts URLs from text, returning up to maxURLs unique URLs. It strips markdown link syntax to avoid detecting the same URL twice.
func FallbackReasoningLevel ¶
FallbackReasoningLevel returns a lower reasoning level to try when the current one fails. Returns empty string if there's no fallback available (already at "none" or unknown level).
func FormatPreviewsForContext ¶
func FormatPreviewsForContext(previews []*event.BeeperLinkPreview, maxChars int) string
FormatPreviewsForContext formats link previews for injection into LLM context.
func FormatProxyError ¶
func FormatProxyError(proxyErr *ProxyError) string
FormatProxyError formats a proxy error for user display
func FormatReactionFeedback ¶
func FormatReactionFeedback(feedback []ReactionFeedback) string
FormatReactionFeedback formats reaction feedback as context for the AI. Keep the string stable and channel-specific so the model can reason about where the reaction happened.
func FormatUserFacingError ¶
FormatUserFacingError transforms an API error into a user-friendly message. Returns a sanitized message suitable for display to end users.
func FromAgentDefinitionContent ¶
func FromAgentDefinitionContent(content *AgentDefinitionContent) *agents.AgentDefinition
FromAgentDefinitionContent converts a Matrix event form to AgentDefinition.
func GetModelDisplayName ¶
GetModelDisplayName returns a human-readable display name for a model.
func GetPDFEngineFromContext ¶
GetPDFEngineFromContext retrieves the PDF engine override from context
func HasValidPrefix ¶
HasValidPrefix checks if a model ID has a valid backend prefix
func IsAuthError ¶
IsAuthError checks if the error is an authentication error. Checks openai.Error status codes first, then falls back to string pattern matching.
func IsBillingError ¶
IsBillingError checks if the error is a billing/payment error (402)
func IsCacheTTLEligibleProvider ¶
IsCacheTTLEligibleProvider returns true if the model is served by Anthropic (directly or via OpenRouter) and thus eligible for prompt caching.
func IsCompactionFailureError ¶
IsCompactionFailureError checks if a context-length error originated from compaction itself (e.g., the summarisation prompt overflowed). This lets callers avoid re-attempting compaction when compaction was the thing that failed.
func IsGoogleModel ¶
IsGoogleModel returns true if the model ID looks like a Google/Gemini model.
func IsImageError ¶
IsImageError checks if the error is related to image size or dimensions
func IsMissingToolCallInputError ¶
IsMissingToolCallInputError checks if the error indicates a corrupted session where tool call inputs are missing (e.g., from interrupted streaming).
func IsModelNotFound ¶
IsModelNotFound checks if the error is a model not found (404) error
func IsNoResponseChunksError ¶
IsNoResponseChunksError checks if the Responses streaming returned no chunks.
func IsOverloadedError ¶
IsOverloadedError checks if the error indicates the service is overloaded
func IsRateLimitError ¶
IsRateLimitError checks if the error is a rate limit (429) error
func IsReasoningError ¶
IsReasoningError checks if the error is related to unsupported reasoning/thinking levels
func IsRoleOrderingError ¶
IsRoleOrderingError checks if the error is related to message role ordering conflicts
func IsServerError ¶
IsServerError checks if the error is a server-side (5xx) error
func IsTimeoutError ¶
IsTimeoutError checks if the error is a timeout error
func IsToolSchemaError ¶
IsToolSchemaError checks if the error indicates a tool schema validation failure.
func IsToolUniquenessError ¶
IsToolUniquenessError checks if the error indicates duplicate tool names.
func IsToolUseIDFormatError ¶
IsToolUseIDFormatError checks if the error is caused by an invalid tool_use ID format (e.g., when IDs from one provider are replayed to another).
func LimitHistoryTurns ¶
func LimitHistoryTurns( prompt []openai.ChatCompletionMessageParamUnion, limit int, ) []openai.ChatCompletionMessageParamUnion
LimitHistoryTurns limits conversation history to the last N user turns (and their associated assistant responses and tool calls). This reduces token usage for long-running sessions. Returns the original prompt if limit is 0 or negative (unlimited).
func MakeMessageID ¶
MakeMessageID creates a message ID from a Matrix event ID
func MakePDFPluginMiddleware ¶
func MakePDFPluginMiddleware(defaultEngine string) option.Middleware
MakePDFPluginMiddleware creates middleware that injects the file-parser plugin for PDFs. The defaultEngine parameter is used as a fallback when no per-request engine is set in context. To set a per-request engine, use WithPDFEngine() to add it to the request context.
func MakeToolDedupMiddleware ¶
func MakeToolDedupMiddleware(log zerolog.Logger) option.Middleware
MakeToolDedupMiddleware removes duplicate tool names from outbound Responses requests.
func ParseExistingLinkPreviews ¶
func ParseExistingLinkPreviews(rawContent map[string]any) []*event.BeeperLinkPreview
ParseExistingLinkPreviews extracts link previews from a Matrix event's raw content.
func PreviewsToMapSlice ¶
func PreviewsToMapSlice(previews []*event.BeeperLinkPreview) []map[string]any
PreviewsToMapSlice converts BeeperLinkPreviews to a format suitable for JSON serialization.
func PruneContext ¶
func PruneContext( prompt []openai.ChatCompletionMessageParamUnion, config *PruningConfig, contextWindowTokens int, ) []openai.ChatCompletionMessageParamUnion
PruneContext prunes messages to fit within the context window (OpenClaw algorithm). Phase 0: Limit history turns (if MaxHistoryTurns is set). Phase 1: Soft trim - truncate large tool results to head+tail. Phase 2: Hard clear - replace old tool results with a placeholder.
func RegisterBeforeCompactionHook ¶
func RegisterBeforeCompactionHook(hook CompactionBeforeHook)
RegisterBeforeCompactionHook registers a hook to run before compaction
func ResolveAlias ¶
ResolveAlias returns the actual model ID for a given alias. If the input is not an alias, it returns the input unchanged.
func SanitizeGoogleTurnOrdering ¶
func SanitizeGoogleTurnOrdering(prompt []openai.ChatCompletionMessageParamUnion) []openai.ChatCompletionMessageParamUnion
SanitizeGoogleTurnOrdering fixes prompt ordering for Google models:
- Merges consecutive user messages
- Merges consecutive assistant messages
- Prepends a synthetic user turn if history starts with an assistant message
func SanitizeToolCallID ¶
SanitizeToolCallID cleans a tool call ID for provider compatibility.
Modes:
- "strict": strips all non-alphanumeric characters, preserves "call_" prefix
- "strict9": strips non-alphanumeric, truncates to 9 chars (some providers require short IDs)
If the ID is empty after sanitization, a new random call ID is generated.
func ShouldDebounce ¶
ShouldDebounce returns false for messages that shouldn't be debounced. Media, commands, and empty messages are processed immediately.
func ShouldRefreshCacheTTL ¶
func ShouldRefreshCacheTTL(meta *PortalMetadata) bool
ShouldRefreshCacheTTL returns true if the Anthropic prompt cache TTL window is about to expire (or has expired) and a cache-warming request should include a cache_control breakpoint.
func StripEnvelope ¶
StripEnvelope removes the [Channel Timestamp] envelope prefix from a message body. This is useful when replaying historical messages to the model — the envelope is informative for the current turn but noisy in history.
func ToOpenAIChatTools ¶
func ToOpenAIChatTools(tools []ToolDefinition, log *zerolog.Logger) []openai.ChatCompletionToolUnionParam
ToOpenAIChatTools converts tool definitions to OpenAI Chat Completions tool format.
func ToOpenAIResponsesInput ¶
func ToOpenAIResponsesInput(messages []UnifiedMessage) responses.ResponseInputParam
ToOpenAIResponsesInput converts unified messages to OpenAI Responses API format. Supports text + image/PDF inputs for user messages; audio/video are intentionally excluded (caller should fall back to Chat Completions for those).
func ToOpenAITools ¶
func ToOpenAITools(tools []ToolDefinition, strictMode ToolStrictMode, log *zerolog.Logger) []responses.ToolUnionParam
ToOpenAITools converts tool definitions to OpenAI Responses API format
func UploadPreviewImages ¶
func UploadPreviewImages(ctx context.Context, previews []*PreviewWithImage, intent bridgev2.MatrixAPI, roomID id.RoomID) []*event.BeeperLinkPreview
UploadPreviewImages uploads images from PreviewWithImage to Matrix and returns final BeeperLinkPreviews.
func ValidateGeminiTurns ¶
func ValidateGeminiTurns(prompt []openai.ChatCompletionMessageParamUnion) bool
ValidateGeminiTurns checks whether the prompt satisfies Google's strict user→assistant alternation requirement. Returns true if the prompt is valid.
func WithBridgeToolContext ¶
func WithBridgeToolContext(ctx context.Context, btc *BridgeToolContext) context.Context
WithBridgeToolContext adds bridge context to a context
func WithPDFEngine ¶
WithPDFEngine adds a PDF engine override to the context
func WithTypingContext ¶
func WithTypingContext(ctx context.Context, typing *TypingContext) context.Context
Types ¶
type AIClient ¶
AIClient handles communication with AI providers
func (*AIClient) BackgroundContext ¶
func (*AIClient) BroadcastRoomState ¶
BroadcastRoomState sends current room capabilities and settings to Matrix room state
func (*AIClient) CleanupPortal ¶
func (*AIClient) DefaultAgentID ¶
func (*AIClient) Disconnect ¶
func (oc *AIClient) Disconnect()
func (*AIClient) DownloadAndEncodeMedia ¶
func (*AIClient) EmitOpenCodeStreamEvent ¶
func (*AIClient) FetchMessages ¶
func (oc *AIClient) FetchMessages(ctx context.Context, params bridgev2.FetchMessagesParams) (*bridgev2.FetchMessagesResponse, error)
func (*AIClient) FinishOpenCodeStream ¶
func (*AIClient) GetCapabilities ¶
func (*AIClient) GetChatInfo ¶
func (*AIClient) GetContactList ¶
func (oc *AIClient) GetContactList(ctx context.Context) ([]*bridgev2.ResolveIdentifierResponse, error)
GetContactList returns a list of available AI agents and models as contacts
func (*AIClient) GetUserInfo ¶
func (*AIClient) HandleMatrixDeleteChat ¶
func (oc *AIClient) HandleMatrixDeleteChat(ctx context.Context, msg *bridgev2.MatrixDeleteChat) error
HandleMatrixDeleteChat deletes the remote OpenCode session when a chat is deleted.
func (*AIClient) HandleMatrixDisappearingTimer ¶
func (oc *AIClient) HandleMatrixDisappearingTimer(ctx context.Context, msg *bridgev2.MatrixDisappearingTimer) (bool, error)
HandleMatrixDisappearingTimer handles disappearing message timer changes from Matrix. For the AI bridge, we just update the portal's disappear field - the bridge framework handles the actual deletion.
func (*AIClient) HandleMatrixEdit ¶
HandleMatrixEdit handles edits to previously sent messages
func (*AIClient) HandleMatrixMessage ¶
func (oc *AIClient) HandleMatrixMessage(ctx context.Context, msg *bridgev2.MatrixMessage) (*bridgev2.MatrixMessageResponse, error)
HandleMatrixMessage processes incoming Matrix messages and dispatches them to the AI
func (*AIClient) HandleMatrixMessageRemove ¶
func (oc *AIClient) HandleMatrixMessageRemove(ctx context.Context, msg *bridgev2.MatrixMessageRemove) error
HandleMatrixMessageRemove handles message deletions from Matrix. For the AI bridge, we just delete from our database - there's no "remote" to sync to.
func (*AIClient) HandleMatrixReaction ¶
func (*AIClient) HandleMatrixReactionRemove ¶
func (*AIClient) HandleMatrixTyping ¶
HandleMatrixTyping tracks local user typing state for auto-greeting delays.
func (*AIClient) HumanUserID ¶
func (oc *AIClient) HumanUserID(loginID networkid.UserLoginID) networkid.UserID
func (*AIClient) IsLoggedIn ¶
func (*AIClient) IsThisUser ¶
func (*AIClient) LogoutRemote ¶
func (*AIClient) OpenCodeInstances ¶
func (oc *AIClient) OpenCodeInstances() map[string]*opencodebridge.OpenCodeInstance
func (*AIClient) PortalMeta ¶
func (oc *AIClient) PortalMeta(portal *bridgev2.Portal) *opencodebridge.PortalMeta
func (*AIClient) PreHandleMatrixReaction ¶
func (oc *AIClient) PreHandleMatrixReaction(ctx context.Context, msg *bridgev2.MatrixReaction) (bridgev2.MatrixReactionPreResponse, error)
func (*AIClient) ResolveIdentifier ¶
func (oc *AIClient) ResolveIdentifier(ctx context.Context, identifier string, createChat bool) (*bridgev2.ResolveIdentifierResponse, error)
ResolveIdentifier resolves an agent ID to a ghost and optionally creates a chat
func (*AIClient) RoomCapabilitiesEventType ¶
func (*AIClient) RoomSettingsEventType ¶
func (*AIClient) SaveOpenCodeInstances ¶
func (oc *AIClient) SaveOpenCodeInstances(ctx context.Context, instances map[string]*opencodebridge.OpenCodeInstance) error
func (*AIClient) SavePortal ¶
func (*AIClient) SearchUsers ¶
func (oc *AIClient) SearchUsers(ctx context.Context, query string) ([]*bridgev2.ResolveIdentifierResponse, error)
SearchUsers searches available AI agents by name/ID
func (*AIClient) SendPendingStatus ¶
func (*AIClient) SendSuccessStatus ¶
func (*AIClient) SendSystemNotice ¶
func (*AIClient) SenderForOpenCode ¶
func (oc *AIClient) SenderForOpenCode(instanceID string, fromMe bool) bridgev2.EventSender
func (*AIClient) SetPortalMeta ¶
func (oc *AIClient) SetPortalMeta(portal *bridgev2.Portal, meta *opencodebridge.PortalMeta)
type AIErrorContent ¶
type AIErrorContent struct {
Body string `json:"body"`
MsgType string `json:"msgtype"`
Error *AIErrorData `json:"com.beeper.ai.error"`
}
AIErrorContent represents an AI error timeline event
type AIErrorData ¶
type AIErrorData struct {
TurnID string `json:"turn_id,omitempty"`
AgentID string `json:"agent_id,omitempty"`
ErrorCode string `json:"error_code"`
ErrorMessage string `json:"error_message"`
Retryable bool `json:"retryable"`
Suggestion string `json:"suggestion,omitempty"`
}
AIErrorData contains error details
type AIProvider ¶
type AIProvider interface {
// Name returns the provider name (e.g., "openai", "openrouter")
Name() string
// GenerateStream generates a streaming response
GenerateStream(ctx context.Context, params GenerateParams) (<-chan StreamEvent, error)
// Generate generates a non-streaming response
Generate(ctx context.Context, params GenerateParams) (*GenerateResponse, error)
// ListModels returns available models for this provider
ListModels(ctx context.Context) ([]ModelInfo, error)
}
AIProvider defines a common interface for OpenAI-compatible AI providers
type AckReactionGateParams ¶
type AckReactionScope ¶
type AckReactionScope string
const ( AckScopeAll AckReactionScope = "all" AckScopeDirect AckReactionScope = "direct" AckScopeGroupAll AckReactionScope = "group-all" AckScopeGroupMention AckReactionScope = "group-mentions" AckScopeOff AckReactionScope = "off" AckScopeNone AckReactionScope = "none" )
type AgentConfig ¶
type AgentConfig struct {
AgentID string `json:"agent_id"`
Name string `json:"name"`
Model string `json:"model"`
UserID string `json:"user_id"` // Matrix user ID for this agent
Role string `json:"role"` // "primary", "specialist"
Description string `json:"description,omitempty"`
AvatarURL string `json:"avatar_url,omitempty"` // mxc:// URL
Triggers []string `json:"triggers,omitempty"` // e.g., ["@researcher", "/research"]
}
AgentConfig describes an AI agent
type AgentDefaultsConfig ¶
type AgentDefaultsConfig struct {
Subagents *agents.SubagentConfig `yaml:"subagents"`
SkipBootstrap bool `yaml:"skip_bootstrap"`
BootstrapMaxChars int `yaml:"bootstrap_max_chars"`
TimeoutSeconds int `yaml:"timeoutSeconds"`
SoulEvil *agents.SoulEvilConfig `yaml:"soul_evil"`
Heartbeat *HeartbeatConfig `yaml:"heartbeat"`
UserTimezone string `yaml:"userTimezone"`
EnvelopeTimezone string `yaml:"envelopeTimezone"` // local|utc|user|IANA
EnvelopeTimestamp string `yaml:"envelopeTimestamp"` // on|off
EnvelopeElapsed string `yaml:"envelopeElapsed"` // on|off
TypingMode string `yaml:"typingMode"` // never|instant|thinking|message
TypingIntervalSec *int `yaml:"typingIntervalSeconds"`
}
AgentDefaultsConfig defines default agent settings.
type AgentDefinitionContent ¶
type AgentDefinitionContent struct {
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description,omitempty"`
AvatarURL string `json:"avatar_url,omitempty"`
Model string `json:"model,omitempty"`
ModelFallback []string `json:"model_fallback,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
PromptMode string `json:"prompt_mode,omitempty"`
Tools *toolpolicy.ToolPolicyConfig `json:"tools,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
IdentityName string `json:"identity_name,omitempty"`
IdentityPersona string `json:"identity_persona,omitempty"`
IsPreset bool `json:"is_preset,omitempty"`
MemorySearch *agents.MemorySearchConfig `json:"memory_search,omitempty"`
HeartbeatPrompt string `json:"heartbeat_prompt,omitempty"`
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at"`
}
AgentDefinitionContent stores agent configuration in Matrix state events. This is the serialized form of agents.AgentDefinition for Matrix storage.
func ToAgentDefinitionContent ¶
func ToAgentDefinitionContent(agent *agents.AgentDefinition) *AgentDefinitionContent
ToAgentDefinitionContent converts an AgentDefinition to its Matrix event form.
type AgentEntryConfig ¶
type AgentEntryConfig struct {
ID string `yaml:"id"`
Heartbeat *HeartbeatConfig `yaml:"heartbeat"`
TypingMode string `yaml:"typingMode"` // never|instant|thinking|message
TypingIntervalSec *int `yaml:"typingIntervalSeconds"`
}
AgentEntryConfig defines per-agent overrides (OpenClaw-style).
type AgentHandoffContent ¶
type AgentHandoffContent struct {
Body string `json:"body"`
MsgType string `json:"msgtype"`
Handoff *AgentHandoffData `json:"com.beeper.ai.agent_handoff"`
}
AgentHandoffContent represents an agent handoff event
type AgentHandoffData ¶
type AgentHandoffData struct {
FromAgent string `json:"from_agent"`
ToAgent string `json:"to_agent"`
FromTurn string `json:"from_turn,omitempty"`
Reason string `json:"reason,omitempty"`
Context map[string]any `json:"context,omitempty"`
}
AgentHandoffData contains handoff details
type AgentMemberContent ¶
type AgentMemberContent struct {
Membership string `json:"membership"`
DisplayName string `json:"displayname,omitempty"`
AvatarURL string `json:"avatar_url,omitempty"`
Agent *AgentDefinitionContent `json:"com.beeper.ai.agent,omitempty"`
}
AgentMemberContent is stored in m.room.member events in the Builder room to persist agent definitions as Matrix state events.
type AgentState ¶
type AgentState struct {
AgentID string
TurnID string
Status string // pending, thinking, generating, tool_use, completed, failed, cancelled
StartedAt time.Time
Model string
ToolCalls []string // Event IDs of tool calls
ImageEvents []string // Event IDs of generated images
}
AgentState tracks the state of an active agent turn
type AgentStoreAdapter ¶
type AgentStoreAdapter struct {
// contains filtered or unexported fields
}
AgentStoreAdapter implements agents.AgentStore with UserLogin metadata as source of truth.
func NewAgentStoreAdapter ¶
func NewAgentStoreAdapter(client *AIClient) *AgentStoreAdapter
NewAgentStoreAdapter creates a new agent store adapter.
func (*AgentStoreAdapter) DeleteAgent ¶
func (s *AgentStoreAdapter) DeleteAgent(ctx context.Context, agentID string) error
DeleteAgent implements agents.AgentStore. It deletes a custom agent from UserLogin metadata.
func (*AgentStoreAdapter) GetAgentByID ¶
func (s *AgentStoreAdapter) GetAgentByID(ctx context.Context, agentID string) (*agents.AgentDefinition, error)
GetAgentByID looks up an agent by ID, returning preset or custom agents.
func (*AgentStoreAdapter) GetAgentForRoom ¶
func (s *AgentStoreAdapter) GetAgentForRoom(ctx context.Context, meta *PortalMetadata) (*agents.AgentDefinition, error)
GetAgentForRoom returns the agent assigned to a room. Falls back to the Quick Chatter if no specific agent is set.
func (*AgentStoreAdapter) ListAvailableTools ¶
ListAvailableTools implements agents.AgentStore.
func (*AgentStoreAdapter) ListModels ¶
ListModels implements agents.AgentStore.
func (*AgentStoreAdapter) LoadAgents ¶
func (s *AgentStoreAdapter) LoadAgents(ctx context.Context) (map[string]*agents.AgentDefinition, error)
LoadAgents implements agents.AgentStore. It loads agents from presets and metadata-backed custom agents.
func (*AgentStoreAdapter) SaveAgent ¶
func (s *AgentStoreAdapter) SaveAgent(ctx context.Context, agent *agents.AgentDefinition) error
SaveAgent implements agents.AgentStore. It saves custom agents to UserLogin metadata.
type AgentsConfig ¶
type AgentsConfig struct {
Defaults *AgentDefaultsConfig `yaml:"defaults"`
List []AgentEntryConfig `yaml:"list"`
}
AgentsConfig configures agent defaults (OpenClaw-style).
type AgentsEventContent ¶
type AgentsEventContent struct {
Agents []AgentConfig `json:"agents"`
Orchestration *OrchestrationConfig `json:"orchestration,omitempty"`
}
AgentsEventContent configures active agents in a room
type Annotation ¶
type Annotation struct {
Type string `json:"type"` // "citation", "reference"
Index int `json:"index,omitempty"` // Citation number [1], [2], etc.
StartChar int `json:"start_char,omitempty"`
EndChar int `json:"end_char,omitempty"`
Source *AnnotationSource `json:"source,omitempty"`
}
Annotation represents a citation or reference in the text
type AnnotationSource ¶
type AnnotationSource struct {
Type string `json:"type"` // "web", "document", "file"
URL string `json:"url,omitempty"`
Title string `json:"title,omitempty"`
Snippet string `json:"snippet,omitempty"`
FileID string `json:"file_id,omitempty"`
Filename string `json:"filename,omitempty"`
Page int `json:"page,omitempty"`
}
AnnotationSource provides source information for a citation
type ApplyPatchToolsConfig ¶
type ApplyPatchToolsConfig struct {
Enabled *bool `yaml:"enabled"`
AllowModels []string `yaml:"allow_models"`
}
ApplyPatchToolsConfig configures apply_patch availability.
type ApprovalInfo ¶
type ApprovalInfo struct {
Reason string `json:"reason,omitempty"`
Actions []string `json:"actions,omitempty"`
}
ApprovalInfo contains approval request details
type AssistantTurnAI ¶
type AssistantTurnAI struct {
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
Model string `json:"model"`
Status TurnStatus `json:"status"`
FinishReason string `json:"finish_reason,omitempty"`
// Embedded thinking (not separate event)
Thinking *ThinkingContent `json:"thinking,omitempty"`
// Token usage
Usage *EventUsageInfo `json:"usage,omitempty"`
// Related events
ToolCalls []string `json:"tool_calls,omitempty"`
Images []string `json:"images,omitempty"`
// Timing information
Timing *TimingInfo `json:"timing,omitempty"`
// Annotations/citations
Annotations []Annotation `json:"annotations,omitempty"`
}
AssistantTurnAI contains the AI-specific metadata for an assistant turn
type AssistantTurnContent ¶
type AssistantTurnContent struct {
// Standard Matrix fallback fields
Body string `json:"body"`
MsgType string `json:"msgtype"`
Format string `json:"format,omitempty"`
FormattedBody string `json:"formatted_body,omitempty"`
// AI-specific metadata
AI *AssistantTurnAI `json:"com.beeper.ai,omitempty"`
}
AssistantTurnContent represents the content of an assistant turn event
type AttachmentMetadata ¶
type AttachmentMetadata struct {
Type string `json:"type"` // "file", "image"
FileID string `json:"file_id,omitempty"`
Filename string `json:"filename,omitempty"`
MxcURI string `json:"mxc_uri,omitempty"`
Mimetype string `json:"mimetype,omitempty"`
Size int `json:"size,omitempty"`
Width int `json:"width,omitempty"` // For images
Height int `json:"height,omitempty"` // For images
}
AttachmentMetadata describes files attached to user messages
type BeeperConfig ¶
type BeeperConfig struct {
BaseURL string `yaml:"base_url"` // Beeper AI proxy endpoint
Token string `yaml:"token"` // Beeper Matrix access token
}
BeeperConfig contains Beeper AI proxy credentials for automatic login. If both BaseURL and Token are set, users don't need to manually log in.
type BossStoreAdapter ¶
type BossStoreAdapter struct {
// contains filtered or unexported fields
}
BossStoreAdapter implements tools.AgentStoreInterface for boss tool execution. This adapter converts between our agent types and the tools package types.
func NewBossStoreAdapter ¶
func NewBossStoreAdapter(client *AIClient) *BossStoreAdapter
NewBossStoreAdapter creates a new boss store adapter.
func (*BossStoreAdapter) CreateRoom ¶
CreateRoom implements tools.AgentStoreInterface.
func (*BossStoreAdapter) DeleteAgent ¶
func (b *BossStoreAdapter) DeleteAgent(ctx context.Context, agentID string) error
DeleteAgent implements tools.AgentStoreInterface.
func (*BossStoreAdapter) ListAvailableTools ¶
ListAvailableTools implements tools.AgentStoreInterface.
func (*BossStoreAdapter) ListModels ¶
ListModels implements tools.AgentStoreInterface.
func (*BossStoreAdapter) LoadAgents ¶
LoadAgents implements tools.AgentStoreInterface.
func (*BossStoreAdapter) ModifyRoom ¶
func (b *BossStoreAdapter) ModifyRoom(ctx context.Context, roomID string, updates tools.RoomData) error
ModifyRoom implements tools.AgentStoreInterface.
func (*BossStoreAdapter) RunInternalCommand ¶
func (b *BossStoreAdapter) RunInternalCommand(ctx context.Context, roomID string, command string) (string, error)
RunInternalCommand implements tools.AgentStoreInterface.
type BridgeConfig ¶
type BridgeConfig struct {
CommandPrefix string `yaml:"command_prefix"`
LogEphemeralEvents *bool `yaml:"log_ephemeral_events"`
}
BridgeConfig tweaks Matrix-side behaviour for the AI bridge.
type BridgeToolContext ¶
type BridgeToolContext struct {
Client *AIClient
Portal *bridgev2.Portal
Meta *PortalMetadata
SourceEventID id.EventID // The triggering message's event ID (for reactions/replies)
SenderID string // The triggering sender ID (owner-only tool gating)
}
BridgeToolContext provides bridge-specific context for tool execution
func GetBridgeToolContext ¶
func GetBridgeToolContext(ctx context.Context) *BridgeToolContext
GetBridgeToolContext retrieves bridge context from a context
type BuiltinAlwaysAllowRule ¶
type ChannelConfig ¶
type ChannelConfig struct {
Heartbeat *ChannelHeartbeatVisibilityConfig `yaml:"heartbeat"`
ResponsePrefix string `yaml:"responsePrefix"`
ReplyToMode string `yaml:"replyToMode"` // off|first|all (Matrix)
ThreadReplies string `yaml:"threadReplies"` // off|inbound|always (Matrix)
}
type ChannelDefaultsConfig ¶
type ChannelDefaultsConfig struct {
Heartbeat *ChannelHeartbeatVisibilityConfig `yaml:"heartbeat"`
ResponsePrefix string `yaml:"responsePrefix"`
}
type ChannelsConfig ¶
type ChannelsConfig struct {
Defaults *ChannelDefaultsConfig `yaml:"defaults"`
Matrix *ChannelConfig `yaml:"matrix"`
}
ChannelsConfig defines per-channel settings (OpenClaw-style subset for Matrix).
type CodexClient ¶
func (*CodexClient) Connect ¶
func (cc *CodexClient) Connect(ctx context.Context)
func (*CodexClient) Disconnect ¶
func (cc *CodexClient) Disconnect()
func (*CodexClient) GetCapabilities ¶
func (cc *CodexClient) GetCapabilities(ctx context.Context, portal *bridgev2.Portal) *event.RoomFeatures
func (*CodexClient) GetChatInfo ¶
func (*CodexClient) GetUserInfo ¶
func (*CodexClient) HandleMatrixDeleteChat ¶
func (cc *CodexClient) HandleMatrixDeleteChat(ctx context.Context, msg *bridgev2.MatrixDeleteChat) error
HandleMatrixDeleteChat best-effort archives the Codex thread and removes the temp cwd. The core bridge handles Matrix-side room cleanup separately.
func (*CodexClient) HandleMatrixMessage ¶
func (cc *CodexClient) HandleMatrixMessage(ctx context.Context, msg *bridgev2.MatrixMessage) (*bridgev2.MatrixMessageResponse, error)
func (*CodexClient) IsLoggedIn ¶
func (cc *CodexClient) IsLoggedIn() bool
func (*CodexClient) IsThisUser ¶
func (*CodexClient) LogoutRemote ¶
func (cc *CodexClient) LogoutRemote(ctx context.Context)
type CodexClientInfo ¶
type CodexConfig ¶
type CodexConfig struct {
Enabled *bool `yaml:"enabled"`
Command string `yaml:"command"`
HomeBaseDir string `yaml:"home_base_dir"`
DefaultModel string `yaml:"default_model"`
NetworkAccess *bool `yaml:"network_access"`
ClientInfo *CodexClientInfo `yaml:"client_info"`
}
CodexConfig configures the optional Codex app-server integration.
type CodexLogin ¶
type CodexLogin struct {
User *bridgev2.User
Connector *OpenAIConnector
FlowID string
// contains filtered or unexported fields
}
CodexLogin provisions a provider=codex user login backed by a local `codex app-server` process. Tokens are persisted by Codex itself under an isolated CODEX_HOME per login.
func (*CodexLogin) Cancel ¶
func (cl *CodexLogin) Cancel()
func (*CodexLogin) SubmitUserInput ¶
type CollaborationInfo ¶
type CollaborationInfo struct {
Orchestrator string `json:"orchestrator,omitempty"`
Participants []CollaborationParticipant `json:"participants,omitempty"`
}
CollaborationInfo contains multi-agent collaboration status
type CollaborationParticipant ¶
type CollaborationParticipant struct {
AgentID string `json:"agent_id"`
Status string `json:"status"`
Task string `json:"task,omitempty"`
}
CollaborationParticipant represents an agent in a collaboration
type CommandsConfig ¶
type CommandsConfig struct {
OwnerAllowFrom []string `yaml:"ownerAllowFrom"`
}
CommandsConfig defines command authorization settings (OpenClaw-style).
type CompactionAfterHook ¶
type CompactionAfterHook func(ctx context.Context, event *CompactionEvent) error
CompactionAfterHook is called after compaction completes
type CompactionBeforeHook ¶
type CompactionBeforeHook func(ctx context.Context, hookCtx *CompactionHookContext) (*CompactionHookResult, error)
CompactionBeforeHook is called before compaction starts
type CompactionConfig ¶
type CompactionConfig struct {
*PruningConfig
// SummarizationEnabled enables LLM-based summarization instead of placeholder text
// Default: true (when compaction is enabled)
SummarizationEnabled *bool `yaml:"summarization_enabled" json:"summarization_enabled,omitempty"`
// SummarizationModel is the model to use for generating summaries
// Default: same as conversation model, or openai/gpt-5.2
SummarizationModel string `yaml:"summarization_model" json:"summarization_model,omitempty"`
// MaxSummaryTokens is the maximum tokens for generated summaries
// Default: 500
MaxSummaryTokens int `yaml:"max_summary_tokens" json:"max_summary_tokens,omitempty"`
// MaxHistoryShare is the maximum share of the context window that conversation history may occupy
// When exceeded, oldest messages are dropped and summarized
// Default: 0.5 (50%)
MaxHistoryShare float64 `yaml:"max_history_share" json:"max_history_share,omitempty"`
// ReserveTokens is the token budget reserved for compaction output
// Default: 2000
ReserveTokens int `yaml:"reserve_tokens" json:"reserve_tokens,omitempty"`
// CustomInstructions are additional instructions for the summarization model
CustomInstructions string `yaml:"custom_instructions" json:"custom_instructions,omitempty"`
}
CompactionConfig extends PruningConfig with summarization and event settings
func DefaultCompactionConfig ¶
func DefaultCompactionConfig() *CompactionConfig
DefaultCompactionConfig returns default compaction settings
type CompactionEvent ¶
type CompactionEvent struct {
Type CompactionEventType `json:"type"`
SessionID string `json:"session_id,omitempty"`
MessagesBefore int `json:"messages_before,omitempty"`
MessagesAfter int `json:"messages_after,omitempty"`
TokensBefore int `json:"tokens_before,omitempty"`
TokensAfter int `json:"tokens_after,omitempty"`
Summary string `json:"summary,omitempty"`
WillRetry bool `json:"will_retry,omitempty"`
Error string `json:"error,omitempty"`
Duration time.Duration `json:"duration_ms,omitempty"`
}
CompactionEvent represents a compaction lifecycle event
type CompactionEventEmitter ¶
type CompactionEventEmitter func(ctx context.Context, event *CompactionEvent)
CompactionEventEmitter handles emitting compaction events to clients
type CompactionEventType ¶
type CompactionEventType string
CompactionEventType represents the type of compaction event
const ( CompactionEventStart CompactionEventType = "compaction_start" CompactionEventEnd CompactionEventType = "compaction_end" )
type CompactionHookContext ¶
type CompactionHookContext struct {
SessionID string
MessageCount int
TokenCount int
Config *CompactionConfig
}
CompactionHookContext provides context for compaction hooks
type CompactionHookResult ¶
type CompactionHookResult struct {
// Skip if true, skips compaction entirely
Skip bool
// CustomSummary overrides the generated summary
CustomSummary string
// ModifiedConfig allows hooks to modify compaction config
ModifiedConfig *CompactionConfig
}
CompactionHookResult is returned by before_compaction hooks to modify behavior
type CompactionHooks ¶
type CompactionHooks struct {
// contains filtered or unexported fields
}
CompactionHooks manages registered compaction hooks
type CompactionResult ¶
type CompactionResult struct {
Compacted bool
Summary string
MessagesBefore int
MessagesAfter int
TokensBefore int
TokensAfter int
DroppedMessages int
Error error
}
CompactionResult holds the result of a compaction operation
type Compactor ¶
type Compactor struct {
// contains filtered or unexported fields
}
Compactor handles context compaction with LLM summarization
func NewCompactor ¶
NewCompactor creates a new compactor instance
func (*Compactor) CompactContext ¶
func (c *Compactor) CompactContext( ctx context.Context, sessionID string, messages []openai.ChatCompletionMessageParamUnion, contextWindowTokens int, ) (*CompactionResult, []openai.ChatCompletionMessageParamUnion)
CompactContext performs intelligent context compaction with LLM summarization
func (*Compactor) CompactOnOverflow ¶
func (c *Compactor) CompactOnOverflow( ctx context.Context, sessionID string, messages []openai.ChatCompletionMessageParamUnion, contextWindowTokens int, requestedTokens int, ) (*CompactionResult, []openai.ChatCompletionMessageParamUnion, bool)
CompactOnOverflow performs compaction when a context length error is detected. This is called before reactive truncation to try to preserve more context.
func (*Compactor) SetEventEmitter ¶
func (c *Compactor) SetEventEmitter(emitter CompactionEventEmitter)
SetEventEmitter sets the event emitter for compaction events
func (*Compactor) SetSummarizationModel ¶
SetSummarizationModel sets the model used for generating summaries
type Config ¶
type Config struct {
Beeper BeeperConfig `yaml:"beeper"`
Codex *CodexConfig `yaml:"codex"`
OpenCode *OpenCodeConfig `yaml:"opencode"`
Providers ProvidersConfig `yaml:"providers"`
Models *ModelsConfig `yaml:"models"`
Bridge BridgeConfig `yaml:"bridge"`
Tools ToolProvidersConfig `yaml:"tools"`
ToolApprovals *ToolApprovalsRuntimeConfig `yaml:"tool_approvals"`
ToolPolicy *toolpolicy.GlobalToolPolicyConfig `yaml:"tool_policy"`
Agents *AgentsConfig `yaml:"agents"`
Channels *ChannelsConfig `yaml:"channels"`
Cron *CronConfig `yaml:"cron"`
Messages *MessagesConfig `yaml:"messages"`
Commands *CommandsConfig `yaml:"commands"`
Session *SessionConfig `yaml:"session"`
// Global settings
DefaultSystemPrompt string `yaml:"default_system_prompt"`
ModelCacheDuration time.Duration `yaml:"model_cache_duration"`
Memory *MemoryConfig `yaml:"memory"`
MemorySearch *MemorySearchConfig `yaml:"memory_search"`
// Context pruning configuration (OpenClaw-style)
Pruning *PruningConfig `yaml:"pruning"`
// Link preview configuration
LinkPreviews *LinkPreviewConfig `yaml:"link_previews"`
// Inbound message processing configuration
Inbound *InboundConfig `yaml:"inbound"`
}
Config represents the connector-specific configuration that is nested under the `network:` block in the main bridge config.
type ContentPart ¶
type ContentPart struct {
Type ContentPartType
Text string
ImageURL string
ImageB64 string
MimeType string
PDFURL string
PDFB64 string
AudioB64 string
AudioFormat string // wav, mp3, webm, ogg, flac
VideoURL string
VideoB64 string
}
ContentPart represents a single piece of content (text, image, PDF, audio, or video)
type ContentPartType ¶
type ContentPartType string
ContentPartType identifies the type of content in a message
const ( ContentTypeText ContentPartType = "text" ContentTypeImage ContentPartType = "image" ContentTypePDF ContentPartType = "pdf" ContentTypeAudio ContentPartType = "audio" ContentTypeVideo ContentPartType = "video" )
type ContextLengthError ¶
ContextLengthError contains parsed details from context_length_exceeded errors
func ParseContextLengthError ¶
func ParseContextLengthError(err error) *ContextLengthError
ParseContextLengthError checks if err is a context length exceeded error and extracts the token counts from the error message
func (*ContextLengthError) Error ¶
func (e *ContextLengthError) Error() string
type CronConfig ¶
type CronConfig struct {
Enabled *bool `yaml:"enabled"`
Store string `yaml:"store"`
MaxConcurrentRuns int `yaml:"maxConcurrentRuns"`
}
CronConfig configures cron scheduling (OpenClaw-style).
type DebounceBuffer ¶
type DebounceBuffer struct {
// contains filtered or unexported fields
}
DebounceBuffer holds pending messages for a key.
type DebounceEntry ¶
type DebounceEntry struct {
Event *event.Event
Portal *bridgev2.Portal
Meta *PortalMetadata
RawBody string
SenderName string
RoomName string
IsGroup bool
WasMentioned bool
AckEventID id.EventID // Track ack reaction for removal after flush
PendingSent bool // Whether a pending status was already sent for this event
}
DebounceEntry represents a buffered message waiting to be processed.
type Debouncer ¶
type Debouncer struct {
// contains filtered or unexported fields
}
Debouncer buffers rapid messages and combines them. Based on clawdbot's inbound-debounce.ts implementation.
func NewDebouncer ¶
func NewDebouncer(delayMs int, onFlush func([]DebounceEntry), onError func(error, []DebounceEntry)) *Debouncer
NewDebouncer creates a new debouncer with the given delay and callbacks.
func NewDebouncerWithLogger ¶
func NewDebouncerWithLogger(delayMs int, onFlush func([]DebounceEntry), onError func(error, []DebounceEntry), log zerolog.Logger) *Debouncer
NewDebouncerWithLogger creates a new debouncer with logging support.
func (*Debouncer) Enqueue ¶
func (d *Debouncer) Enqueue(key string, entry DebounceEntry, shouldDebounce bool)
Enqueue adds a message to the debounce buffer. If shouldDebounce is false, the message is processed immediately.
func (*Debouncer) EnqueueWithDelay ¶
func (d *Debouncer) EnqueueWithDelay(key string, entry DebounceEntry, shouldDebounce bool, delayMs int)
EnqueueWithDelay adds a message with a custom debounce delay. delayMs: 0 = use default, -1 = immediate (no debounce), >0 = custom delay
func (*Debouncer) FlushAll ¶
func (d *Debouncer) FlushAll()
FlushAll flushes all pending buffers (e.g., on shutdown).
func (*Debouncer) FlushKey ¶
FlushKey immediately flushes the buffer for a key (e.g., when media arrives).
func (*Debouncer) PendingCount ¶
PendingCount returns the number of keys with pending buffers.
type DedupeCache ¶
type DedupeCache struct {
// contains filtered or unexported fields
}
DedupeCache is a thread-safe LRU cache with TTL for message deduplication. Based on clawdbot's dedupe.ts implementation.
func NewDedupeCache ¶
func NewDedupeCache(ttl time.Duration, maxSize int) *DedupeCache
NewDedupeCache creates a new deduplication cache with the given TTL and max size.
func (*DedupeCache) Check ¶
func (c *DedupeCache) Check(key string) bool
Check returns true if key is a duplicate (seen within TTL). Also records the key for future checks.
func (*DedupeCache) Size ¶
func (c *DedupeCache) Size() int
Size returns the current number of entries in the cache.
type DesktopAPIInstance ¶
type DirectChatConfig ¶
type DirectChatConfig struct {
HistoryLimit int `yaml:"historyLimit"`
}
DirectChatConfig defines direct message defaults.
type EffectiveSettings ¶
type EffectiveSettings struct {
Model SettingExplanation `json:"model"`
SystemPrompt SettingExplanation `json:"system_prompt"`
Temperature SettingExplanation `json:"temperature"`
ReasoningEffort SettingExplanation `json:"reasoning_effort"`
}
EffectiveSettings shows current values with source explanations
type EnvelopeFormatOptions ¶
type EventUsageInfo ¶
type EventUsageInfo struct {
PromptTokens int64 `json:"prompt_tokens,omitempty"`
CompletionTokens int64 `json:"completion_tokens,omitempty"`
ReasoningTokens int64 `json:"reasoning_tokens,omitempty"`
}
EventUsageInfo contains token usage information for Matrix events. This is separate from the internal UsageInfo in provider.go to allow different serialization formats (int64 for Matrix JSON vs int for internal use).
type FailoverReason ¶
type FailoverReason string
FailoverReason is a typed enum for classifying why a model failover happened.
const ( FailoverAuth FailoverReason = "auth" FailoverBilling FailoverReason = "billing" FailoverRateLimit FailoverReason = "rate_limit" FailoverTimeout FailoverReason = "timeout" FailoverFormat FailoverReason = "format" FailoverOverload FailoverReason = "overload" FailoverServer FailoverReason = "server" FailoverUnknown FailoverReason = "unknown" )
func ClassifyFailoverReason ¶
func ClassifyFailoverReason(err error) FailoverReason
ClassifyFailoverReason returns a structured reason for why a model failover should occur. Wraps the existing Is*Error functions into a single classifier.
type FetchConfig ¶
type FetchConfig struct {
Provider string `yaml:"provider"`
Fallbacks []string `yaml:"fallbacks"`
Exa ProviderExaConfig `yaml:"exa"`
Direct ProviderDirectConfig `yaml:"direct"`
}
type FileAnnotation ¶
type FileAnnotation struct {
FileHash string `json:"file_hash"` // SHA256 hash of the file content
ParsedText string `json:"parsed_text"` // Extracted text content
PageCount int `json:"page_count,omitempty"` // Number of pages
CreatedAt int64 `json:"created_at"` // Unix timestamp when cached
}
FileAnnotation stores cached parsed PDF content from OpenRouter's file-parser plugin
type GenerateParams ¶
type GenerateParams struct {
Model string
Messages []UnifiedMessage
SystemPrompt string
Temperature float64
MaxCompletionTokens int
Tools []ToolDefinition
ReasoningEffort string // none, low, medium, high (for reasoning models)
// Responses API specific
PreviousResponseID string // For conversation continuation
WebSearchEnabled bool
}
GenerateParams contains parameters for generation requests
type GenerateResponse ¶
type GenerateResponse struct {
Content string
FinishReason string
ResponseID string // For Responses API continuation
ToolCalls []ToolCallResult
Usage UsageInfo
}
GenerateResponse contains the result of a non-streaming generation
type GeneratedFileRef ¶
GeneratedFileRef stores a reference to a file generated by the assistant (e.g., image generation).
type GenerationDetails ¶
type GenerationDetails struct {
CurrentTool string `json:"current_tool,omitempty"`
CallID string `json:"call_id,omitempty"`
ToolsCompleted int `json:"tools_completed,omitempty"`
ToolsTotal int `json:"tools_total,omitempty"`
}
GenerationDetails provides detailed status information
type GenerationProgress ¶
type GenerationProgress struct {
TokensGenerated int `json:"tokens_generated,omitempty"`
ThinkingTokens int `json:"thinking_tokens,omitempty"`
}
GenerationProgress tracks token generation progress
type GenerationStatusContent ¶
type GenerationStatusContent struct {
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
TargetEvent string `json:"target_event,omitempty"`
Status string `json:"status"` // "starting", "thinking", "generating", "tool_use", etc.
StatusMessage string `json:"status_message,omitempty"`
Details *GenerationDetails `json:"details,omitempty"`
Progress *GenerationProgress `json:"progress,omitempty"`
Display *StatusDisplay `json:"display,omitempty"`
// For collaboration
Collaboration *CollaborationInfo `json:"collaboration,omitempty"`
}
GenerationStatusContent represents a generation status update
type GhostMetadata ¶
GhostMetadata stores metadata for AI model ghosts
type GravatarProfile ¶
type GravatarProfile struct {
Email string `json:"email,omitempty"`
Hash string `json:"hash,omitempty"`
Profile map[string]any `json:"profile,omitempty"` // Full profile payload
FetchedAt int64 `json:"fetched_at,omitempty"`
}
GravatarProfile stores the selected Gravatar profile for a login.
type GravatarState ¶
type GravatarState struct {
Primary *GravatarProfile `json:"primary,omitempty"`
}
GravatarState stores Gravatar profile state for a login.
type GroupChatConfig ¶
type GroupChatConfig struct {
MentionPatterns []string `yaml:"mentionPatterns"`
Activation string `yaml:"activation"` // mention|always
HistoryLimit int `yaml:"historyLimit"`
}
GroupChatConfig mirrors OpenClaw's group chat settings.
type HeartbeatConfig ¶
type HeartbeatConfig struct {
Every *string `yaml:"every"`
ActiveHours *HeartbeatActiveHoursConfig `yaml:"activeHours"`
Model *string `yaml:"model"`
Session *string `yaml:"session"`
Target *string `yaml:"target"`
To *string `yaml:"to"`
Prompt *string `yaml:"prompt"`
AckMaxChars *int `yaml:"ackMaxChars"`
IncludeReasoning *bool `yaml:"includeReasoning"`
}
HeartbeatConfig configures periodic heartbeat runs (OpenClaw-style).
type HeartbeatEventPayload ¶
type HeartbeatEventPayload struct {
TS int64 `json:"ts"`
Status string `json:"status"`
To string `json:"to,omitempty"`
Preview string `json:"preview,omitempty"`
DurationMs int64 `json:"durationMs,omitempty"`
HasMedia bool `json:"hasMedia,omitempty"`
Reason string `json:"reason,omitempty"`
Channel string `json:"channel,omitempty"`
Silent bool `json:"silent,omitempty"`
IndicatorType *HeartbeatIndicatorType `json:"indicatorType,omitempty"`
}
type HeartbeatIndicatorType ¶
type HeartbeatIndicatorType string
const ( HeartbeatIndicatorOK HeartbeatIndicatorType = "ok" HeartbeatIndicatorAlert HeartbeatIndicatorType = "alert" HeartbeatIndicatorError HeartbeatIndicatorType = "error" )
type HeartbeatRunConfig ¶
type HeartbeatRunConfig struct {
Reason string
AckMaxChars int
ShowOk bool
ShowAlerts bool
UseIndicator bool
IncludeReasoning bool
ExecEvent bool
ResponsePrefix string
SessionKey string
StoreAgentID string
StorePath string
PrevUpdatedAt int64
TargetRoom id.RoomID
TargetReason string
SuppressSend bool
AgentID string
Channel string
SuppressSave bool
}
type HeartbeatRunOutcome ¶
type HeartbeatRunner ¶
type HeartbeatRunner struct {
// contains filtered or unexported fields
}
func NewHeartbeatRunner ¶
func NewHeartbeatRunner(client *AIClient) *HeartbeatRunner
func (*HeartbeatRunner) Start ¶
func (r *HeartbeatRunner) Start()
func (*HeartbeatRunner) Stop ¶
func (r *HeartbeatRunner) Stop()
type HeartbeatState ¶
type HeartbeatState struct {
LastHeartbeatText string `json:"last_heartbeat_text,omitempty"`
LastHeartbeatSentAt int64 `json:"last_heartbeat_sent_at,omitempty"`
}
HeartbeatState tracks last heartbeat delivery for dedupe.
type HeartbeatWake ¶
type HeartbeatWake struct {
// contains filtered or unexported fields
}
func (*HeartbeatWake) HasPending ¶
func (w *HeartbeatWake) HasPending() bool
func (*HeartbeatWake) Request ¶
func (w *HeartbeatWake) Request(reason string, coalesce time.Duration)
func (*HeartbeatWake) SetHandler ¶
func (w *HeartbeatWake) SetHandler(handler HeartbeatWakeHandler)
type HeartbeatWakeHandler ¶
type HeartbeatWakeHandler func(reason string) cron.HeartbeatRunResult
type ImageDimensionError ¶
type ImageDimensionError struct {
MaxDimensionPx int
}
ImageDimensionError contains parsed details from image dimension errors.
func ParseImageDimensionError ¶
func ParseImageDimensionError(err error) *ImageDimensionError
ParseImageDimensionError extracts max dimension from an image error. Returns nil if the error is not an image dimension error.
type ImageGenerationMetadata ¶
type ImageGenerationMetadata struct {
TurnID string `json:"turn_id,omitempty"`
AgentID string `json:"agent_id,omitempty"`
Prompt string `json:"prompt,omitempty"`
RevisedPrompt string `json:"revised_prompt,omitempty"`
Model string `json:"model,omitempty"`
Style string `json:"style,omitempty"` // "vivid", "natural"
Quality string `json:"quality,omitempty"` // "standard", "hd"
}
ImageGenerationMetadata is added to m.image events for AI-generated images
type ImageSizeError ¶
type ImageSizeError struct {
MaxMB float64
}
ImageSizeError contains parsed details from image size errors.
func ParseImageSizeError ¶
func ParseImageSizeError(err error) *ImageSizeError
ParseImageSizeError extracts max size in MB from an image error. Returns nil if the error is not an image size error.
type InboundConfig ¶
type InboundConfig struct {
// Deduplication settings
DedupeTTL time.Duration `yaml:"dedupe_ttl"` // Time-to-live for dedupe entries (default: 20m)
DedupeMaxSize int `yaml:"dedupe_max_size"` // Max entries in dedupe cache (default: 5000)
// Debounce settings
DefaultDebounceMs int `yaml:"default_debounce_ms"` // Default debounce delay in ms (default: 500)
}
InboundConfig contains settings for inbound message processing including deduplication and debouncing.
func (*InboundConfig) WithDefaults ¶
func (c *InboundConfig) WithDefaults() *InboundConfig
WithDefaults returns the InboundConfig with default values applied.
type InboundDebounceConfig ¶
type InboundDebounceConfig struct {
DebounceMs int `yaml:"debounceMs"`
ByChannel map[string]int `yaml:"byChannel"`
}
InboundDebounceConfig mirrors OpenClaw's inbound debounce config.
type LinkPreviewConfig ¶
type LinkPreviewConfig struct {
Enabled bool `yaml:"enabled"`
MaxURLsInbound int `yaml:"max_urls_inbound"` // Max URLs to process from user messages
MaxURLsOutbound int `yaml:"max_urls_outbound"` // Max URLs to preview in AI responses
FetchTimeout time.Duration `yaml:"fetch_timeout"` // Timeout for fetching each URL
MaxContentChars int `yaml:"max_content_chars"` // Max chars for description in context
MaxPageBytes int64 `yaml:"max_page_bytes"` // Max page size to download
MaxImageBytes int64 `yaml:"max_image_bytes"` // Max image size to download
CacheTTL time.Duration `yaml:"cache_ttl"` // How long to cache previews
}
LinkPreviewConfig holds configuration for link preview functionality.
func DefaultLinkPreviewConfig ¶
func DefaultLinkPreviewConfig() LinkPreviewConfig
DefaultLinkPreviewConfig returns sensible defaults.
type LinkPreviewer ¶
type LinkPreviewer struct {
// contains filtered or unexported fields
}
LinkPreviewer handles URL preview generation.
func NewLinkPreviewer ¶
func NewLinkPreviewer(config LinkPreviewConfig) *LinkPreviewer
NewLinkPreviewer creates a new link previewer with the given config.
func (*LinkPreviewer) FetchPreview ¶
func (lp *LinkPreviewer) FetchPreview(ctx context.Context, urlStr string) (*PreviewWithImage, error)
FetchPreview fetches and generates a link preview for a URL, including the image data.
func (*LinkPreviewer) FetchPreviews ¶
func (lp *LinkPreviewer) FetchPreviews(ctx context.Context, urls []string) []*PreviewWithImage
FetchPreviews fetches previews for multiple URLs in parallel.
func (*LinkPreviewer) FetchPreviewsWithCitations ¶
func (lp *LinkPreviewer) FetchPreviewsWithCitations(ctx context.Context, urls []string, citations []sourceCitation) []*PreviewWithImage
FetchPreviewsWithCitations fetches previews for multiple URLs, using sourceCitation metadata when available to skip HTML fetching.
func (*LinkPreviewer) PreviewFromCitation ¶
func (lp *LinkPreviewer) PreviewFromCitation(ctx context.Context, urlStr string, c sourceCitation) *PreviewWithImage
PreviewFromCitation builds a PreviewWithImage from a sourceCitation without fetching HTML. It downloads the image directly from the citation's Image URL.
type MCPAlwaysAllowRule ¶
type MCPServerConfig ¶
type MCPServerConfig struct {
Transport string `json:"transport,omitempty"` // streamable_http|stdio
Endpoint string `json:"endpoint,omitempty"` // streamable HTTP endpoint
Command string `json:"command,omitempty"` // stdio command path/binary
Args []string `json:"args,omitempty"` // stdio command args
AuthType string `json:"auth_type,omitempty"` // bearer|apikey|none
Token string `json:"token,omitempty"`
AuthURL string `json:"auth_url,omitempty"` // Optional browser auth URL for manual token retrieval.
Connected bool `json:"connected,omitempty"`
Kind string `json:"kind,omitempty"` // generic|nexus
}
MCPServerConfig stores one MCP server connection for a login. The map key in ServiceTokens.MCPServers is the server name.
type MCPToolsConfig ¶
type MCPToolsConfig struct {
EnableStdio bool `yaml:"enable_stdio"`
}
MCPToolsConfig configures generic MCP behavior.
type MatrixReactionSummary ¶
type MatrixReactionSummary struct {
Key string `json:"key"` // The emoji
Count int `json:"count"` // Number of reactions with this emoji
Users []string `json:"users"` // User IDs who reacted
}
MatrixReactionSummary represents a summary of reactions on a message.
type MatrixRoomInfo ¶
type MatrixRoomInfo struct {
RoomID string `json:"room_id"`
Name string `json:"name,omitempty"`
Topic string `json:"topic,omitempty"`
MemberCount int `json:"member_count,omitempty"`
}
MatrixRoomInfo represents room information.
type MatrixUserProfile ¶
type MatrixUserProfile struct {
UserID string `json:"user_id"`
DisplayName string `json:"display_name,omitempty"`
AvatarURL string `json:"avatar_url,omitempty"`
}
MatrixUserProfile represents a user's profile information.
type MediaToolsConfig ¶
type MediaToolsConfig struct {
Models []MediaUnderstandingModelConfig `yaml:"models"`
Concurrency int `yaml:"concurrency"`
Image *MediaUnderstandingConfig `yaml:"image"`
Audio *MediaUnderstandingConfig `yaml:"audio"`
Video *MediaUnderstandingConfig `yaml:"video"`
}
MediaToolsConfig configures media understanding/transcription.
type MediaUnderstandingAttachmentDecision ¶
type MediaUnderstandingAttachmentDecision struct {
AttachmentIndex int `json:"attachment_index"`
Attempts []MediaUnderstandingModelDecision `json:"attempts,omitempty"`
Chosen *MediaUnderstandingModelDecision `json:"chosen,omitempty"`
}
MediaUnderstandingAttachmentDecision records attempts for one attachment.
type MediaUnderstandingAttachmentsConfig ¶
type MediaUnderstandingAttachmentsConfig struct {
Mode string `yaml:"mode"`
MaxAttachments int `yaml:"maxAttachments"`
Prefer string `yaml:"prefer"`
}
MediaUnderstandingAttachmentsConfig controls how media attachments are selected.
type MediaUnderstandingCapability ¶
type MediaUnderstandingCapability string
MediaUnderstandingCapability identifies the type of media being understood.
const ( MediaCapabilityImage MediaUnderstandingCapability = "image" MediaCapabilityAudio MediaUnderstandingCapability = "audio" MediaCapabilityVideo MediaUnderstandingCapability = "video" )
type MediaUnderstandingConfig ¶
type MediaUnderstandingConfig struct {
Enabled *bool `yaml:"enabled"`
Scope *MediaUnderstandingScopeConfig `yaml:"scope"`
MaxBytes int `yaml:"maxBytes"`
MaxChars int `yaml:"maxChars"`
Prompt string `yaml:"prompt"`
TimeoutSeconds int `yaml:"timeoutSeconds"`
Language string `yaml:"language"`
ProviderOptions map[string]map[string]any `yaml:"providerOptions"`
Deepgram *MediaUnderstandingDeepgramConfig `yaml:"deepgram"`
BaseURL string `yaml:"baseUrl"`
Headers map[string]string `yaml:"headers"`
Attachments *MediaUnderstandingAttachmentsConfig `yaml:"attachments"`
Models []MediaUnderstandingModelConfig `yaml:"models"`
}
MediaUnderstandingConfig defines defaults for media understanding of a capability.
type MediaUnderstandingDecision ¶
type MediaUnderstandingDecision struct {
Capability MediaUnderstandingCapability `json:"capability"`
Outcome string `json:"outcome,omitempty"` // success | skipped | disabled | scope-deny | no-attachment
Attachments []MediaUnderstandingAttachmentDecision `json:"attachments,omitempty"`
}
MediaUnderstandingDecision summarizes the overall outcome for a capability.
type MediaUnderstandingDeepgramConfig ¶
type MediaUnderstandingDeepgramConfig struct {
DetectLanguage *bool `yaml:"detectLanguage"`
Punctuate *bool `yaml:"punctuate"`
SmartFormat *bool `yaml:"smartFormat"`
}
MediaUnderstandingDeepgramConfig is a deprecated compatibility shim for Deepgram settings.
type MediaUnderstandingKind ¶
type MediaUnderstandingKind string
MediaUnderstandingKind identifies the output kind.
const ( MediaKindAudioTranscription MediaUnderstandingKind = "audio.transcription" MediaKindImageDescription MediaUnderstandingKind = "image.description" MediaKindVideoDescription MediaUnderstandingKind = "video.description" )
type MediaUnderstandingModelConfig ¶
type MediaUnderstandingModelConfig struct {
Provider string `yaml:"provider"`
Model string `yaml:"model"`
Capabilities []string `yaml:"capabilities"`
Type string `yaml:"type"`
Command string `yaml:"command"`
Args []string `yaml:"args"`
Prompt string `yaml:"prompt"`
MaxChars int `yaml:"maxChars"`
MaxBytes int `yaml:"maxBytes"`
TimeoutSeconds int `yaml:"timeoutSeconds"`
Language string `yaml:"language"`
ProviderOptions map[string]map[string]any `yaml:"providerOptions"`
Deepgram *MediaUnderstandingDeepgramConfig `yaml:"deepgram"`
BaseURL string `yaml:"baseUrl"`
Headers map[string]string `yaml:"headers"`
Profile string `yaml:"profile"`
PreferredProfile string `yaml:"preferredProfile"`
}
MediaUnderstandingModelConfig defines a single media understanding model entry.
type MediaUnderstandingModelDecision ¶
type MediaUnderstandingModelDecision struct {
Type string `json:"type,omitempty"` // provider | cli
Provider string `json:"provider,omitempty"`
Model string `json:"model,omitempty"`
Outcome string `json:"outcome,omitempty"` // success | skipped | failed
Reason string `json:"reason,omitempty"`
}
MediaUnderstandingModelDecision records a single model attempt.
type MediaUnderstandingOutput ¶
type MediaUnderstandingOutput struct {
Kind MediaUnderstandingKind `json:"kind"`
AttachmentIndex int `json:"attachment_index"`
Text string `json:"text"`
Provider string `json:"provider"`
Model string `json:"model,omitempty"`
}
MediaUnderstandingOutput represents a single media understanding result.
type MediaUnderstandingScopeConfig ¶
type MediaUnderstandingScopeConfig struct {
Default string `yaml:"default"`
Rules []MediaUnderstandingScopeRule `yaml:"rules"`
}
MediaUnderstandingScopeConfig controls allow/deny gating for media understanding.
type MediaUnderstandingScopeMatch ¶
type MediaUnderstandingScopeMatch struct {
Channel string `yaml:"channel"`
ChatType string `yaml:"chatType"`
KeyPrefix string `yaml:"keyPrefix"`
}
MediaUnderstandingScopeMatch defines match criteria for media understanding scope rules.
type MediaUnderstandingScopeRule ¶
type MediaUnderstandingScopeRule struct {
Action string `yaml:"action"`
Match *MediaUnderstandingScopeMatch `yaml:"match"`
}
MediaUnderstandingScopeRule defines a single allow/deny rule.
type MemoryConfig ¶
type MemoryConfig struct {
Citations string `yaml:"citations"`
InjectContext bool `yaml:"inject_context"`
}
MemoryConfig configures memory behavior (OpenClaw-style).
type MemoryFactContent ¶
type MemoryFactContent struct {
FactID string `json:"fact_id"`
Content string `json:"content"`
Keywords []string `json:"keywords,omitempty"`
Category string `json:"category,omitempty"` // preference, decision, entity, fact, other
Importance float64 `json:"importance,omitempty"` // 0-1, default 0.5
Source string `json:"source,omitempty"` // user, assistant, system
SourceRoom string `json:"source_room,omitempty"` // Room where the memory was created
CreatedAt int64 `json:"created_at"`
UpdatedAt int64 `json:"updated_at,omitempty"`
}
MemoryFactContent stores a memory fact in a timeline event
type MemoryFlushConfig ¶
type MemoryFlushConfig struct {
Enabled *bool `yaml:"enabled" json:"enabled,omitempty"`
SoftThresholdTokens int `yaml:"soft_threshold_tokens" json:"soft_threshold_tokens,omitempty"`
Prompt string `yaml:"prompt" json:"prompt,omitempty"`
SystemPrompt string `yaml:"system_prompt" json:"system_prompt,omitempty"`
}
MemoryFlushConfig configures pre-compaction memory flush behavior (OpenClaw-style).
type MemoryIndexEntry ¶
type MemoryIndexEntry struct {
FactID string `json:"fact_id"`
EventID string `json:"event_id"`
Keywords []string `json:"keywords"`
Category string `json:"category,omitempty"`
Importance float64 `json:"importance"`
Preview string `json:"preview"` // First 100 chars
CreatedAt int64 `json:"created_at"`
}
MemoryIndexEntry represents a single entry in the memory index
type MemorySearchBatchConfig ¶
type MemorySearchBatchStatus ¶
type MemorySearchCacheConfig ¶
type MemorySearchCacheStatus ¶
type MemorySearchConfig ¶
type MemorySearchConfig struct {
Enabled *bool `yaml:"enabled"`
Sources []string `yaml:"sources"`
ExtraPaths []string `yaml:"extra_paths"`
Provider string `yaml:"provider"`
Model string `yaml:"model"`
Remote *MemorySearchRemoteConfig `yaml:"remote"`
Fallback string `yaml:"fallback"`
Store *MemorySearchStoreConfig `yaml:"store"`
Chunking *MemorySearchChunkingConfig `yaml:"chunking"`
Sync *MemorySearchSyncConfig `yaml:"sync"`
Query *MemorySearchQueryConfig `yaml:"query"`
Cache *MemorySearchCacheConfig `yaml:"cache"`
Experimental *MemorySearchExperimentalConfig `yaml:"experimental"`
}
MemorySearchConfig configures semantic memory search (OpenClaw-style).
type MemorySearchExperimentalConfig ¶
type MemorySearchExperimentalConfig struct {
SessionMemory *bool `yaml:"session_memory"`
}
type MemorySearchFTSStatus ¶
type MemorySearchManager ¶
type MemorySearchManager struct {
// contains filtered or unexported fields
}
func (*MemorySearchManager) Close ¶
func (m *MemorySearchManager) Close()
Close stops background timers/goroutines. It is safe to call multiple times. Vector connections are no longer held persistently (grab+release per operation), so there is nothing to release here.
func (*MemorySearchManager) ProbeEmbeddingAvailability ¶
func (m *MemorySearchManager) ProbeEmbeddingAvailability(ctx context.Context) (bool, string)
func (*MemorySearchManager) ProbeVectorAvailability ¶
func (m *MemorySearchManager) ProbeVectorAvailability(ctx context.Context) bool
func (*MemorySearchManager) Search ¶
func (m *MemorySearchManager) Search(ctx context.Context, query string, opts memory.SearchOptions) ([]memory.SearchResult, error)
func (*MemorySearchManager) Status ¶
func (m *MemorySearchManager) Status() memory.ProviderStatus
func (*MemorySearchManager) StatusDetails ¶
func (m *MemorySearchManager) StatusDetails(ctx context.Context) (*MemorySearchStatus, error)
type MemorySearchQueryConfig ¶
type MemorySearchQueryConfig struct {
MaxResults int `yaml:"max_results"`
MinScore float64 `yaml:"min_score"`
MaxInjectedChars int `yaml:"max_injected_chars"`
Hybrid *MemorySearchHybridConfig `yaml:"hybrid"`
}
type MemorySearchRemoteConfig ¶
type MemorySearchRemoteConfig struct {
BaseURL string `yaml:"base_url"`
APIKey string `yaml:"api_key"`
Headers map[string]string `yaml:"headers"`
Batch *MemorySearchBatchConfig `yaml:"batch"`
}
type MemorySearchSourceCount ¶
type MemorySearchStatus ¶
type MemorySearchStatus struct {
Files int
Chunks int
Dirty bool
WorkspaceDir string
DBPath string
Provider string
Model string
RequestedProvider string
Sources []string
ExtraPaths []string
SourceCounts []MemorySearchSourceCount
Cache *MemorySearchCacheStatus
FTS *MemorySearchFTSStatus
Fallback *memory.FallbackStatus
Vector *MemorySearchVectorStatus
Batch *MemorySearchBatchStatus
}
type MemorySearchStoreConfig ¶
type MemorySearchStoreConfig struct {
Driver string `yaml:"driver"`
Path string `yaml:"path"`
Vector *MemorySearchVectorConfig `yaml:"vector"`
}
type MemorySearchSyncConfig ¶
type MessageMetadata ¶
type MessageMetadata struct {
Role string `json:"role,omitempty"`
Body string `json:"body,omitempty"`
CompletionID string `json:"completion_id,omitempty"`
FinishReason string `json:"finish_reason,omitempty"`
PromptTokens int64 `json:"prompt_tokens,omitempty"`
CompletionTokens int64 `json:"completion_tokens,omitempty"`
Model string `json:"model,omitempty"`
ReasoningTokens int64 `json:"reasoning_tokens,omitempty"`
HasToolCalls bool `json:"has_tool_calls,omitempty"`
Transcript string `json:"transcript,omitempty"`
// Media understanding (OpenClaw-style)
MediaUnderstanding []MediaUnderstandingOutput `json:"media_understanding,omitempty"`
MediaUnderstandingDecisions []MediaUnderstandingDecision `json:"media_understanding_decisions,omitempty"`
// Turn tracking for the new schema
TurnID string `json:"turn_id,omitempty"` // Unique identifier for this assistant turn
AgentID string `json:"agent_id,omitempty"` // Which agent generated this (for multi-agent rooms)
// Tool call tracking
ToolCalls []ToolCallMetadata `json:"tool_calls,omitempty"` // List of tool calls in this turn
// Canonical internal schema payload (AI SDK compatible).
CanonicalSchema string `json:"canonical_schema,omitempty"` // e.g. ai-sdk-ui-message-v1
CanonicalUIMessage map[string]any `json:"canonical_ui_message,omitempty"` // AI SDK UIMessage-compatible payload
// Timing information
StartedAtMs int64 `json:"started_at_ms,omitempty"` // Unix ms when generation started
FirstTokenAtMs int64 `json:"first_token_at_ms,omitempty"` // Unix ms of first token
CompletedAtMs int64 `json:"completed_at_ms,omitempty"` // Unix ms when completed
// Thinking/reasoning content (embedded, not separate)
ThinkingContent string `json:"thinking_content,omitempty"` // Full thinking text
ThinkingTokenCount int `json:"thinking_token_count,omitempty"` // Number of thinking tokens
// History exclusion
ExcludeFromHistory bool `json:"exclude_from_history,omitempty"` // Exclude from LLM context (e.g., welcome messages)
// Multimodal history: media attached to this message for re-injection into prompts.
MediaURL string `json:"media_url,omitempty"` // mxc:// URL for user-sent media (image, PDF, audio, video)
MimeType string `json:"mime_type,omitempty"` // MIME type of user-sent media
GeneratedFiles []GeneratedFileRef `json:"generated_files,omitempty"` // Files generated by the assistant in this turn
}
MessageMetadata keeps a tiny summary of each exchange so we can rebuild prompts using database history.
func (*MessageMetadata) CopyFrom ¶
func (mm *MessageMetadata) CopyFrom(other any)
CopyFrom allows the metadata struct to participate in mautrix's meta merge.
type MessageRole ¶
type MessageRole string
MessageRole represents the role of a message sender
const ( RoleSystem MessageRole = "system" RoleUser MessageRole = "user" RoleAssistant MessageRole = "assistant" RoleTool MessageRole = "tool" )
type MessagesConfig ¶
type MessagesConfig struct {
ResponsePrefix string `yaml:"responsePrefix"`
AckReaction string `yaml:"ackReaction"`
AckReactionScope string `yaml:"ackReactionScope"` // group-mentions|group-all|direct|all|off|none
RemoveAckAfter bool `yaml:"removeAckAfterReply"`
GroupChat *GroupChatConfig `yaml:"groupChat"`
DirectChat *DirectChatConfig `yaml:"directChat"`
Queue *QueueConfig `yaml:"queue"`
InboundDebounce *InboundDebounceConfig `yaml:"inbound"`
}
MessagesConfig defines message rendering settings (OpenClaw-style).
type ModelBackend ¶
type ModelBackend string
ModelBackend identifies which backend to use for a model. All backends use the OpenAI SDK with different base URLs.
const ( BackendOpenAI ModelBackend = "openai" BackendOpenRouter ModelBackend = "openrouter" )
func ParseModelPrefix ¶
func ParseModelPrefix(modelID string) (backend ModelBackend, actualModel string)
ParseModelPrefix extracts the backend and actual model ID from a prefixed model. Examples:
- "openai/gpt-5.2" → (BackendOpenAI, "gpt-5.2")
- "anthropic/claude-sonnet-4.5" (no routing prefix) → ("", "anthropic/claude-sonnet-4.5")
- "gpt-4o" (no prefix) → ("", "gpt-4o")
type ModelCache ¶
type ModelCache struct {
Models []ModelInfo `json:"models,omitempty"`
LastRefresh int64 `json:"last_refresh,omitempty"`
CacheDuration int64 `json:"cache_duration,omitempty"` // seconds
}
ModelCache stores available models (cached in UserLoginMetadata). Uses provider-agnostic ModelInfo instead of openai.Model.
type ModelCapabilities ¶
type ModelCapabilities struct {
SupportsVision bool `json:"supports_vision"`
SupportsReasoning bool `json:"supports_reasoning"` // Models that support reasoning_effort parameter
SupportsPDF bool `json:"supports_pdf"`
SupportsImageGen bool `json:"supports_image_gen"`
SupportsAudio bool `json:"supports_audio"` // Models that accept audio input
SupportsVideo bool `json:"supports_video"` // Models that accept video input
SupportsToolCalling bool `json:"supports_tool_calling"` // Models that support function calling
}
ModelCapabilities stores computed capabilities for a model. This is NOT sent to the API, just used for local caching.
type ModelCapabilitiesEventContent ¶
type ModelCapabilitiesEventContent struct {
AvailableModels []ModelInfo `json:"available_models"`
}
ModelCapabilitiesEventContent represents available models and their capabilities
type ModelCatalogEntry ¶
type ModelCatalogEntry struct {
ID string `json:"id"`
Name string `json:"name,omitempty"`
Provider string `json:"provider"`
ContextWindow int `json:"contextWindow,omitempty"`
MaxOutputTokens int `json:"maxTokens,omitempty"`
Reasoning bool `json:"reasoning,omitempty"`
Input []string `json:"input,omitempty"`
}
type ModelDefinitionConfig ¶
type ModelDefinitionConfig struct {
ID string `yaml:"id"`
Name string `yaml:"name"`
Reasoning bool `yaml:"reasoning"`
Input []string `yaml:"input"`
ContextWindow int `yaml:"context_window"`
MaxTokens int `yaml:"max_tokens"`
}
ModelDefinitionConfig defines a model entry for catalog seeding.
type ModelInfo ¶
type ModelInfo struct {
ID string `json:"id"`
Name string `json:"name"`
Provider string `json:"provider"`
API string `json:"api,omitempty"`
Description string `json:"description,omitempty"`
SupportsVision bool `json:"supports_vision"`
SupportsToolCalling bool `json:"supports_tool_calling"`
SupportsPDF bool `json:"supports_pdf,omitempty"`
SupportsReasoning bool `json:"supports_reasoning"`
SupportsWebSearch bool `json:"supports_web_search"`
SupportsImageGen bool `json:"supports_image_gen,omitempty"`
SupportsAudio bool `json:"supports_audio,omitempty"`
SupportsVideo bool `json:"supports_video,omitempty"`
ContextWindow int `json:"context_window,omitempty"`
MaxOutputTokens int `json:"max_output_tokens,omitempty"`
AvailableTools []string `json:"available_tools,omitempty"`
}
ModelInfo describes a single AI model's capabilities
type ModelProviderConfig ¶
type ModelProviderConfig struct {
Models []ModelDefinitionConfig `yaml:"models"`
}
ModelProviderConfig describes models for a specific provider.
type ModelsConfig ¶
type ModelsConfig struct {
Mode string `yaml:"mode"` // merge | replace
Providers map[string]ModelProviderConfig `yaml:"providers"`
}
ModelsConfig configures model catalog seeding (OpenClaw-style).
type NexusToolsConfig ¶
type NexusToolsConfig struct {
Enabled *bool `yaml:"enabled"`
BaseURL string `yaml:"base_url"`
MCPEndpoint string `yaml:"mcp_endpoint"`
Token string `yaml:"token"`
AuthType string `yaml:"auth_type"` // bearer | apikey
TimeoutSeconds int `yaml:"timeout_seconds"`
}
NexusToolsConfig configures Nexus tool bridging to a clay-nexus backend.
type NonFallbackError ¶
type NonFallbackError struct {
Err error
}
NonFallbackError marks an error as ineligible for model fallback. This is used when partial output has already been sent.
func (*NonFallbackError) Error ¶
func (e *NonFallbackError) Error() string
func (*NonFallbackError) Unwrap ¶
func (e *NonFallbackError) Unwrap() error
type NormalizedLocation ¶
type OpenAIConnector ¶
type OpenAIConnector struct {
Config Config
// contains filtered or unexported fields
}
OpenAIConnector wires mautrix bridgev2 to the OpenAI chat APIs.
func (*OpenAIConnector) CreateLogin ¶
func (oc *OpenAIConnector) CreateLogin(ctx context.Context, user *bridgev2.User, flowID string) (bridgev2.LoginProcess, error)
func (*OpenAIConnector) FillPortalBridgeInfo ¶
func (oc *OpenAIConnector) FillPortalBridgeInfo(portal *bridgev2.Portal, content *event.BridgeEventContent)
FillPortalBridgeInfo sets custom room type for AI rooms
func (*OpenAIConnector) GetBridgeInfoVersion ¶
func (oc *OpenAIConnector) GetBridgeInfoVersion() (info, capabilities int)
func (*OpenAIConnector) GetCapabilities ¶
func (oc *OpenAIConnector) GetCapabilities() *bridgev2.NetworkGeneralCapabilities
func (*OpenAIConnector) GetConfig ¶
func (oc *OpenAIConnector) GetConfig() (example string, data any, upgrader configupgrade.Upgrader)
func (*OpenAIConnector) GetDBMetaTypes ¶
func (oc *OpenAIConnector) GetDBMetaTypes() database.MetaTypes
func (*OpenAIConnector) GetLoginFlows ¶
func (oc *OpenAIConnector) GetLoginFlows() []bridgev2.LoginFlow
Package-level flow definitions (use Provider* constants as flow IDs)
func (*OpenAIConnector) GetName ¶
func (oc *OpenAIConnector) GetName() bridgev2.BridgeName
func (*OpenAIConnector) Init ¶
func (oc *OpenAIConnector) Init(bridge *bridgev2.Bridge)
func (*OpenAIConnector) LoadUserLogin ¶
func (*OpenAIConnector) SetMatrixCredentials ¶
func (oc *OpenAIConnector) SetMatrixCredentials(accessToken, homeserver string)
SetMatrixCredentials seeds Beeper provider config from the Matrix account, if unset.
func (*OpenAIConnector) Stop ¶
func (oc *OpenAIConnector) Stop(ctx context.Context)
type OpenAILogin ¶
type OpenAILogin struct {
User *bridgev2.User
Connector *OpenAIConnector
FlowID string
}
OpenAILogin maps a Matrix user to a synthetic OpenAI "login".
func (*OpenAILogin) Cancel ¶
func (ol *OpenAILogin) Cancel()
func (*OpenAILogin) SubmitUserInput ¶
type OpenAIProvider ¶
type OpenAIProvider struct {
// contains filtered or unexported fields
}
OpenAIProvider implements AIProvider for OpenAI's API
func NewOpenAIProviderWithBaseURL ¶
func NewOpenAIProviderWithBaseURL(apiKey, baseURL string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProviderWithBaseURL creates an OpenAI provider with a custom base URL. Used for OpenRouter, Beeper proxy, or custom endpoints.
func NewOpenAIProviderWithPDFPlugin ¶
func NewOpenAIProviderWithPDFPlugin(apiKey, baseURL, userID, pdfEngine string, headers map[string]string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProviderWithPDFPlugin creates an OpenAI provider with PDF plugin middleware. Used for OpenRouter/Beeper to enable universal PDF support via file-parser plugin.
func NewOpenAIProviderWithUserID ¶
func NewOpenAIProviderWithUserID(apiKey, baseURL, userID string, log zerolog.Logger) (*OpenAIProvider, error)
NewOpenAIProviderWithUserID creates an OpenAI provider that passes user_id with each request. Used for Beeper proxy to ensure correct rate limiting and feature flags per user.
func (*OpenAIProvider) Client ¶
func (o *OpenAIProvider) Client() openai.Client
Client returns the underlying OpenAI client for direct access. Used by the bridge for advanced features like the Responses API.
func (*OpenAIProvider) Generate ¶
func (o *OpenAIProvider) Generate(ctx context.Context, params GenerateParams) (*GenerateResponse, error)
Generate performs a non-streaming generation using the Responses API.
func (*OpenAIProvider) GenerateStream ¶
func (o *OpenAIProvider) GenerateStream(ctx context.Context, params GenerateParams) (<-chan StreamEvent, error)
GenerateStream generates a streaming response from OpenAI using the Responses API.
func (*OpenAIProvider) ListModels ¶
func (o *OpenAIProvider) ListModels(ctx context.Context) ([]ModelInfo, error)
ListModels returns available OpenAI models
func (*OpenAIProvider) Name ¶
func (o *OpenAIProvider) Name() string
type OpenAIRemoteMessage ¶
type OpenAIRemoteMessage struct {
PortalKey networkid.PortalKey
ID networkid.MessageID
Sender bridgev2.EventSender
Content string
Timestamp time.Time
Metadata *MessageMetadata
FormattedContent string
ReplyToEventID id.EventID
ToolCallEventIDs []string
ImageEventIDs []string
}
OpenAIRemoteMessage represents a GPT answer that should be bridged to Matrix.
func (*OpenAIRemoteMessage) AddLogContext ¶
func (m *OpenAIRemoteMessage) AddLogContext(c zerolog.Context) zerolog.Context
func (*OpenAIRemoteMessage) ConvertMessage ¶
func (m *OpenAIRemoteMessage) ConvertMessage(ctx context.Context, portal *bridgev2.Portal, intent bridgev2.MatrixAPI) (*bridgev2.ConvertedMessage, error)
func (*OpenAIRemoteMessage) GetID ¶
func (m *OpenAIRemoteMessage) GetID() networkid.MessageID
func (*OpenAIRemoteMessage) GetPortalKey ¶
func (m *OpenAIRemoteMessage) GetPortalKey() networkid.PortalKey
func (*OpenAIRemoteMessage) GetSender ¶
func (m *OpenAIRemoteMessage) GetSender() bridgev2.EventSender
func (*OpenAIRemoteMessage) GetStreamOrder ¶
func (m *OpenAIRemoteMessage) GetStreamOrder() int64
func (*OpenAIRemoteMessage) GetTimestamp ¶
func (m *OpenAIRemoteMessage) GetTimestamp() time.Time
func (*OpenAIRemoteMessage) GetTransactionID ¶
func (m *OpenAIRemoteMessage) GetTransactionID() networkid.TransactionID
GetTransactionID implements RemoteMessageWithTransactionID
func (*OpenAIRemoteMessage) GetType ¶
func (m *OpenAIRemoteMessage) GetType() bridgev2.RemoteEventType
type OpenCodeConfig ¶
type OpenCodeConfig struct {
Enabled *bool `yaml:"enabled"`
AutoStart *bool `yaml:"auto_start"`
Command string `yaml:"command"`
Hostname string `yaml:"hostname"`
Port int `yaml:"port"`
Username string `yaml:"username"`
Password string `yaml:"password"`
IsolateXDG *bool `yaml:"isolate_xdg"`
HomeBaseDir string `yaml:"home_base_dir"`
}
OpenCodeConfig configures optional OpenCode local server autostart/restore.
type OrchestrationConfig ¶
type OrchestrationConfig struct {
Mode string `json:"mode"` // "user_directed", "auto"
AllowParallel bool `json:"allow_parallel"`
MaxConcurrent int `json:"max_concurrent,omitempty"`
}
OrchestrationConfig defines how agents work together
type PDFConfig ¶
type PDFConfig struct {
Engine string `json:"engine,omitempty"` // pdf-text (free), mistral-ocr (OCR, paid, default), native
}
PDFConfig stores per-room PDF processing configuration
type PDFPluginConfig ¶
type PDFPluginConfig struct {
ID string `json:"id"`
Config json.RawMessage `json:"config,omitempty"`
}
PDFPluginConfig holds configuration for the PDF file-parser plugin
type PortalInitOpts ¶
type PortalInitOpts struct {
ModelID string
Title string
SystemPrompt string
CopyFrom *PortalMetadata // For forked chats - copies config from source
PortalKey *networkid.PortalKey
}
PortalInitOpts contains options for initializing a chat portal
type PortalMetadata ¶
type PortalMetadata struct {
Model string `json:"model,omitempty"` // Set from room state
SystemPrompt string `json:"system_prompt,omitempty"` // Set from room state
ResponsePrefix string `json:"response_prefix,omitempty"` // Per-room response prefix override
Temperature float64 `json:"temperature,omitempty"` // Set from room state
MaxContextMessages int `json:"max_context_messages,omitempty"` // Set from room state
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` // Set from room state
ReasoningEffort string `json:"reasoning_effort,omitempty"` // none, low, medium, high, xhigh
Slug string `json:"slug,omitempty"`
Title string `json:"title,omitempty"`
TitleGenerated bool `json:"title_generated,omitempty"` // True if title was auto-generated
WelcomeSent bool `json:"welcome_sent,omitempty"`
AutoGreetingSent bool `json:"auto_greeting_sent,omitempty"`
Capabilities ModelCapabilities `json:"capabilities,omitempty"`
LastRoomStateSync int64 `json:"last_room_state_sync,omitempty"` // Track when we've synced room state
PDFConfig *PDFConfig `json:"pdf_config,omitempty"` // Per-room PDF processing configuration
ConversationMode string `json:"conversation_mode,omitempty"`
LastResponseID string `json:"last_response_id,omitempty"`
EmitThinking bool `json:"emit_thinking,omitempty"`
EmitToolArgs bool `json:"emit_tool_args,omitempty"`
ThinkingLevel string `json:"thinking_level,omitempty"` // off|minimal|low|medium|high|xhigh
VerboseLevel string `json:"verbose_level,omitempty"` // off|on|full
ElevatedLevel string `json:"elevated_level,omitempty"` // off|on|ask|full
GroupActivation string `json:"group_activation,omitempty"` // mention|always
GroupActivationNeedsIntro bool `json:"group_activation_needs_intro,omitempty"`
GroupIntroSent bool `json:"group_intro_sent,omitempty"`
SendPolicy string `json:"send_policy,omitempty"` // allow|deny
SessionResetAt int64 `json:"session_reset_at,omitempty"`
AbortedLastRun bool `json:"aborted_last_run,omitempty"`
CompactionCount int `json:"compaction_count,omitempty"`
MemoryFlushAt int64 `json:"memory_flush_at,omitempty"`
MemoryFlushCompactionCount int `json:"memory_flush_compaction_count,omitempty"`
MemoryBootstrapAt int64 `json:"memory_bootstrap_at,omitempty"`
SessionBootstrappedAt int64 `json:"session_bootstrapped_at,omitempty"`
SessionBootstrapByAgent map[string]int64 `json:"session_bootstrap_by_agent,omitempty"`
// Agent-related metadata
AgentID string `json:"agent_id,omitempty"` // Which agent is the ghost for this room
AgentPrompt string `json:"agent_prompt,omitempty"` // Cached prompt for the assigned agent
IsBuilderRoom bool `json:"is_builder_room,omitempty"` // True if this is the Manage AI Chats room (protected from overrides)
IsRawMode bool `json:"is_raw_mode,omitempty"` // True if this is a playground/raw mode room (no directive processing)
IsCronRoom bool `json:"is_cron_room,omitempty"` // True if this is a hidden cron room
CronJobID string `json:"cron_job_id,omitempty"` // Cron job ID for cron rooms
SubagentParentRoomID string `json:"subagent_parent_room_id,omitempty"` // Parent room ID for subagent sessions
// OpenCode session metadata
IsOpenCodeRoom bool `json:"is_opencode_room,omitempty"`
OpenCodeInstanceID string `json:"opencode_instance_id,omitempty"`
OpenCodeSessionID string `json:"opencode_session_id,omitempty"`
OpenCodeReadOnly bool `json:"opencode_read_only,omitempty"`
OpenCodeTitlePending bool `json:"opencode_title_pending,omitempty"`
// Codex app-server session metadata (isolated, does not integrate with OpenCode sessions list).
IsCodexRoom bool `json:"is_codex_room,omitempty"`
CodexThreadID string `json:"codex_thread_id,omitempty"`
CodexCwd string `json:"codex_cwd,omitempty"`
// Ack reaction config - similar to OpenClaw's ack reactions
AckReactionEmoji string `json:"ack_reaction_emoji,omitempty"` // Emoji to react with when message received (e.g., "👀", "🤔"). Empty = disabled.
AckReactionRemoveAfter bool `json:"ack_reaction_remove_after,omitempty"` // Remove the ack reaction after replying
// Runtime-only overrides (not persisted)
DisabledTools []string `json:"-"`
// Debounce configuration (0 = use default, -1 = disabled)
DebounceMs int `json:"debounce_ms,omitempty"`
// Per-session typing overrides (OpenClaw-style).
TypingMode string `json:"typing_mode,omitempty"` // never|instant|thinking|message
TypingIntervalSeconds *int `json:"typing_interval_seconds,omitempty"` // Optional per-session override
// Anthropic prompt cache TTL tracking
LastCacheTTLRefresh int64 `json:"last_cache_ttl_refresh,omitempty"` // Unix millis of last cache-eligible request
}
PortalMetadata stores per-room tuning knobs for the assistant.
type PreDeltaError ¶
type PreDeltaError struct {
Err error
}
PreDeltaError indicates a failure before any assistant output was streamed.
func (*PreDeltaError) Error ¶
func (e *PreDeltaError) Error() string
func (*PreDeltaError) Unwrap ¶
func (e *PreDeltaError) Unwrap() error
type PreviewWithImage ¶
type PreviewWithImage struct {
Preview *event.BeeperLinkPreview
ImageData []byte
ImageURL string // Original image URL for reference
}
PreviewWithImage holds a preview along with its downloaded image data.
type ProviderBraveConfig ¶
type ProviderBraveConfig struct {
Enabled *bool `yaml:"enabled"`
BaseURL string `yaml:"base_url"`
APIKey string `yaml:"api_key"`
TimeoutSecs int `yaml:"timeout_seconds"`
CacheTtlSecs int `yaml:"cache_ttl_seconds"`
SearchLang string `yaml:"search_lang"`
UILang string `yaml:"ui_lang"`
DefaultCountry string `yaml:"default_country"`
DefaultFreshness string `yaml:"default_freshness"`
}
type ProviderConfig ¶
type ProviderConfig struct {
APIKey string `yaml:"api_key"`
BaseURL string `yaml:"base_url"`
DefaultModel string `yaml:"default_model"`
DefaultPDFEngine string `yaml:"default_pdf_engine"` // pdf-text, mistral-ocr (default), native
}
ProviderConfig holds settings for a specific AI provider.
type ProviderDirectConfig ¶
type ProviderExaConfig ¶
type ProviderExaConfig struct {
Enabled *bool `yaml:"enabled"`
BaseURL string `yaml:"base_url"`
APIKey string `yaml:"api_key"`
Type string `yaml:"type"`
Category string `yaml:"category"`
NumResults int `yaml:"num_results"`
IncludeText bool `yaml:"include_text"`
TextMaxCharacters int `yaml:"text_max_chars"`
Highlights bool `yaml:"highlights"`
}
type ProvidersConfig ¶
type ProvidersConfig struct {
Beeper ProviderConfig `yaml:"beeper"`
OpenAI ProviderConfig `yaml:"openai"`
OpenRouter ProviderConfig `yaml:"openrouter"`
}
ProvidersConfig contains per-provider configuration.
type ProvisioningAPI ¶
type ProvisioningAPI struct {
// contains filtered or unexported fields
}
ProvisioningAPI handles the provisioning endpoints for user defaults
type ProxyError ¶
type ProxyError struct {
Code string `json:"code"`
Message string `json:"message"`
Details string `json:"details"`
Provider string `json:"provider"`
Retryable bool `json:"retryable"`
Type string `json:"type"`
Status int `json:"status"`
}
ProxyError represents a structured error from the hungryserv proxy
func ParseProxyError ¶
func ParseProxyError(err error) *ProxyError
ParseProxyError attempts to parse a structured proxy error from an error message
type ProxyErrorResponse ¶
type ProxyErrorResponse struct {
Error ProxyError `json:"error"`
}
ProxyErrorResponse is the wrapper for proxy errors
type PruningConfig ¶
type PruningConfig struct {
// Mode controls pruning strategy.
// "off" disables proactive pruning.
// "cache-ttl" enables proactive pruning using TTL-like refresh behavior.
Mode string `yaml:"mode" json:"mode,omitempty"`
// TTL is the refresh interval for cache-ttl mode.
// Default: 1h
TTL time.Duration `yaml:"ttl" json:"ttl,omitempty"`
// Enabled turns on proactive context pruning
Enabled bool `yaml:"enabled" json:"enabled"`
// SoftTrimRatio is the context usage ratio (0.0-1.0) that triggers soft trimming
// Default: 0.3 (30% of context window)
SoftTrimRatio float64 `yaml:"soft_trim_ratio" json:"soft_trim_ratio,omitempty"`
// HardClearRatio is the context usage ratio (0.0-1.0) that triggers hard clearing
// Default: 0.5 (50% of context window)
HardClearRatio float64 `yaml:"hard_clear_ratio" json:"hard_clear_ratio,omitempty"`
// KeepLastAssistants protects the N most recent assistant messages from pruning
// Default: 3
KeepLastAssistants int `yaml:"keep_last_assistants" json:"keep_last_assistants,omitempty"`
// MinPrunableChars is the minimum total chars in prunable tool results before hard clear kicks in
// Default: 50000
MinPrunableChars int `yaml:"min_prunable_chars" json:"min_prunable_chars,omitempty"`
// SoftTrimMaxChars is the threshold for considering a tool result "large" (triggering soft trim)
// Default: 4000
SoftTrimMaxChars int `yaml:"soft_trim_max_chars" json:"soft_trim_max_chars,omitempty"`
// SoftTrimHeadChars is how many chars to keep from the start when soft trimming
// Default: 1500
SoftTrimHeadChars int `yaml:"soft_trim_head_chars" json:"soft_trim_head_chars,omitempty"`
// SoftTrimTailChars is how many chars to keep from the end when soft trimming
// Default: 1500
SoftTrimTailChars int `yaml:"soft_trim_tail_chars" json:"soft_trim_tail_chars,omitempty"`
// HardClearEnabled allows disabling hard clear phase
// Default: true
HardClearEnabled *bool `yaml:"hard_clear_enabled" json:"hard_clear_enabled,omitempty"`
// HardClearPlaceholder is the text that replaces cleared tool results
// Default: "[Old tool result content cleared]"
HardClearPlaceholder string `yaml:"hard_clear_placeholder" json:"hard_clear_placeholder,omitempty"`
// ToolsAllow is a list of tool name patterns to prune (supports wildcards: list_*, *_search)
// Empty means all tools are prunable (unless in deny list)
ToolsAllow []string `yaml:"tools_allow" json:"tools_allow,omitempty"`
// ToolsDeny is a list of tool name patterns to never prune (supports wildcards)
ToolsDeny []string `yaml:"tools_deny" json:"tools_deny,omitempty"`
// SummarizationEnabled enables LLM-based summarization instead of placeholder text
// Default: true (when compaction is enabled)
SummarizationEnabled *bool `yaml:"summarization_enabled" json:"summarization_enabled,omitempty"`
// SummarizationModel is the model to use for generating summaries
// Default: "anthropic/claude-opus-4.6"
SummarizationModel string `yaml:"summarization_model" json:"summarization_model,omitempty"`
// MaxSummaryTokens is the maximum tokens for generated summaries
// Default: 500
MaxSummaryTokens int `yaml:"max_summary_tokens" json:"max_summary_tokens,omitempty"`
// MaxHistoryShare is the maximum share of the context budget that conversation history may occupy.
// When exceeded, oldest messages are dropped and summarized
// Default: 0.5 (50%)
MaxHistoryShare float64 `yaml:"max_history_share" json:"max_history_share,omitempty"`
// ReserveTokens is the token budget reserved for compaction output
// Default: 2000
ReserveTokens int `yaml:"reserve_tokens" json:"reserve_tokens,omitempty"`
// CustomInstructions are additional instructions for the summarization model
CustomInstructions string `yaml:"custom_instructions" json:"custom_instructions,omitempty"`
// MemoryFlush runs a pre-compaction memory write pass.
MemoryFlush *MemoryFlushConfig `yaml:"memory_flush" json:"memory_flush,omitempty"`
// MaxHistoryTurns limits conversation history to the last N user turns (and their associated
// assistant responses). This reduces token usage for long-running DM sessions.
// A value of 0 means no limit (default behavior).
// Default: 0 (unlimited)
MaxHistoryTurns int `yaml:"max_history_turns" json:"max_history_turns,omitempty"`
}
PruningConfig configures context pruning behavior (matches OpenClaw's AgentContextPruningConfig)
func DefaultPruningConfig ¶
func DefaultPruningConfig() *PruningConfig
DefaultPruningConfig returns OpenClaw's default settings
type QueueConfig ¶
type QueueConfig struct {
Mode string `yaml:"mode"`
ByChannel map[string]string `yaml:"byChannel"`
DebounceMs *int `yaml:"debounceMs"`
DebounceMsByChannel map[string]int `yaml:"debounceMsByChannel"`
Cap *int `yaml:"cap"`
Drop string `yaml:"drop"`
}
QueueConfig mirrors OpenClaw's queue settings.
type QueueDirective ¶
type QueueDropPolicy ¶
type QueueDropPolicy string
const ( QueueDropOld QueueDropPolicy = "old" QueueDropNew QueueDropPolicy = "new" QueueDropSummarize QueueDropPolicy = "summarize" )
type QueueInlineOptions ¶
type QueueInlineOptions struct {
DebounceMs *int
Cap *int
DropPolicy *QueueDropPolicy
}
type QueueSettings ¶
type QueueSettings struct {
Mode QueueMode
DebounceMs int
Cap int
DropPolicy QueueDropPolicy
}
type ReactionFeedback ¶
type ReactionFeedback struct {
Emoji string // The emoji used (e.g., "👍", "👎")
Timestamp time.Time // When the reaction was added
Sender string // Who sent the reaction (display name or user ID)
MessageID string // Which message was reacted to (event ID or timestamp)
RoomName string // Room/channel name for context
Action string // "added" or "removed"
}
ReactionFeedback represents a user reaction to an AI message. Similar to OpenClaw's system events, these are queued and drained when building the next prompt.
func DrainReactionFeedback ¶
func DrainReactionFeedback(roomID id.RoomID) []ReactionFeedback
DrainReactionFeedback returns and clears all reaction feedback for a room.
type ReactionQueue ¶
type ReactionQueue struct {
// contains filtered or unexported fields
}
ReactionQueue holds reaction feedback for a room.
func (*ReactionQueue) AddReaction ¶
func (q *ReactionQueue) AddReaction(feedback ReactionFeedback)
AddReaction adds a reaction feedback to the queue. Skips consecutive duplicates like OpenClaw does.
func (*ReactionQueue) DrainFeedback ¶
func (q *ReactionQueue) DrainFeedback() []ReactionFeedback
DrainFeedback returns all queued feedback and clears the queue.
type ReasoningEffortOption ¶
type ReasoningEffortOption struct {
Value string `json:"value"` // minimal, low, medium, high, xhigh
Label string `json:"label"` // Display name
}
ReasoningEffortOption represents an available reasoning effort level
type ReplyTarget ¶
func (ReplyTarget) EffectiveReplyTo ¶
func (t ReplyTarget) EffectiveReplyTo() id.EventID
type ReqSetDefaults ¶
type ReqSetDefaults struct {
Model *string `json:"model,omitempty"`
SystemPrompt *string `json:"system_prompt,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
ReasoningEffort *string `json:"reasoning_effort,omitempty"`
}
ReqSetDefaults is the request body for PUT /v1/defaults
type ResponseDirectives ¶
type ResponseDirectives struct {
// Text is the cleaned response text with directives stripped.
Text string
// IsSilent indicates the response should not be sent (NO_REPLY token present).
IsSilent bool
// ReplyToEventID is the Matrix event ID to reply to (from [[reply_to:<id>]] or [[reply_to_current]]).
ReplyToEventID id.EventID
// ReplyToCurrent indicates [[reply_to_current]] was used (reply to triggering message).
ReplyToCurrent bool
// HasReplyTag indicates a reply tag was present in the original text.
HasReplyTag bool
}
ResponseDirectives contains parsed directives from an LLM response. Matches OpenClaw's directive parsing behavior.
func ParseResponseDirectives ¶
func ParseResponseDirectives(text string, currentEventID id.EventID) *ResponseDirectives
ParseResponseDirectives extracts directives from LLM response text. currentEventID is the triggering message's event ID (used for [[reply_to_current]]).
type ResponsePrefixContext ¶
type ResponsePrefixContext struct {
Model string
ModelFull string
Provider string
ThinkingLevel string
IdentityName string
}
ResponsePrefixContext mirrors OpenClaw's template context.
type ResultStatus ¶
type ResultStatus string
ResultStatus represents the status of a tool result
const ( ResultStatusSuccess ResultStatus = "success" ResultStatusError ResultStatus = "error" ResultStatusPartial ResultStatus = "partial" ResultStatusDenied ResultStatus = "denied" )
type RoomCapabilitiesEventContent ¶
type RoomCapabilitiesEventContent struct {
Capabilities *ModelCapabilities `json:"capabilities,omitempty"`
AvailableTools []ToolInfo `json:"available_tools,omitempty"`
ReasoningEffortOptions []ReasoningEffortOption `json:"reasoning_effort_options,omitempty"`
Provider string `json:"provider,omitempty"`
EffectiveSettings *EffectiveSettings `json:"effective_settings,omitempty"`
}
RoomCapabilitiesEventContent represents bridge-controlled room capabilities. This is protected by power levels (100) so only the bridge bot can modify it.
type RoomSettingsEventContent ¶
type RoomSettingsEventContent struct {
Model string `json:"model,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
MaxContextMessages int `json:"max_context_messages,omitempty"`
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
ConversationMode string `json:"conversation_mode,omitempty"` // "messages" or "responses"
AgentID string `json:"agent_id,omitempty"`
EmitThinking *bool `json:"emit_thinking,omitempty"`
EmitToolArgs *bool `json:"emit_tool_args,omitempty"`
}
RoomSettingsEventContent represents user-editable room settings. This uses normal power levels (0) so users can modify them.
type SearchConfig ¶
type SearchConfig struct {
Provider string `yaml:"provider"`
Fallbacks []string `yaml:"fallbacks"`
Exa ProviderExaConfig `yaml:"exa"`
Brave ProviderBraveConfig `yaml:"brave"`
Perplexity ProviderPerplexityConfig `yaml:"perplexity"`
OpenRouter ProviderOpenRouterConfig `yaml:"openrouter"`
}
type ServiceConfig ¶
type ServiceConfigMap ¶
type ServiceConfigMap map[string]ServiceConfig
type ServiceTokens ¶
type ServiceTokens struct {
OpenAI string `json:"openai,omitempty"`
OpenRouter string `json:"openrouter,omitempty"`
Exa string `json:"exa,omitempty"`
Brave string `json:"brave,omitempty"`
Perplexity string `json:"perplexity,omitempty"`
DesktopAPI string `json:"desktop_api,omitempty"`
DesktopAPIInstances map[string]DesktopAPIInstance `json:"desktop_api_instances,omitempty"`
MCPServers map[string]MCPServerConfig `json:"mcp_servers,omitempty"`
}
ServiceTokens stores optional per-login credentials for external services.
type SessionConfig ¶
type SessionConfig struct {
Scope string `yaml:"scope"`
MainKey string `yaml:"mainKey"`
Store string `yaml:"store"`
}
SessionConfig configures session store behavior (OpenClaw-style).
type SettingExplanation ¶
type SettingExplanation struct {
Value any `json:"value"`
Source SettingSource `json:"source"`
Reason string `json:"reason,omitempty"` // Only when limited/unavailable
}
SettingExplanation describes why a setting has its current value
type SettingSource ¶
type SettingSource string
SettingSource indicates where a setting value came from
const ( SourceAgentPolicy SettingSource = "agent_policy" SourceRoomOverride SettingSource = "room_override" SourceUserDefault SettingSource = "user_default" SourceProviderConfig SettingSource = "provider_config" SourceGlobalDefault SettingSource = "global_default" SourceModelLimit SettingSource = "model_limitation" SourceProviderLimit SettingSource = "provider_limitation" )
type StatusDisplay ¶
type StatusDisplay struct {
Icon string `json:"icon,omitempty"`
Animation string `json:"animation,omitempty"` // "pulse", "spin", etc.
Color string `json:"color,omitempty"`
}
StatusDisplay contains display hints for status indicators
type StepBoundaryContent ¶
type StepBoundaryContent struct {
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
StepNumber int `json:"step_number"`
StepType string `json:"step_type"` // "tool_response_processed", etc.
PreviousToolCalls []string `json:"previous_tool_calls,omitempty"`
Display *StepDisplay `json:"display,omitempty"`
}
StepBoundaryContent represents a step boundary within a turn
type StepDisplay ¶
type StepDisplay struct {
Label string `json:"label,omitempty"`
}
StepDisplay contains display hints for step boundaries
type StreamContentType ¶
type StreamContentType string
StreamContentType identifies the type of content in a stream delta
const ( StreamContentText StreamContentType = "text" StreamContentReasoning StreamContentType = "reasoning" StreamContentToolInput StreamContentType = "tool_input" StreamContentToolResult StreamContentType = "tool_result" StreamContentCode StreamContentType = "code" StreamContentImage StreamContentType = "image" )
type StreamCursor ¶
type StreamCursor struct {
BlockType string `json:"block_type,omitempty"` // "text", "code", etc.
CharOffset int `json:"char_offset,omitempty"`
Field string `json:"field,omitempty"` // For tool_input, which field
}
StreamCursor provides position information for streaming
type StreamDeltaContent ¶
type StreamDeltaContent struct {
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
TargetEvent string `json:"target_event,omitempty"` // Event ID being updated
ContentType StreamContentType `json:"content_type"`
Delta string `json:"delta"`
Seq int `json:"seq"`
// For tool_input streaming
CallID string `json:"call_id,omitempty"`
ToolName string `json:"tool_name,omitempty"`
// Cursor information
Cursor *StreamCursor `json:"cursor,omitempty"`
}
StreamDeltaContent represents a streaming delta event
type StreamEvent ¶
type StreamEvent struct {
Type StreamEventType
Delta string // Text chunk for delta events
ReasoningDelta string // Thinking/reasoning chunk
ToolCall *ToolCallResult // For tool_call events
FinishReason string // For complete events
ResponseID string // Response ID (for Responses API)
Usage *UsageInfo // Token usage (usually on complete)
Error error // For error events
}
StreamEvent represents a single event from a streaming response
type StreamEventType ¶
type StreamEventType string
StreamEventType identifies the type of streaming event
const ( StreamEventDelta StreamEventType = "delta" // Text content delta StreamEventReasoning StreamEventType = "reasoning" // Reasoning/thinking delta StreamEventToolCall StreamEventType = "tool_call" // Tool call request StreamEventComplete StreamEventType = "complete" // Generation complete StreamEventError StreamEventType = "error" // Error occurred )
type StreamingConfig ¶
type StreamingConfig struct {
Enabled bool `json:"enabled,omitempty"`
}
StreamingConfig contains streaming behavior settings
type SystemEvent ¶
type ThinkingContent ¶
type ThinkingContent struct {
Content string `json:"content,omitempty"`
TokenCount int `json:"token_count,omitempty"`
DurationMs int64 `json:"duration_ms,omitempty"`
Summary string `json:"summary,omitempty"`
}
ThinkingContent represents embedded thinking/reasoning content
type TimingInfo ¶
type TimingInfo struct {
StartedAt int64 `json:"started_at,omitempty"` // Unix ms
FirstTokenAt int64 `json:"first_token_at,omitempty"` // Unix ms
CompletedAt int64 `json:"completed_at,omitempty"` // Unix ms
}
TimingInfo contains timing information for events
type ToolApprovalDecision ¶
type ToolApprovalDecision struct {
Approve bool
Always bool // Persist allow rule when true (only meaningful when Approve=true).
Reason string // Optional; forwarded upstream when supported.
DecidedAt time.Time
DecidedBy id.UserID
}
ToolApprovalDecision is a user decision for a pending tool approval request.
type ToolApprovalKind ¶
type ToolApprovalKind string
const ( ToolApprovalKindMCP ToolApprovalKind = "mcp" ToolApprovalKindBuiltin ToolApprovalKind = "builtin" )
type ToolApprovalsConfig ¶
type ToolApprovalsConfig struct {
// MCPAlwaysAllow contains exact-match allow rules for MCP approvals.
// Matching is done on normalized (trim + lowercase) server label + tool name.
MCPAlwaysAllow []MCPAlwaysAllowRule `json:"mcp_always_allow,omitempty"`
// BuiltinAlwaysAllow contains exact-match allow rules for builtin tool approvals.
// Matching is done on normalized (trim + lowercase) tool name + action.
// Action "" means "any action".
BuiltinAlwaysAllow []BuiltinAlwaysAllowRule `json:"builtin_always_allow,omitempty"`
}
ToolApprovalsConfig stores per-login persisted tool approval rules. This is used by the tool approval system to support "always allow" decisions.
type ToolApprovalsRuntimeConfig ¶
type ToolApprovalsRuntimeConfig struct {
Enabled *bool `yaml:"enabled"`
TTLSeconds int `yaml:"ttlSeconds"`
RequireForMCP *bool `yaml:"requireForMcp"`
RequireForTools []string `yaml:"requireForTools"`
AskFallback string `yaml:"askFallback"` // "deny" (default) | "allow"
}
ToolApprovalsRuntimeConfig controls runtime behaviour for tool approvals. This gates OpenAI MCP approvals (mcp_approval_request) and selected dangerous builtin tools.
func (*ToolApprovalsRuntimeConfig) WithDefaults ¶
func (c *ToolApprovalsRuntimeConfig) WithDefaults() *ToolApprovalsRuntimeConfig
type ToolArtifact ¶
type ToolArtifact struct {
Type string `json:"type"` // "file", "image"
MxcURI string `json:"mxc_uri,omitempty"`
Filename string `json:"filename,omitempty"`
Mimetype string `json:"mimetype,omitempty"`
Size int `json:"size,omitempty"`
}
ToolArtifact represents a file or image generated by a tool
type ToolCallContent ¶
type ToolCallContent struct {
// Standard Matrix fallback
Body string `json:"body"`
MsgType string `json:"msgtype"`
// Tool call details
ToolCall *ToolCallData `json:"com.beeper.ai.tool_call"`
}
ToolCallContent represents a tool call timeline event
type ToolCallData ¶
type ToolCallData struct {
CallID string `json:"call_id"`
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
ToolName string `json:"tool_name"`
ToolType ToolType `json:"tool_type"`
Status ToolStatus `json:"status"`
// Input arguments (fully accumulated)
Input map[string]any `json:"input,omitempty"`
// Display hints
Display *ToolDisplay `json:"display,omitempty"`
// Reference to result event (set after completion)
ResultEvent string `json:"result_event,omitempty"`
// MCP-specific fields
MCPServer string `json:"mcp_server,omitempty"`
// Timing
Timing *TimingInfo `json:"timing,omitempty"`
// Approval flow
RequiresApproval bool `json:"requires_approval,omitempty"`
Approval *ApprovalInfo `json:"approval,omitempty"`
}
ToolCallData contains the tool call metadata
type ToolCallMetadata ¶
type ToolCallMetadata struct {
CallID string `json:"call_id"`
ToolName string `json:"tool_name"`
ToolType string `json:"tool_type"` // builtin, provider, function, mcp
Input map[string]any `json:"input,omitempty"`
Output map[string]any `json:"output,omitempty"`
Status string `json:"status"` // pending, running, completed, failed, timeout, cancelled
ResultStatus string `json:"result_status,omitempty"` // success, error, partial
ErrorMessage string `json:"error_message,omitempty"`
StartedAtMs int64 `json:"started_at_ms,omitempty"`
CompletedAtMs int64 `json:"completed_at_ms,omitempty"`
// Event IDs for timeline events (if emitted as separate events)
CallEventID string `json:"call_event_id,omitempty"`
ResultEventID string `json:"result_event_id,omitempty"`
}
ToolCallMetadata tracks a tool call within a message
type ToolCallResult ¶
ToolCallResult represents a tool/function call from the model
type ToolDefinition ¶
type ToolDefinition struct {
Name string
Description string
Parameters map[string]any
Execute func(ctx context.Context, args map[string]any) (string, error)
}
ToolDefinition defines a tool that can be used by the AI
func BuiltinTools ¶
func BuiltinTools() []ToolDefinition
BuiltinTools returns the list of available builtin tools
type ToolDisplay ¶
type ToolDisplay struct {
Title string `json:"title,omitempty"`
Icon string `json:"icon,omitempty"` // mxc:// URL
Collapsed bool `json:"collapsed,omitempty"`
}
ToolDisplay contains display hints for tool rendering
type ToolInfo ¶
type ToolInfo struct {
Name string `json:"name"`
DisplayName string `json:"display_name"` // Human-readable name for UI
Type string `json:"type"` // "builtin", "provider", "plugin", "mcp"
Description string `json:"description,omitempty"`
Enabled bool `json:"enabled"`
Available bool `json:"available"` // Based on model capabilities and provider
Source SettingSource `json:"source,omitempty"` // Where enabled state came from
Reason string `json:"reason,omitempty"` // Only when limited/unavailable
}
ToolInfo describes a tool and its status for room state broadcasting
type ToolOutputPreview ¶
type ToolOutputPreview struct {
Stdout string `json:"stdout,omitempty"`
Stderr string `json:"stderr,omitempty"`
Truncated bool `json:"truncated,omitempty"`
}
ToolOutputPreview contains preview of tool output
type ToolProgressContent ¶
type ToolProgressContent struct {
CallID string `json:"call_id"`
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
ToolName string `json:"tool_name"`
Status ToolStatus `json:"status"`
Progress *ToolProgressDetails `json:"progress,omitempty"`
// Output preview (for long-running tools, etc.)
OutputPreview *ToolOutputPreview `json:"output_preview,omitempty"`
}
ToolProgressContent represents tool execution progress
type ToolProgressDetails ¶
type ToolProgressDetails struct {
Stage string `json:"stage,omitempty"` // "executing", "processing", etc.
Percent int `json:"percent,omitempty"` // 0-100
Message string `json:"message,omitempty"`
}
ToolProgressDetails contains progress information
type ToolProvidersConfig ¶
type ToolProvidersConfig struct {
Search *SearchConfig `yaml:"search"`
Fetch *FetchConfig `yaml:"fetch"`
Media *MediaToolsConfig `yaml:"media"`
Nexus *NexusToolsConfig `yaml:"nexus"`
MCP *MCPToolsConfig `yaml:"mcp"`
VFS *VFSToolsConfig `yaml:"vfs"`
}
ToolProvidersConfig configures external tool providers like search and fetch.
type ToolResultContent ¶
type ToolResultContent struct {
// Standard Matrix fallback
Body string `json:"body"`
MsgType string `json:"msgtype"`
Format string `json:"format,omitempty"`
FormattedBody string `json:"formatted_body,omitempty"`
// Tool result details
ToolResult *ToolResultData `json:"com.beeper.ai.tool_result"`
}
ToolResultContent represents a tool result timeline event
type ToolResultData ¶
type ToolResultData struct {
CallID string `json:"call_id"`
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
ToolName string `json:"tool_name"`
Status ResultStatus `json:"status"`
// Output data
Output map[string]any `json:"output,omitempty"`
// Artifacts (files, images generated by tool)
Artifacts []ToolArtifact `json:"artifacts,omitempty"`
// Display hints
Display *ToolResultDisplay `json:"display,omitempty"`
}
ToolResultData contains the tool result metadata
type ToolResultDisplay ¶
type ToolResultDisplay struct {
Format string `json:"format,omitempty"` // "search_results", "code_output", etc.
Expandable bool `json:"expandable,omitempty"`
DefaultExpanded bool `json:"default_expanded,omitempty"`
ShowStdout bool `json:"show_stdout,omitempty"`
ShowArtifacts bool `json:"show_artifacts,omitempty"`
}
ToolResultDisplay contains display hints for tool result rendering
type ToolStatus ¶
type ToolStatus string
ToolStatus represents the state of a tool call
const ( ToolStatusPending ToolStatus = "pending" ToolStatusRunning ToolStatus = "running" ToolStatusCompleted ToolStatus = "completed" ToolStatusFailed ToolStatus = "failed" ToolStatusTimeout ToolStatus = "timeout" ToolStatusCancelled ToolStatus = "cancelled" ToolStatusApprovalRequired ToolStatus = "approval_required" )
type ToolStrictMode ¶
type ToolStrictMode int
const ( ToolStrictOff ToolStrictMode = iota ToolStrictAuto ToolStrictOn )
type TurnCancelledContent ¶
type TurnCancelledContent struct {
TurnID string `json:"turn_id"`
AgentID string `json:"agent_id,omitempty"`
CancelledAt int64 `json:"cancelled_at"` // Unix ms
Reason string `json:"reason,omitempty"`
PartialContent string `json:"partial_content,omitempty"`
ToolCallsCancelled []string `json:"tool_calls_cancelled,omitempty"`
}
TurnCancelledContent represents a cancelled turn event
type TurnStatus ¶
type TurnStatus string
TurnStatus represents the state of an assistant turn
const ( TurnStatusPending TurnStatus = "pending" TurnStatusThinking TurnStatus = "thinking" TurnStatusGenerating TurnStatus = "generating" TurnStatusToolUse TurnStatus = "tool_use" TurnStatusCompleted TurnStatus = "completed" TurnStatusFailed TurnStatus = "failed" TurnStatusCancelled TurnStatus = "cancelled" )
type TypingContext ¶
type TypingController ¶
type TypingController struct {
// contains filtered or unexported fields
}
TypingController manages typing indicators with TTL and refresh. Similar to OpenClaw's TypingController pattern.
func NewTypingController ¶
func NewTypingController(client *AIClient, ctx context.Context, portal *bridgev2.Portal, opts TypingControllerOptions) *TypingController
NewTypingController creates a new typing controller.
func (*TypingController) IsActive ¶
func (tc *TypingController) IsActive() bool
IsActive returns whether typing is currently active.
func (*TypingController) MarkDispatchIdle ¶
func (tc *TypingController) MarkDispatchIdle()
MarkDispatchIdle marks the dispatcher as idle.
func (*TypingController) MarkRunComplete ¶
func (tc *TypingController) MarkRunComplete()
MarkRunComplete marks the AI run as complete. Typing will stop when both run is complete and dispatch is idle.
func (*TypingController) RefreshTTL ¶
func (tc *TypingController) RefreshTTL()
RefreshTTL resets the TTL timer, keeping typing active longer. Call this when activity occurs (tool calls, text chunks).
func (*TypingController) Start ¶
func (tc *TypingController) Start()
Start begins the typing indicator with automatic refresh.
func (*TypingController) Stop ¶
func (tc *TypingController) Stop()
Stop stops the typing indicator and cleans up.
type TypingControllerOptions ¶
type TypingMode ¶
type TypingMode string
const ( TypingModeNever TypingMode = "never" TypingModeInstant TypingMode = "instant" TypingModeThinking TypingMode = "thinking" TypingModeMessage TypingMode = "message" )
type TypingSignaler ¶
type TypingSignaler struct {
// contains filtered or unexported fields
}
func NewTypingSignaler ¶
func NewTypingSignaler(typing *TypingController, mode TypingMode, isHeartbeat bool) *TypingSignaler
func (*TypingSignaler) SignalReasoningDelta ¶
func (ts *TypingSignaler) SignalReasoningDelta()
func (*TypingSignaler) SignalRunStart ¶
func (ts *TypingSignaler) SignalRunStart()
func (*TypingSignaler) SignalTextDelta ¶
func (ts *TypingSignaler) SignalTextDelta(text string)
func (*TypingSignaler) SignalToolStart ¶
func (ts *TypingSignaler) SignalToolStart()
type UnifiedMessage ¶
type UnifiedMessage struct {
Role MessageRole
Content []ContentPart
ToolCalls []ToolCallResult // For assistant messages with tool calls
ToolCallID string // For tool result messages
Name string // Optional name for the message sender
}
UnifiedMessage is a provider-agnostic message format
func (*UnifiedMessage) HasImages ¶
func (m *UnifiedMessage) HasImages() bool
HasImages returns true if the message contains image content
func (*UnifiedMessage) HasMultimodalContent ¶
func (m *UnifiedMessage) HasMultimodalContent() bool
HasMultimodalContent returns true if the message contains any non-text content
func (*UnifiedMessage) Text ¶
func (m *UnifiedMessage) Text() string
Text returns the text content of a message (concatenating all text parts)
type UsageInfo ¶
type UsageInfo struct {
PromptTokens int
CompletionTokens int
TotalTokens int
ReasoningTokens int // For models with extended thinking
}
UsageInfo contains token usage information
type UserDefaults ¶
type UserDefaults struct {
Model string `json:"model,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
}
UserDefaults stores user-level default settings for new chats
type UserLoginMetadata ¶
type UserLoginMetadata struct {
Persona string `json:"persona,omitempty"`
Provider string `json:"provider,omitempty"` // Selected provider (beeper, openai, openrouter)
APIKey string `json:"api_key,omitempty"`
BaseURL string `json:"base_url,omitempty"` // Per-user API endpoint
CodexHome string `json:"codex_home,omitempty"` // Isolated CODEX_HOME for this login (provider=codex)
CodexHomeManaged bool `json:"codex_home_managed,omitempty"` // True if we created this CODEX_HOME (false for existing ~/.codex)
CodexCommand string `json:"codex_command,omitempty"` // Optional per-login codex binary override
CodexAuthMode string `json:"codex_auth_mode,omitempty"` // chatgpt|apiKey|existing
CodexAccountEmail string `json:"codex_account_email,omitempty"` // Optional, from account/read
TitleGenerationModel string `json:"title_generation_model,omitempty"` // Model to use for generating chat titles
NextChatIndex int `json:"next_chat_index,omitempty"`
DefaultChatPortalID string `json:"default_chat_portal_id,omitempty"`
ModelCache *ModelCache `json:"model_cache,omitempty"`
ChatsSynced bool `json:"chats_synced,omitempty"` // True after initial bootstrap completed successfully
Gravatar *GravatarState `json:"gravatar,omitempty"`
Timezone string `json:"timezone,omitempty"`
ResponsePrefix string `json:"response_prefix,omitempty"`
// FileAnnotationCache stores parsed PDF content from OpenRouter's file-parser plugin
// Key is the file hash (SHA256), pruned after 7 days
FileAnnotationCache map[string]FileAnnotation `json:"file_annotation_cache,omitempty"`
// User-level defaults for new chats (set via provisioning API)
Defaults *UserDefaults `json:"defaults,omitempty"`
// Optional per-login tokens for external services
ServiceTokens *ServiceTokens `json:"service_tokens,omitempty"`
// Tool approval rules (e.g. "always allow" decisions for MCP approvals or dangerous builtin tools).
ToolApprovals *ToolApprovalsConfig `json:"tool_approvals,omitempty"`
// AgentModelOverrides stores per-agent model overrides (agent ID -> model ID).
AgentModelOverrides map[string]string `json:"agent_model_overrides,omitempty"`
// Agent Builder room for managing agents
BuilderRoomID networkid.PortalID `json:"builder_room_id,omitempty"`
// Custom agents store (source of truth for user-created agents).
CustomAgents map[string]*AgentDefinitionContent `json:"custom_agents,omitempty"`
// Last active room per agent (used for heartbeat delivery).
LastActiveRoomByAgent map[string]string `json:"last_active_room_by_agent,omitempty"`
// Heartbeat dedupe state per agent.
HeartbeatState map[string]HeartbeatState `json:"heartbeat_state,omitempty"`
// LastHeartbeatEvent is the last emitted heartbeat event for this login (command-only debug surface).
LastHeartbeatEvent *HeartbeatEventPayload `json:"last_heartbeat_event,omitempty"`
// OpenCode instances connected for this login (keyed by instance ID).
OpenCodeInstances map[string]*opencodebridge.OpenCodeInstance `json:"opencode_instances,omitempty"`
// OpenCode managed local server (optional).
OpenCodeLocalPort int `json:"opencode_local_port,omitempty"`
OpenCodeLocalUsername string `json:"opencode_local_username,omitempty"`
OpenCodeLocalPassword string `json:"opencode_local_password,omitempty"`
// Provider health tracking
ConsecutiveErrors int `json:"consecutive_errors,omitempty"`
LastErrorAt int64 `json:"last_error_at,omitempty"` // Unix timestamp
}
UserLoginMetadata is stored on each login row to keep per-user settings.
type VFSToolsConfig ¶
type VFSToolsConfig struct {
	// ApplyPatch holds the apply_patch tool configuration; nil means the
	// tool group is not configured (loaded from the `apply_patch` YAML key).
	ApplyPatch *ApplyPatchToolsConfig `yaml:"apply_patch"`
}
VFSToolsConfig configures virtual filesystem tools.
Source Files
¶
- abort_helpers.go
- abort_triggers.go
- account_hints.go
- ack_reactions.go
- agent_activity.go
- agent_display.go
- agents_list_tool.go
- agentstore.go
- agentstore_room_lookup.go
- approval_errors.go
- audio_analysis.go
- audio_generation.go
- audio_mime.go
- backfill_opencode.go
- beeper_models_generated.go
- bootstrap_context.go
- bot_check.go
- bridge_db.go
- bridge_state_backend.go
- broken_login_client.go
- builder.go
- cache_ttl.go
- chat.go
- client.go
- codex_client.go
- codex_login.go
- command_registry.go
- commands.go
- commands_clay.go
- commands_helpers.go
- commands_manage.go
- commands_mcp.go
- commands_memory.go
- commands_opencode.go
- commands_parity.go
- commands_playground.go
- config.go
- connector.go
- context_compaction.go
- context_overrides.go
- context_pruning.go
- context_value.go
- cron_agent.go
- cron_command_format.go
- cron_delivery.go
- cron_isolated.go
- cron_logger.go
- cron_message.go
- cron_rooms.go
- cron_runtime.go
- cron_sessions.go
- debounce.go
- dedupe.go
- delivery_target.go
- desktop_api_sessions.go
- desktop_networks.go
- duration.go
- envelope.go
- error_logging.go
- errors.go
- errors_extended.go
- events.go
- gravatar.go
- group_activation.go
- group_history.go
- handleai.go
- handlematrix.go
- heartbeat_config.go
- heartbeat_context.go
- heartbeat_delivery.go
- heartbeat_events.go
- heartbeat_runner.go
- heartbeat_session.go
- heartbeat_state.go
- heartbeat_visibility.go
- heartbeat_wake.go
- identifiers.go
- identity_sync.go
- image_analysis.go
- image_generation.go
- image_generation_tool.go
- image_understanding.go
- inbound_commands.go
- inbound_debounce.go
- inbound_directive_apply.go
- internal_dispatch.go
- linkpreview.go
- logger_util.go
- login.go
- logout_cleanup.go
- matrix_helpers.go
- matrix_payload.go
- mcp_client.go
- mcp_servers.go
- media_download.go
- media_helpers.go
- media_prompt.go
- media_send.go
- media_understanding_attachments.go
- media_understanding_cli.go
- media_understanding_defaults.go
- media_understanding_format.go
- media_understanding_providers.go
- media_understanding_resolve.go
- media_understanding_runner.go
- media_understanding_scope.go
- media_understanding_types.go
- memory_batches.go
- memory_config.go
- memory_embeddings.go
- memory_flush.go
- memory_index.go
- memory_injection.go
- memory_manager.go
- memory_manager_close.go
- memory_provider.go
- memory_session_events.go
- memory_sessions.go
- memory_sessions_cleanup.go
- memory_sync.go
- memory_vector.go
- mentions.go
- message_id.go
- message_pins.go
- message_results.go
- message_send.go
- message_status.go
- messages.go
- metadata.go
- model_api.go
- model_catalog.go
- model_contacts.go
- model_fallback.go
- models.go
- models_api.go
- opencode_host.go
- opencode_local.go
- owner_allowlist.go
- parse_utils.go
- pending_queue.go
- portal_cleanup.go
- prompt_params.go
- provider.go
- provider_openai.go
- provisioning.go
- pruning.go
- queue_directive.go
- queue_helpers.go
- queue_notice.go
- queue_resolution.go
- queue_settings.go
- queue_types.go
- raw_mode_prompt.go
- reaction_feedback.go
- reaction_handling.go
- reactions.go
- reasoning_fallback.go
- remote_message.go
- reply_mentions.go
- reply_policy.go
- response_directives.go
- response_finalization.go
- response_prefix.go
- response_prefix_template.go
- response_retry.go
- room_activity.go
- room_capabilities.go
- room_runs.go
- session_greeting.go
- session_keys.go
- session_store.go
- session_transcript_openclaw.go
- sessions_tools.go
- source_citations.go
- status_events_context.go
- status_text.go
- stream_events.go
- streaming.go
- streaming_directives.go
- subagent_announce.go
- subagent_conversion.go
- subagent_registry.go
- subagent_spawn.go
- system_ack.go
- system_events.go
- system_prompts.go
- text_files.go
- timezone.go
- toast.go
- token_resolver.go
- tokenizer.go
- tool_aliases.go
- tool_approvals.go
- tool_approvals_policy.go
- tool_approvals_rules.go
- tool_call_id.go
- tool_configured.go
- tool_descriptions.go
- tool_execution.go
- tool_policy.go
- tool_policy_chain.go
- tool_registry.go
- tool_schema_sanitize.go
- tools.go
- tools_analyze_image.go
- tools_apply_patch.go
- tools_beeper_docs.go
- tools_beeper_feedback.go
- tools_cron.go
- tools_matrix_api.go
- tools_message_actions.go
- tools_message_desktop.go
- tools_nexus.go
- tools_nexus_compact.go
- tools_search_fetch.go
- trace.go
- turn_validation.go
- typing_context.go
- typing_controller.go
- typing_mode.go
- typing_queue.go
- typing_state.go
- video_analysis.go