Documentation
¶
Index ¶
- Variables
- func DisableReasoningCollector()
- func EnableReasoningCollector() error
- func ExtractBoolean(llm LLM, f Fragment, opts ...Option) (*structures.Boolean, error)
- func ExtractGoal(llm LLM, f Fragment, opts ...Option) (*structures.Goal, error)
- func ExtractKnowledgeGaps(llm LLM, f Fragment, opts ...Option) ([]string, error)
- func ExtractPlan(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Plan, error)
- func IsGoalAchieved(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Boolean, error)
- func ReEvaluatePlan(llm LLM, f, subtaskFragment Fragment, goal *structures.Goal, ...) (*structures.Plan, error)
- func SetReasoningCollectorPath(path string)
- func WithContext(ctx context.Context) func(o *Options)
- func WithFeedbackCallback(fn func() *Fragment) func(o *Options)
- func WithForceReasoning() func(o *Options)
- func WithForgeProvider(provider func() []ToolDefinitionInterface) func(o *Options)
- func WithGaps(gaps ...string) func(o *Options)
- func WithGuidelines(guidelines ...Guideline) func(o *Options)
- func WithIterations(i int) func(o *Options)
- func WithLoopDetection(steps int) func(o *Options)
- func WithMCPArgs(args map[string]string) func(o *Options)
- func WithMCPs(sessions ...*mcp.ClientSession) func(o *Options)
- func WithMaxAdjustmentAttempts(attempts int) func(o *Options)
- func WithMaxAttempts(i int) func(o *Options)
- func WithMaxRetries(retries int) func(o *Options)
- func WithPrompt(t prompt.PromptType, p prompt.StaticPrompt) func(o *Options)
- func WithReasoningCallback(fn func(string)) func(o *Options)
- func WithSinkState(tool ToolDefinitionInterface) func(o *Options)
- func WithStartWithAction(tool *ToolChoice) func(o *Options)
- func WithStatusCallback(fn func(string)) func(o *Options)
- func WithToolCallBack(fn func(*ToolChoice, *SessionState) ToolCallDecision) func(o *Options)
- func WithToolCallResultCallback(fn func(ToolStatus)) func(o *Options)
- func WithToolFeedback(handler func(toolName string, success bool, latency time.Duration, err error)) func(o *Options)
- func WithTools(tools ...ToolDefinitionInterface) func(o *Options)
- type Fragment
- func ContentReview(llm LLM, originalFragment Fragment, opts ...Option) (Fragment, error)
- func ExecutePlan(llm LLM, conv Fragment, plan *structures.Plan, goal *structures.Goal, ...) (Fragment, error)
- func ExecuteTools(llm LLM, f Fragment, opts ...Option) (Fragment, error)
- func NewEmptyFragment() Fragment
- func NewFragment(messages ...openai.ChatCompletionMessage) Fragment
- func ToolReasoner(llm LLM, f Fragment, opts ...Option) (Fragment, error)
- func (f Fragment) AddLastMessage(f2 Fragment) Fragment
- func (r Fragment) AddMessage(role, content string, mm ...Multimedia) Fragment
- func (r Fragment) AddStartMessage(role, content string, mm ...Multimedia) Fragment
- func (r Fragment) AddToolMessage(content, toolCallID string) Fragment
- func (f Fragment) AllFragmentsStrings() string
- func (r Fragment) ExtractStructure(ctx context.Context, llm LLM, s structures.Structure) error
- func (f Fragment) GetMessages() []openai.ChatCompletionMessage
- func (f Fragment) LastAssistantAndToolMessages() []openai.ChatCompletionMessage
- func (f Fragment) LastMessage() *openai.ChatCompletionMessage
- func (f Fragment) SelectTool(ctx context.Context, llm LLM, availableTools Tools, forceTool string) (Fragment, *ToolChoice, error)
- func (f Fragment) String() string
- type Guideline
- type GuidelineMetadata
- type GuidelineMetadataList
- type Guidelines
- type IntentionResponse
- type LLM
- type Multimedia
- type OpenAIClient
- type Option
- type Options
- type PlanStatus
- type ReasoningCollector
- type ReasoningEntry
- type SessionState
- type Status
- type Tool
- type ToolCallDecision
- type ToolChoice
- type ToolDefinition
- type ToolDefinitionInterface
- type ToolStatus
- type Tools
Constants ¶
This section is empty.
Variables ¶
var ( ErrNoToolSelected error = errors.New("no tool selected by the LLM") ErrLoopDetected error = errors.New("loop detected: same tool called repeatedly with same parameters") ErrToolCallCallbackInterrupted error = errors.New("interrupted via ToolCallCallback") )
var (
ErrGoalNotAchieved error = errors.New("goal not achieved")
)
Functions ¶
func DisableReasoningCollector ¶
func DisableReasoningCollector()
DisableReasoningCollector disables the reasoning collector
func EnableReasoningCollector ¶
func EnableReasoningCollector() error
EnableReasoningCollector enables the reasoning collector
func ExtractBoolean ¶
ExtractBoolean extracts a boolean from a conversation
func ExtractGoal ¶
ExtractGoal extracts a goal from a conversation
func ExtractKnowledgeGaps ¶
func ExtractPlan ¶
func ExtractPlan(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Plan, error)
ExtractPlan extracts a plan from a conversation. To override the prompt, define a PromptPlanType, PromptReEvaluatePlanType and PromptSubtaskExtractionType
func IsGoalAchieved ¶
func IsGoalAchieved(llm LLM, f Fragment, goal *structures.Goal, opts ...Option) (*structures.Boolean, error)
IsGoalAchieved checks if a goal has been achieved
func ReEvaluatePlan ¶
func ReEvaluatePlan(llm LLM, f, subtaskFragment Fragment, goal *structures.Goal, toolStatuses []ToolStatus, subtask string, opts ...Option) (*structures.Plan, error)
ReEvaluatePlan re-evaluates a plan from a conversation. To override the prompt, define a PromptReEvaluatePlanType and PromptSubtaskExtractionType
func SetReasoningCollectorPath ¶
func SetReasoningCollectorPath(path string)
SetReasoningCollectorPath sets a custom path for the reasoning collector
func WithContext ¶
WithContext sets the execution context for the agent
func WithFeedbackCallback ¶
WithFeedbackCallback sets a callback to get continuous feedback during execution of plans
func WithForceReasoning ¶
func WithForceReasoning() func(o *Options)
WithForceReasoning enables forcing the LLM to reason before selecting tools
func WithForgeProvider ¶ added in v0.9.0
func WithForgeProvider(provider func() []ToolDefinitionInterface) func(o *Options)
WithForgeProvider sets a dynamic tool provider for hot-loaded tools.
func WithGuidelines ¶
WithGuidelines adds behavioral guidelines for the agent to follow. The guidelines allow a more curated selection of the tool to use, and only relevant ones are shown to the LLM during tool selection.
func WithIterations ¶
WithIterations allows to set the number of refinement iterations
func WithLoopDetection ¶
WithLoopDetection enables loop detection to prevent repeated tool calls If the same tool with the same parameters is called more than 'steps' times, it will be detected
func WithMCPArgs ¶
WithMCPArgs sets the arguments for the MCP prompts
func WithMCPs ¶
func WithMCPs(sessions ...*mcp.ClientSession) func(o *Options)
WithMCPs adds Model Context Protocol client sessions for external tool integration. When specified, the tools available in the MCPs will be available to the cogito pipelines
func WithMaxAdjustmentAttempts ¶
WithMaxAdjustmentAttempts sets the maximum number of adjustment attempts when using tool call callbacks. This prevents infinite loops when the user provides adjustment feedback. Default is 5 attempts.
func WithMaxAttempts ¶
WithMaxAttempts sets the maximum number of execution attempts
func WithMaxRetries ¶
WithMaxRetries sets the maximum number of retries for LLM calls
func WithPrompt ¶
func WithPrompt(t prompt.PromptType, p prompt.StaticPrompt) func(o *Options)
WithPrompt allows to set a custom prompt for a given PromptType
func WithReasoningCallback ¶
WithReasoningCallback sets a callback function to receive reasoning updates during execution
func WithSinkState ¶
func WithSinkState(tool ToolDefinitionInterface) func(o *Options)
func WithStartWithAction ¶
func WithStartWithAction(tool *ToolChoice) func(o *Options)
WithStartWithAction sets the initial tool choice to start with
func WithStatusCallback ¶
WithStatusCallback sets a callback function to receive status updates during execution
func WithToolCallBack ¶
func WithToolCallBack(fn func(*ToolChoice, *SessionState) ToolCallDecision) func(o *Options)
WithToolCallBack allows to set a callback to intercept and modify tool calls before execution The callback receives the proposed tool choice and session state, and returns a ToolCallDecision that can approve, reject, provide adjustment feedback, or directly modify the tool choice
func WithToolCallResultCallback ¶
func WithToolCallResultCallback(fn func(ToolStatus)) func(o *Options)
WithToolCallResultCallback runs the callback on every tool result
func WithToolFeedback ¶ added in v0.9.0
func WithToolFeedback(handler func(toolName string, success bool, latency time.Duration, err error)) func(o *Options)
WithToolFeedback sets a callback for tool execution results.
func WithTools ¶
func WithTools(tools ...ToolDefinitionInterface) func(o *Options)
WithTools allows to set the tools available to the Agent. Pass *ToolDefinition[T] instances - they will automatically generate openai.Tool via their Tool() method. Example: WithTools(&ToolDefinition[SearchArgs]{...}, &ToolDefinition[WeatherArgs]{...})
Types ¶
type Fragment ¶
type Fragment struct {
Messages []openai.ChatCompletionMessage
ParentFragment *Fragment
Status *Status
Multimedia []Multimedia
}
func ContentReview ¶
ContentReview refines an LLM response for a fixed number of iterations, or until the LLM doesn't find any more gaps
func ExecutePlan ¶
func ExecutePlan(llm LLM, conv Fragment, plan *structures.Plan, goal *structures.Goal, opts ...Option) (Fragment, error)
ExecutePlan executes an already-defined plan with a set of options. To override its prompt, configure PromptPlanExecutionType, PromptPlanType, PromptReEvaluatePlanType and PromptSubtaskExtractionType
func ExecuteTools ¶
ExecuteTools runs a fragment through an LLM and executes Tools. It returns a new fragment with the tool result at the end. The result is guaranteed to be usable afterwards with llm.Ask() to explain the result to the user.
func NewEmptyFragment ¶
func NewEmptyFragment() Fragment
func NewFragment ¶
func NewFragment(messages ...openai.ChatCompletionMessage) Fragment
func ToolReasoner ¶
ToolReasoner forces the LLM to reason about available tools in a fragment
func (Fragment) AddLastMessage ¶
func (Fragment) AddMessage ¶
func (r Fragment) AddMessage(role, content string, mm ...Multimedia) Fragment
func (Fragment) AddStartMessage ¶
func (r Fragment) AddStartMessage(role, content string, mm ...Multimedia) Fragment
func (Fragment) AddToolMessage ¶
AddToolMessage adds a tool result message with the specified tool_call_id
func (Fragment) AllFragmentsStrings ¶
AllFragmentsStrings walks through all the fragment parents to retrieve all the conversations and represent that as a string This is particularly useful if chaining different fragments and want to still feed the conversation as a context to the LLM.
func (Fragment) ExtractStructure ¶
ExtractStructure extracts a structure from the result using the provided JSON schema definition and unmarshals it into the provided destination
func (Fragment) GetMessages ¶
func (f Fragment) GetMessages() []openai.ChatCompletionMessage
GetMessages returns the chat completion messages from this fragment, automatically prepending a force-text-reply system message if tool calls are detected. This ensures LLMs provide natural language responses instead of JSON tool syntax when Ask() is called after ExecuteTools().
func (Fragment) LastAssistantAndToolMessages ¶
func (f Fragment) LastAssistantAndToolMessages() []openai.ChatCompletionMessage
func (Fragment) LastMessage ¶
func (f Fragment) LastMessage() *openai.ChatCompletionMessage
type GuidelineMetadata ¶
type GuidelineMetadataList ¶
type GuidelineMetadataList []GuidelineMetadata
type Guidelines ¶
type Guidelines []Guideline
func GetRelevantGuidelines ¶
func GetRelevantGuidelines(llm LLM, guidelines Guidelines, fragment Fragment, opts ...Option) (Guidelines, error)
func (Guidelines) ToMetadata ¶
func (g Guidelines) ToMetadata() GuidelineMetadataList
type IntentionResponse ¶
IntentionResponse is used to extract the tool choice from the intention tool
type OpenAIClient ¶
type OpenAIClient struct {
// contains filtered or unexported fields
}
func NewOpenAILLM ¶
func NewOpenAILLM(model, apiKey, baseURL string) *OpenAIClient
func (*OpenAIClient) Ask ¶
Ask prompts to the LLM with the provided messages and returns a Fragment containing the response. The Fragment.GetMessages() method automatically handles force-text-reply when tool calls are present in the conversation history.
func (*OpenAIClient) CreateChatCompletion ¶
func (llm *OpenAIClient) CreateChatCompletion(ctx context.Context, request openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error)
type Option ¶
type Option func(*Options)
var ( // EnableDeepContext enables full context to the LLM when chaining conversations // It might yield to better results to the cost of bigger context use. EnableDeepContext Option = func(o *Options) { o.deepContext = true } // EnableToolReasoner enables the reasoning about the need to call other tools // before each tool call, preventing calling more tools than necessary. EnableToolReasoner Option = func(o *Options) { o.toolReasoner = true } // DisableToolReEvaluator disables the re-evaluation of the need to call other tools // after each tool call. It might yield to better results to the cost of more // LLM calls. DisableToolReEvaluator Option = func(o *Options) { o.toolReEvaluator = false } // DisableSinkState disables the use of a sink state // when the LLM decides that no tool is needed DisableSinkState Option = func(o *Options) { o.sinkState = false } // EnableInfiniteExecution enables infinite, long-term execution on Plans EnableInfiniteExecution Option = func(o *Options) { o.infiniteExecution = true } // EnableStrictGuidelines enforces cogito to pick tools only from the guidelines EnableStrictGuidelines Option = func(o *Options) { o.strictGuidelines = true } // EnableAutoPlan enables cogito to automatically use planning if needed EnableAutoPlan Option = func(o *Options) { o.autoPlan = true } // EnableAutoPlanReEvaluator enables cogito to automatically re-evaluate the need to use planning EnableAutoPlanReEvaluator Option = func(o *Options) { o.planReEvaluator = true } // EnableMCPPrompts enables the use of MCP prompts EnableMCPPrompts Option = func(o *Options) { o.mcpPrompts = true } )
type Options ¶
type Options struct {
// contains filtered or unexported fields
}
Options contains all configuration options for the Cogito agent It allows customization of behavior, tools, prompts, and execution parameters
type PlanStatus ¶
type PlanStatus struct {
Plan structures.Plan
Tools []ToolStatus
}
type ReasoningCollector ¶
type ReasoningCollector struct {
// contains filtered or unexported fields
}
ReasoningCollector collects LLM reasoning responses for pattern analysis Enable by setting COGITO_COLLECT_REASONING=true or calling EnableReasoningCollector()
type ReasoningEntry ¶
type ReasoningEntry struct {
Timestamp time.Time `json:"timestamp"`
Reasoning string `json:"reasoning"`
ExtractedTool string `json:"extracted_tool"`
AvailableTools []string `json:"available_tools"`
MatchedStrategy string `json:"matched_strategy"` // "first_word", "pattern", "keyword", "gerund", "none"
MatchedPattern string `json:"matched_pattern"` // The specific pattern that matched
Success bool `json:"success"`
}
ReasoningEntry represents a collected reasoning sample
type SessionState ¶
type SessionState struct {
ToolChoice *ToolChoice `json:"tool_choice"`
Fragment Fragment `json:"fragment"`
}
type Status ¶
type Status struct {
Iterations int
ToolsCalled Tools
ToolResults []ToolStatus
Plans []PlanStatus
PastActions []ToolStatus // Track past actions for loop detections
ReasoningLog []string // Track reasoning for each iteration
PendingToolChoices []*ToolChoice // Pending parallel tool calls to process
}
type ToolCallDecision ¶
type ToolCallDecision struct {
// Approved: true to proceed with the tool call, false to interrupt execution
Approved bool
// Adjustment: feedback string for the LLM to interpret and adjust the tool call
// Empty string means no adjustment needed. If provided, the LLM will re-evaluate
// the tool call based on this feedback.
Adjustment string
// Modified: directly modified tool choice that takes precedence over Adjustment
// If set, this tool choice is used directly without re-querying the LLM
// This allows programmatic modification of tool arguments
Modified *ToolChoice
// Skip: skip this tool call but continue execution (alternative to Approved: false)
// When true, the tool call is skipped and execution continues
Skip bool
}
ToolCallDecision represents the decision made by a tool call callback It allows the callback to approve, reject, provide adjustment feedback, or directly modify the tool choice
type ToolChoice ¶
type ToolChoice struct {
Name string `json:"name"`
Arguments map[string]any `json:"arguments"`
ID string `json:"id"`
Reasoning string `json:"reasoning"`
}
func ToolReEvaluator ¶
func ToolReEvaluator(llm LLM, f Fragment, previousTool ToolStatus, tools Tools, guidelines Guidelines, opts ...Option) (*ToolChoice, string, error)
ToolReEvaluator evaluates the conversation after a tool execution and determines next steps Calls pickAction/toolSelection with reEvaluationTemplate and the conversation that already has tool results
type ToolDefinition ¶
type ToolDefinition[T any] struct { ToolRunner Tool[T] InputArguments any Name, Description string }
func (*ToolDefinition[T]) Execute ¶
Execute implements ToolDef.Execute by marshaling the arguments map to type T and calling ToolRunner.Run
func (ToolDefinition[T]) Tool ¶
func (t ToolDefinition[T]) Tool() openai.Tool
type ToolDefinitionInterface ¶
type ToolDefinitionInterface interface {
Tool() openai.Tool
// Execute runs the tool with the given arguments (as JSON map) and returns the result
// The context allows passing request-scoped data (current pair, reasoning, trace IDs, etc.)
Execute(ctx context.Context, args map[string]any) (string, error)
}
func NewToolDefinition ¶
func NewToolDefinition[T any](toolRunner Tool[T], inputArguments any, name, description string) ToolDefinitionInterface
type ToolStatus ¶
type ToolStatus struct {
Executed bool
ToolArguments ToolChoice
Result string
Name string
}
type Tools ¶
type Tools []ToolDefinitionInterface
func (Tools) Definitions ¶
func (t Tools) Definitions() []*openai.FunctionDefinition
func (Tools) Find ¶
func (t Tools) Find(name string) ToolDefinitionInterface