Documentation
¶
Overview ¶
Package agentkit provides a flexible framework for building LLM-powered agents with tool calling, including types and a client for OpenAI's Responses API, as well as tracing capabilities for LLM applications.
Index ¶
- Variables
- func FilterEvents(input <-chan Event, types ...EventType) <-chan Event
- func GetAgentName(ctx context.Context) (string, bool)
- func GetConversationID(ctx context.Context) (string, bool)
- func GetDeps[T any](ctx context.Context) (T, error)
- func GetIteration(ctx context.Context) (int, bool)
- func GetSpanID(ctx context.Context) (string, bool)
- func GetTraceID(ctx context.Context) (string, bool)
- func MustGetDeps[T any](ctx context.Context) T — deprecated
- func RefreshModelCosts()
- func RegisterModelCost(model string, config ModelCostConfig)
- func SchemaFromStruct(sample any) (map[string]any, error)
- func WithAgentName(ctx context.Context, name string) context.Context
- func WithConversation(ctx context.Context, conversationID string) context.Context
- func WithDeps(ctx context.Context, deps any) context.Context
- func WithEventPublisher(ctx context.Context, publisher EventPublisher) context.Context
- func WithIteration(ctx context.Context, iteration int) context.Context
- func WithSpanID(ctx context.Context, spanID string) context.Context
- func WithTraceID(ctx context.Context, traceID string) context.Context
- func WithTracer(ctx context.Context, tracer Tracer) context.Context
- type APIError
- type Agent
- func (a *Agent) AddContext(ctx context.Context, conversationID string, content string) error
- func (a *Agent) AddTool(tool Tool)
- func (a *Agent) AppendToConversation(ctx context.Context, conversationID string, turn ConversationTurn) error
- func (a *Agent) AsHandoffTool(name, description string, opts ...HandoffOption) Tool
- func (a *Agent) AsTool(name, description string) Tool
- func (a *Agent) ClearConversation(ctx context.Context, conversationID string) error
- func (a *Agent) DeleteConversation(ctx context.Context, conversationID string) error
- func (a *Agent) ForkConversation(ctx context.Context, originalID, newID, userMessage string) error
- func (a *Agent) GetConversation(ctx context.Context, conversationID string) (Conversation, error)
- func (a *Agent) Handoff(ctx context.Context, to *Agent, task string, opts ...HandoffOption) (*HandoffResult, error)
- func (a *Agent) Run(ctx context.Context, userMessage string) <-chan Event
- func (a *Agent) SaveConversation(ctx context.Context, conv Conversation) error
- func (a *Agent) Use(m Middleware)
- type ApprovalConfig
- type ApprovalHandler
- type ApprovalRequest
- type CollaborationContribution
- type CollaborationOption
- type CollaborationResult
- type CollaborationRound
- type CollaborationSession
- func (cs *CollaborationSession) AsTool(name, description string, opts ...CollaborationOption) Tool
- func (cs *CollaborationSession) Configure(opts ...CollaborationOption) *CollaborationSession
- func (cs *CollaborationSession) Discuss(ctx context.Context, topic string, opts ...CollaborationOption) (*CollaborationResult, error)
- type ConcurrencyMode
- type Config
- type Conversation
- type ConversationStore
- type ConversationTurn
- type CostInfo
- type Event
- func ActionDetected(description, toolID string) Event
- func ActionResult(description string, result any) Event
- func AgentComplete(agentName, output string, totalTokens, iterations int, durationMs int64) Event
- func AgentCompleteWithUsage(agentName, output string, usage providers.TokenUsage, iterations int, ...) Event
- func AgentStart(agentName string) Event
- func ApprovalDenied(toolName, callID, reason string) Event
- func ApprovalGranted(toolName, callID string) Event
- func ApprovalNeeded(request ApprovalRequest) Event
- func ApprovalRejected(request ApprovalRequest) Event
- func ApprovalRequired(request ApprovalRequest) Event
- func CollaborationAgentContribution(agentName, contribution string) Event
- func Decision(action string, confidence float64, reasoning string) Event
- func Error(err error) Event
- func FinalOutput(summary, response string) Event
- func HandoffComplete(fromAgent, toAgent, result string) Event
- func HandoffStart(fromAgent, toAgent, task, reason string) Event
- func NewEvent(eventType EventType, data map[string]any) Event
- func Progress(iteration, maxIterations int, description string) Event
- func ReasoningChunk(chunk string) Event
- func ResponseChunk(chunk string) Event
- func Thinking(content string) Event
- func ThinkingChunk(chunk string) Event
- func ToolError(toolName string, err error) Event
- func ToolResult(toolName string, result any) Event
- type EventPublisher
- type EventRecorder
- type EventType
- type GenerationOptions
- type Handoff
- type HandoffConfiguration
- type HandoffContext
- type HandoffOption
- type HandoffResult
- type HandoffTraceItem
- type LLMProvider
- type LogLevel
- type LoggingConfig
- type Middleware
- type MockLLM
- func (m *MockLLM) CreateResponse(ctx context.Context, req ResponseRequest) (*ResponseObject, error)
- func (m *MockLLM) CreateResponseStream(ctx context.Context, req ResponseRequest) (ResponseStreamClient, error)
- func (m *MockLLM) WithFinalResponse(text string) *MockLLM
- func (m *MockLLM) WithResponse(text string, toolCalls []ToolCall) *MockLLM
- func (m *MockLLM) WithStream(chunks []providers.StreamChunk) *MockLLM
- type ModelCostConfig
- type NoOpTracer
- func (n *NoOpTracer) Flush(ctx context.Context) error
- func (n *NoOpTracer) LogEvent(ctx context.Context, name string, attributes map[string]any) error
- func (n *NoOpTracer) LogGeneration(ctx context.Context, opts GenerationOptions) error
- func (n *NoOpTracer) SetSpanAttributes(ctx context.Context, attributes map[string]any) error
- func (n *NoOpTracer) SetSpanOutput(ctx context.Context, output any) error
- func (n *NoOpTracer) SetTraceAttributes(ctx context.Context, attributes map[string]any) error
- func (n *NoOpTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, func())
- func (n *NoOpTracer) StartTrace(ctx context.Context, name string, opts ...TraceOption) (context.Context, func())
- type ParallelConfig
- type ParameterSchema
- func (ps *ParameterSchema) Optional() *ParameterSchema
- func (ps *ParameterSchema) Required() *ParameterSchema
- func (ps *ParameterSchema) ToMap() map[string]any
- func (ps *ParameterSchema) ToMapStrict() map[string]any
- func (ps *ParameterSchema) WithDescription(desc string) *ParameterSchema
- func (ps *ParameterSchema) WithEnum(values ...string) *ParameterSchema
- func (ps *ParameterSchema) WithProperty(name string, schema *ParameterSchema) *ParameterSchema
- type PendingFormatter
- type ProviderAdapter
- type ReasoningEffort
- type ResponseAnnotation
- type ResponseContentItem
- type ResponseDelta
- type ResponseError
- type ResponseImageURL
- type ResponseInput
- type ResponseObject
- type ResponseOutputItem
- type ResponseReasoning
- type ResponseRequest
- type ResponseStream
- type ResponseStreamChunk
- type ResponseStreamClient
- type ResponseTextConfig
- type ResponseTextFormat
- type ResponseTokensDetails
- type ResponseTool
- type ResponseToolCall
- type ResponseToolChoice
- type ResponseToolFunction
- type ResponseUsage
- type ResponsesClient
- type ResultFormatter
- type RetryConfig
- type SpanConfig
- type SpanOption
- type SpanType
- type SystemPromptFunc
- type TimeoutConfig
- type Tool
- func (t *Tool) Execute(ctx context.Context, argsJSON string) (any, error)
- func (t *Tool) FormatPending(args map[string]any) string
- func (t *Tool) FormatResult(result any) string
- func (t *Tool) Name() string
- func (t *Tool) ToOpenAI() interface{}
- func (t *Tool) ToToolDefinition() providers.ToolDefinition
- type ToolBuilder
- func (tb *ToolBuilder) Build() Tool
- func (tb *ToolBuilder) WithConcurrency(mode ConcurrencyMode) *ToolBuilder
- func (tb *ToolBuilder) WithDescription(desc string) *ToolBuilder
- func (tb *ToolBuilder) WithHandler(handler ToolHandler) *ToolBuilder
- func (tb *ToolBuilder) WithJSONSchema(schema map[string]any) *ToolBuilder
- func (tb *ToolBuilder) WithParameter(name string, schema *ParameterSchema) *ToolBuilder
- func (tb *ToolBuilder) WithPendingFormatter(formatter PendingFormatter) *ToolBuilder
- func (tb *ToolBuilder) WithRawParameters(params map[string]any) *ToolBuilder
- func (tb *ToolBuilder) WithResultFormatter(formatter ResultFormatter) *ToolBuilder
- func (tb *ToolBuilder) WithStrictMode(strict bool) *ToolBuilder
- type ToolCall
- type ToolHandler
- type TraceConfig
- type TraceOption
- func WithEnvironment(env string) TraceOption
- func WithMetadata(metadata map[string]any) TraceOption
- func WithRelease(release string) TraceOption
- func WithSessionID(sessionID string) TraceOption
- func WithTags(tags ...string) TraceOption
- func WithTraceInput(input any) TraceOption
- func WithTraceStartTime(startTime time.Time) TraceOption
- func WithUserID(userID string) TraceOption
- func WithVersion(version string) TraceOption
- type Tracer
- type UsageInfo
Constants ¶
This section is empty.
Variables ¶
var ( NewMemoryConversationStore = conversation.NewMemoryConversationStore DefaultRetryConfig = retry.DefaultRetryConfig DefaultTimeoutConfig = timeout.DefaultTimeoutConfig DefaultLoggingConfig = logging.DefaultLoggingConfig DefaultParallelConfig = parallel.DefaultParallelConfig ErrConversationNotFound = conversation.ErrConversationNotFound )
Function re-exports for convenience
var ( ErrMissingAPIKey = errors.New("agentkit: APIKey is required") ErrInvalidIterations = errors.New("agentkit: MaxIterations must be between 1 and 100") ErrInvalidTemperature = errors.New("agentkit: Temperature must be between 0.0 and 2.0") ErrInvalidReasoningEffort = errors.New("agentkit: ReasoningEffort must be valid") )
Common validation errors.
var ( ErrCollaborationNoFacilitator = errors.New("agentkit: collaboration requires a facilitator agent") ErrCollaborationNoPeers = errors.New("agentkit: collaboration requires at least one peer agent") ErrCollaborationTopicEmpty = errors.New("agentkit: collaboration topic cannot be empty") ErrCollaborationFailed = errors.New("agentkit: collaboration failed") )
var ( ErrHandoffAgentNil = errors.New("agentkit: handoff target agent cannot be nil") ErrHandoffTaskEmpty = errors.New("agentkit: handoff task cannot be empty") ErrHandoffExecutionFail = errors.New("agentkit: handoff execution failed") )
var DefaultModelCosts = map[string]ModelCostConfig{
"gpt-5.2": {
InputCostPer1MTokens: 2.50,
OutputCostPer1MTokens: 10.00,
},
"gpt-4o": {
InputCostPer1MTokens: 5.00,
OutputCostPer1MTokens: 15.00,
},
"gpt-4o-2024-11-20": {
InputCostPer1MTokens: 2.50,
OutputCostPer1MTokens: 10.00,
},
"gpt-4o-mini": {
InputCostPer1MTokens: 0.150,
OutputCostPer1MTokens: 0.600,
},
"gpt-4o-mini-2024-07-18": {
InputCostPer1MTokens: 0.150,
OutputCostPer1MTokens: 0.600,
},
"gpt-4-turbo": {
InputCostPer1MTokens: 10.00,
OutputCostPer1MTokens: 30.00,
},
"gpt-4-turbo-2024-04-09": {
InputCostPer1MTokens: 10.00,
OutputCostPer1MTokens: 30.00,
},
"gpt-5.1-codex-max": {
InputCostPer1MTokens: 2.50,
OutputCostPer1MTokens: 10.00,
},
}
DefaultModelCosts provides FALLBACK pricing for common OpenAI models. These are used when: (1) dynamic pricing is disabled (ModelPricingAPIURL = ""); (2) the API fetch fails or times out; or (3) the model is not found in the API response.
By default, AgentKit fetches real-time pricing from models.dev API automatically. The API fetch is non-blocking and has a conservative timeout (5 seconds).
Priority order for pricing: 1. Custom pricing (via RegisterModelCost) - highest priority 2. Dynamic pricing (from API) - fetched automatically 3. Fallback pricing (below) - used if API unavailable
Fallback prices last updated: January 8, 2026. Source: https://openai.com/api/pricing/
var DisableCostCalculation = false
DisableCostCalculation can be set to true to skip all cost calculations. This is useful if you don't need cost tracking or want to avoid outdated pricing estimates.
var ErrDepsNotFound = errors.New("agentkit: dependencies not found in context")
ErrDepsNotFound is returned when dependencies are not found in context
var ErrInvalidStructSchema = errors.New("agentkit: struct schema requires a struct type")
ErrInvalidStructSchema is returned when a schema cannot be built from the provided type.
var ModelPricingAPIURL = "https://models.dev/api.json"
ModelPricingAPIURL is the endpoint for fetching real-time model pricing. Set to an empty string to disable dynamic price fetching.
var ModelPricingTimeout = 5 * time.Second
ModelPricingTimeout is the timeout for fetching model prices from the API
Functions ¶
func FilterEvents ¶
FilterEvents forwards only events with matching types.
func GetAgentName ¶
GetAgentName retrieves the agent name from the context.
func GetConversationID ¶
GetConversationID retrieves the conversation ID from the context
func GetDeps ¶
GetDeps retrieves dependencies from the context, returning an error if not found. This is the preferred method for accessing dependencies as it allows for proper error handling.
func GetIteration ¶
GetIteration retrieves the iteration index from the context.
func GetTraceID ¶
GetTraceID retrieves the trace ID from the context.
func MustGetDeps
deprecated
MustGetDeps retrieves dependencies from the context or panics.
Deprecated: Use GetDeps instead for better error handling. This method is kept for backward compatibility but should only be used in controlled environments where dependencies are guaranteed to exist.
func RefreshModelCosts ¶
func RefreshModelCosts()
RefreshModelCosts triggers a fresh fetch of model costs from the API. This is useful if you want to update prices without restarting the application.
func RegisterModelCost ¶
func RegisterModelCost(model string, config ModelCostConfig)
RegisterModelCost registers a custom model cost configuration. This takes precedence over both API-fetched and default pricing.
func SchemaFromStruct ¶
SchemaFromStruct builds a JSON schema object from a struct value or pointer.
func WithAgentName ¶
WithAgentName adds the agent name to the context.
func WithConversation ¶
WithConversation adds a conversation ID to the context
func WithEventPublisher ¶
func WithEventPublisher(ctx context.Context, publisher EventPublisher) context.Context
WithEventPublisher adds an event publisher to the context
func WithIteration ¶
WithIteration adds the iteration index to the context.
func WithSpanID ¶
WithSpanID adds a span ID to the context for request correlation.
func WithTraceID ¶
WithTraceID adds a trace ID to the context for request correlation.
Types ¶
type APIError ¶
type APIError struct {
StatusCode int
Code interface{} `json:"code"`
Message string `json:"message"`
Type string `json:"type"`
}
APIError represents an error returned by the Responses API
type Agent ¶
type Agent struct {
// contains filtered or unexported fields
}
Agent orchestrates LLM interactions with tool calling and streaming.
func (*Agent) AddContext ¶
func (*Agent) AppendToConversation ¶
func (*Agent) AsHandoffTool ¶
func (a *Agent) AsHandoffTool(name, description string, opts ...HandoffOption) Tool
AsHandoffTool converts an agent into a Tool that can be registered with another agent. This enables handoffs to be triggered by the LLM through tool calling.
Example:
researchAgent := agentkit.NewAgent(researchConfig)
coordinatorAgent := agentkit.NewAgent(coordinatorConfig)
// Register as a tool
coordinatorAgent.RegisterTool(researchAgent.AsHandoffTool(
"research_agent",
"Delegate research tasks to a specialized research agent",
))
func (*Agent) ClearConversation ¶
func (*Agent) DeleteConversation ¶
func (*Agent) ForkConversation ¶
func (*Agent) GetConversation ¶
Conversation management methods
func (*Agent) Handoff ¶
func (a *Agent) Handoff(ctx context.Context, to *Agent, task string, opts ...HandoffOption) (*HandoffResult, error)
Handoff delegates a task to another agent. The receiving agent works independently with an isolated context, then returns the result. The delegating agent can optionally see the full execution trace (thinking, tool calls, etc.) to understand how the work was done. Real-time event streaming to parent ALWAYS happens.
Example:
researchAgent := agentkit.NewAgent(researchConfig)
result, err := coordinator.Handoff(ctx, researchAgent,
"Research the top 3 Go web frameworks in 2026",
WithFullContext(true),
)
func (*Agent) SaveConversation ¶
func (a *Agent) SaveConversation(ctx context.Context, conv Conversation) error
func (*Agent) Use ¶
func (a *Agent) Use(m Middleware)
Use registers middleware for agent execution hooks.
type ApprovalConfig ¶
type ApprovalConfig struct {
// Tools is a list of tool names that require approval
// If empty, no tools require approval
Tools []string
// Handler is called for approval requests
// If nil, all tools in Tools list will be automatically denied
Handler ApprovalHandler
// AllTools, if true, requires approval for ALL tool calls
AllTools bool
}
ApprovalConfig configures which tools require approval
type ApprovalHandler ¶
type ApprovalHandler func(ctx context.Context, request ApprovalRequest) (bool, error)
ApprovalHandler is called when a tool requires approval before execution. It returns true to approve the call, or false to deny it.
type ApprovalRequest ¶
type ApprovalRequest struct {
ToolName string `json:"tool_name"`
Arguments map[string]any `json:"arguments"`
Description string `json:"description"` // Human-friendly description
ConversationID string `json:"conversation_id"` // If available
CallID string `json:"call_id"` // Unique call identifier
}
ApprovalRequest contains information about a tool call that requires approval
type CollaborationContribution ¶
type CollaborationContribution struct {
Agent string // Agent identifier
Content string // What the agent said
Time time.Time // When they contributed
}
CollaborationContribution represents one agent's input in a round.
type CollaborationOption ¶
type CollaborationOption func(*collaborationOptions)
CollaborationOption configures a collaboration session.
func WithCaptureHistory ¶
func WithCaptureHistory(capture bool) CollaborationOption
WithCaptureHistory enables capturing the full conversation history.
func WithMaxRounds ¶
func WithMaxRounds(max int) CollaborationOption
WithMaxRounds sets the maximum number of discussion rounds. Each round gives every agent a chance to contribute.
func WithRoundTimeout ¶
func WithRoundTimeout(timeout time.Duration) CollaborationOption
WithRoundTimeout sets a timeout for each discussion round.
type CollaborationResult ¶
type CollaborationResult struct {
FinalResponse string // The synthesized final answer
Rounds []CollaborationRound // History of the discussion
Summary string // Summary of the collaboration
Participants []string // Names/IDs of participating agents
Metadata map[string]any // Additional metadata
}
CollaborationResult contains the outcome of a collaborative discussion.
type CollaborationRound ¶
type CollaborationRound struct {
Number int // Round number (1-indexed)
Contributions []CollaborationContribution // Each agent's contribution
Synthesis string // How the facilitator synthesized this round
}
CollaborationRound represents one round of discussion.
type CollaborationSession ¶
type CollaborationSession struct {
// contains filtered or unexported fields
}
CollaborationSession represents a real-time discussion between multiple agents. Unlike handoffs, collaborations are not hierarchical - all agents are peers who contribute to a shared conversation. Think of this as a breakout room where everyone hashes out ideas together.
func NewCollaborationSession ¶
func NewCollaborationSession(facilitator *Agent, peers ...*Agent) *CollaborationSession
NewCollaborationSession creates a new collaboration session. The facilitator runs the conversation, and peers contribute as equals.
Example:
session := agentkit.NewCollaborationSession(
facilitatorAgent,
engineerAgent, designerAgent, productAgent,
)
result, err := session.Discuss(ctx, "How should we design the authentication API?")
func (*CollaborationSession) AsTool ¶
func (cs *CollaborationSession) AsTool(name, description string, opts ...CollaborationOption) Tool
AsTool converts a collaboration session into a Tool that can be registered with another agent. This enables collaborations to be triggered by the LLM through tool calling, with the topic provided dynamically at runtime.
Example:
session := agentkit.NewCollaborationSession(facilitator, engineer, designer, product)
coordinatorAgent.AddTool(session.AsTool(
"design_collaboration",
"Form a collaborative discussion with engineering, design, and product teams",
))
The LLM can then decide when to collaborate and provide the topic:
coordinatorAgent.Run(ctx, "We need to decide on the authentication approach...") // LLM calls: design_collaboration(topic: "How should we design the authentication API?")
func (*CollaborationSession) Configure ¶
func (cs *CollaborationSession) Configure(opts ...CollaborationOption) *CollaborationSession
Configure applies options to the collaboration session.
func (*CollaborationSession) Discuss ¶
func (cs *CollaborationSession) Discuss(ctx context.Context, topic string, opts ...CollaborationOption) (*CollaborationResult, error)
Discuss starts a collaborative discussion on a topic. All agents participate as peers, sharing ideas and building on each other's contributions.
Example:
result, err := session.Discuss(ctx,
"What's the best approach for handling user sessions?",
WithMaxRounds(5),
)
type ConcurrencyMode ¶
type ConcurrencyMode string
ConcurrencyMode controls whether a tool can run in parallel.
const ( ConcurrencyParallel ConcurrencyMode = "parallel" ConcurrencySerial ConcurrencyMode = "serial" )
type Config ¶
type Config struct {
APIKey string
Model string
SystemPrompt SystemPromptFunc
MaxIterations int
Temperature float32
ReasoningEffort providers.ReasoningEffort
ReasoningSummary string
TextVerbosity string
TextFormat string
Store bool
StreamResponses bool
ToolChoice string
Retry *RetryConfig
Timeout *TimeoutConfig
ConversationStore ConversationStore
Approval *ApprovalConfig
Provider providers.Provider
LLMProvider LLMProvider // DEPRECATED: Use Provider instead
Logging *LoggingConfig
EventBuffer int
ParallelToolExecution *ParallelConfig
Tracer Tracer
AgentName string
}
Config holds agent configuration.
type Conversation ¶
type Conversation = conversation.Conversation
Type aliases for internal package types
type ConversationStore ¶
type ConversationStore = conversation.ConversationStore
Type aliases for internal package types
type ConversationTurn ¶
type ConversationTurn = conversation.ConversationTurn
Type aliases for internal package types
type CostInfo ¶
type CostInfo struct {
PromptCost float64 // Estimated cost for prompt tokens in USD
CompletionCost float64 // Estimated cost for completion tokens in USD
TotalCost float64 // Estimated total cost in USD
}
CostInfo tracks the cost breakdown. NOTE: OpenAI's API does NOT provide cost information. Cost is estimated based on published pricing and may be inaccurate. Set DisableCostCalculation = true to disable cost estimation entirely.
func CalculateCost ¶
CalculateCost calculates the ESTIMATED cost of an LLM call based on token usage. It returns nil if: (1) cost calculation is disabled (DisableCostCalculation = true); (2) no tokens were used; or (3) the model's pricing is unknown.
NOTE: OpenAI's API provides usage (tokens) but NOT cost. This function estimates cost based on: 1. Real-time pricing from models.dev API (if available and fetched) 2. Fallback to hardcoded pricing (if API unavailable)
The API fetch happens asynchronously and never blocks execution.
type Event ¶
type Event struct {
Type EventType `json:"type"`
Data map[string]any `json:"data"`
Timestamp time.Time `json:"timestamp"`
TraceID string `json:"trace_id,omitempty"`
SpanID string `json:"span_id,omitempty"`
}
Event represents a streaming event emitted during agent execution
func ActionDetected ¶
ActionDetected creates an action detected event
func ActionResult ¶
ActionResult creates an action result event
func AgentComplete ¶
AgentComplete creates an agent complete event
func AgentCompleteWithUsage ¶
func AgentCompleteWithUsage(agentName, output string, usage providers.TokenUsage, iterations int, durationMs int64) Event
AgentCompleteWithUsage creates an agent complete event including detailed token usage.
func ApprovalDenied ¶
ApprovalDenied creates an approval denied event
func ApprovalGranted ¶
ApprovalGranted creates an approval granted event
func ApprovalNeeded ¶
func ApprovalNeeded(request ApprovalRequest) Event
ApprovalNeeded is an alias for ApprovalRequired
func ApprovalRejected ¶
func ApprovalRejected(request ApprovalRequest) Event
ApprovalRejected creates an approval rejected event
func ApprovalRequired ¶
func ApprovalRequired(request ApprovalRequest) Event
ApprovalRequired creates an approval required event
func CollaborationAgentContribution ¶
CollaborationAgentContribution creates a collaboration agent contribution event
func FinalOutput ¶
FinalOutput creates a final output event
func HandoffComplete ¶
HandoffComplete creates a handoff complete event
func HandoffStart ¶
HandoffStart creates a handoff start event
func ReasoningChunk ¶
ReasoningChunk creates a reasoning summary chunk event
func ResponseChunk ¶
ResponseChunk creates a response text chunk event
func ThinkingChunk ¶
ThinkingChunk creates a thinking chunk event
func ToolResult ¶
ToolResult creates a tool result event (alias for ActionResult)
type EventPublisher ¶
type EventPublisher func(Event)
EventPublisher is a function that publishes events
func GetEventPublisher ¶
func GetEventPublisher(ctx context.Context) (EventPublisher, bool)
GetEventPublisher retrieves the event publisher from the context
type EventRecorder ¶
type EventRecorder struct {
// contains filtered or unexported fields
}
EventRecorder captures events for replay or inspection.
func NewEventRecorder ¶
func NewEventRecorder() *EventRecorder
NewEventRecorder creates a new recorder.
func (*EventRecorder) Events ¶
func (r *EventRecorder) Events() []Event
Events returns a copy of recorded events.
func (*EventRecorder) Record ¶
func (r *EventRecorder) Record(input <-chan Event) <-chan Event
Record captures events while forwarding them.
type EventType ¶
type EventType string
EventType represents the type of streaming event
const ( // Content streaming events EventTypeThinkingChunk EventType = "thinking_chunk" EventTypeReasoningChunk EventType = "reasoning_chunk" EventTypeResponseChunk EventType = "response_chunk" EventTypeFinalOutput EventType = "final_output" // Agent lifecycle events EventTypeAgentStart EventType = "agent.start" EventTypeAgentComplete EventType = "agent.complete" // Tool execution events EventTypeActionDetected EventType = "action_detected" EventTypeActionResult EventType = "action_result" // Multi-agent coordination events EventTypeHandoffStart EventType = "handoff.start" EventTypeHandoffComplete EventType = "handoff.complete" EventTypeCollaborationAgentMessage EventType = "collaboration.agent.contribution" // Human-in-the-loop events EventTypeApprovalRequired EventType = "approval_required" EventTypeApprovalGranted EventType = "approval_granted" EventTypeApprovalDenied EventType = "approval_denied" // Progress and decision events EventTypeProgress EventType = "progress" EventTypeDecision EventType = "decision" // Error events EventTypeError EventType = "error" )
type GenerationOptions ¶
type GenerationOptions struct {
// Name of the generation
Name string
// Model name (e.g., "gpt-4o")
Model string
// ModelParameters like temperature, max_tokens, etc.
ModelParameters map[string]any
// Input prompt/messages
Input any
// Output completion/response
Output any
// Usage token counts
Usage *UsageInfo
// Cost in USD (optional, can be calculated from usage)
Cost *CostInfo
// Metadata stores arbitrary key-value data
Metadata map[string]any
// StartTime when generation started
StartTime time.Time
// EndTime when generation completed
EndTime time.Time
// CompletionStartTime when the model began generating (for streaming)
CompletionStartTime *time.Time
// PromptName links to a managed prompt in Langfuse
PromptName string
// PromptVersion links to a specific prompt version
PromptVersion int
// Level specifies the log level
Level LogLevel
// StatusMessage describes errors or warnings
StatusMessage string
}
GenerationOptions holds data for an LLM generation
type Handoff ¶
type Handoff struct {
// contains filtered or unexported fields
}
Handoff represents a delegation of work from one agent to another. The receiving agent works in an isolated context and returns results. This mimics how real people delegate: "Go figure this out and report back."
type HandoffConfiguration ¶
type HandoffConfiguration struct {
// contains filtered or unexported fields
}
HandoffConfiguration represents a reusable handoff setup. This allows creating a handoff configuration once and converting it to a tool.
func NewHandoffConfiguration ¶
func NewHandoffConfiguration(from, to *Agent, opts ...HandoffOption) *HandoffConfiguration
NewHandoffConfiguration creates a reusable handoff configuration. This is useful when you want to create the configuration once and convert it to a tool.
Example:
handoffConfig := agentkit.NewHandoffConfiguration(coordinator, researchAgent, WithFullContext(true))
tool := handoffConfig.AsTool("research", "Delegate research tasks")
coordinator.RegisterTool(tool)
func (*HandoffConfiguration) AsTool ¶
func (h *HandoffConfiguration) AsTool(name, description string) Tool
AsTool converts the handoff configuration into a Tool that can be registered with an agent. The LLM will decide when to use this tool and what task to provide.
Example:
researchHandoff := agentkit.NewHandoffConfiguration(coordinator, researchAgent)
tool := researchHandoff.AsTool(
"delegate_research",
"Delegate research tasks to a specialized research agent",
)
coordinator.RegisterTool(tool)
func (*HandoffConfiguration) Configure ¶
func (h *HandoffConfiguration) Configure(opts ...HandoffOption) *HandoffConfiguration
Configure applies additional options to the handoff configuration.
func (*HandoffConfiguration) Execute ¶
func (h *HandoffConfiguration) Execute(ctx context.Context, task string) (*HandoffResult, error)
Execute performs the handoff with a specific task.
type HandoffContext ¶
type HandoffContext struct {
Background string // Context about why this handoff is happening
Metadata map[string]any // Additional structured data
}
HandoffContext provides additional information for the delegated agent.
type HandoffOption ¶
type HandoffOption func(*handoffOptions)
HandoffOption configures a handoff.
func WithContext ¶
func WithContext(ctx HandoffContext) HandoffOption
WithContext provides additional background information to the delegated agent.
func WithFullContext ¶
func WithFullContext(include bool) HandoffOption
WithFullContext controls whether to return the full conversation context (thinking, tool calls, etc.) OR just the final result in the HandoffResult. When false, only the final response is returned. When true, the complete execution trace is included, which is useful for debugging or learning from the approach taken but increases context usage. NOTE: Real-time event streaming to parent agents ALWAYS happens regardless of this setting.
func WithMaxTurns ¶
func WithMaxTurns(max int) HandoffOption
WithMaxTurns limits the number of conversation turns the delegated agent can take.
type HandoffResult ¶
type HandoffResult struct {
Response string // The final response from the delegated agent
Summary string // Optional summary of the work done
Trace []HandoffTraceItem // Execution trace (if fullContext was enabled)
Metadata map[string]any // Additional result metadata
}
HandoffResult contains the outcome of a delegation.
type HandoffTraceItem ¶
type HandoffTraceItem struct {
Type string `json:"type"` // "thought", "tool_call", "tool_result", "response"
Content string `json:"content"` // The actual content of this step
}
HandoffTraceItem represents a single step in the delegated agent's execution.
type LLMProvider ¶
type LLMProvider interface {
CreateResponse(ctx context.Context, req ResponseRequest) (*ResponseObject, error)
CreateResponseStream(ctx context.Context, req ResponseRequest) (ResponseStreamClient, error)
}
LLMProvider abstracts the Responses API client for testing and custom providers. DEPRECATED: Use Provider interface instead for better decoupling.
func NewProviderAdapter ¶
func NewProviderAdapter(provider providers.Provider) LLMProvider
NewProviderAdapter creates an adapter from a Provider to LLMProvider.
type LoggingConfig ¶
type LoggingConfig = logging.LoggingConfig
Type aliases for internal package types
type MockLLM ¶
type MockLLM struct {
// contains filtered or unexported fields
}
MockLLM is a convenience wrapper around providers/mock.Provider for easier testing. It provides a builder pattern for configuring mock responses.
func NewMockLLM ¶
func NewMockLLM() *MockLLM
NewMockLLM creates a new mock LLM provider for testing. This is a convenience wrapper around providers/mock that maintains backward compatibility with the old testing API.
Usage:
mock := agentkit.NewMockLLM().
WithResponse("thinking...", []agentkit.ToolCall{
{Name: "search", Args: map[string]any{"query": "test"}},
}).
WithFinalResponse("done")
agent, _ := agentkit.New(agentkit.Config{
LLMProvider: mock,
Model: "test-model",
})
func (*MockLLM) CreateResponse ¶
func (m *MockLLM) CreateResponse(ctx context.Context, req ResponseRequest) (*ResponseObject, error)
CreateResponse implements the LLMProvider interface for backward compatibility.
func (*MockLLM) CreateResponseStream ¶
func (m *MockLLM) CreateResponseStream(ctx context.Context, req ResponseRequest) (ResponseStreamClient, error)
CreateResponseStream implements the LLMProvider interface for backward compatibility.
func (*MockLLM) WithFinalResponse ¶
WithFinalResponse appends a mock final response without tool calls.
func (*MockLLM) WithResponse ¶
WithResponse appends a mock response with optional tool calls.
func (*MockLLM) WithStream ¶
func (m *MockLLM) WithStream(chunks []providers.StreamChunk) *MockLLM
WithStream appends a mock stream of response chunks.
type ModelCostConfig ¶
type ModelCostConfig struct {
InputCostPer1MTokens float64 // Cost per 1M input tokens in USD
OutputCostPer1MTokens float64 // Cost per 1M output tokens in USD
}
ModelCostConfig defines the pricing for a specific model
type NoOpTracer ¶
type NoOpTracer struct{}
NoOpTracer is a tracer that does nothing (used when tracing is disabled)
func (*NoOpTracer) LogGeneration ¶
func (n *NoOpTracer) LogGeneration(ctx context.Context, opts GenerationOptions) error
func (*NoOpTracer) SetSpanAttributes ¶
func (*NoOpTracer) SetSpanOutput ¶
func (n *NoOpTracer) SetSpanOutput(ctx context.Context, output any) error
func (*NoOpTracer) SetTraceAttributes ¶
func (*NoOpTracer) StartSpan ¶
func (n *NoOpTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, func())
func (*NoOpTracer) StartTrace ¶
func (n *NoOpTracer) StartTrace(ctx context.Context, name string, opts ...TraceOption) (context.Context, func())
type ParallelConfig ¶
type ParallelConfig = parallel.ParallelConfig
Type aliases for internal package types
type ParameterSchema ¶
type ParameterSchema struct {
// contains filtered or unexported fields
}
ParameterSchema defines a tool parameter schema
func ArrayOf ¶
func ArrayOf(itemSchema *ParameterSchema) *ParameterSchema
ArrayOf creates an array parameter schema for complex item schemas.
func StructToSchema ¶
func StructToSchema[T any]() (*ParameterSchema, error)
StructToSchema converts a Go struct type to a ParameterSchema using reflection. It supports struct tags: `json`, `required`, `desc`, `enum`, `default`. This is useful for defining complex parameter schemas without manual WithProperty chains.
Supported struct tags:
- json: field name (use "-" to skip field)
- required: "true" marks field as required
- desc: field description
- enum: comma-separated allowed values
- default: default value
Example:
type Filters struct {
EmailDomain string `json:"email_domain" desc:"Filter by email domain"`
Status string `json:"status" required:"true" enum:"active,inactive"`
AgeRange struct {
Min int `json:"min" desc:"Minimum age"`
Max int `json:"max" desc:"Maximum age"`
} `json:"age_range"`
}
schema, _ := agentkit.StructToSchema[Filters]()
tool := agentkit.NewTool("search").
WithParameter("filters", schema).
Build()
func (*ParameterSchema) Optional ¶
func (ps *ParameterSchema) Optional() *ParameterSchema
Optional marks the parameter as optional
func (*ParameterSchema) Required ¶
func (ps *ParameterSchema) Required() *ParameterSchema
Required marks the parameter as required
func (*ParameterSchema) ToMap ¶
func (ps *ParameterSchema) ToMap() map[string]any
ToMap converts the schema to a map for OpenAI
func (*ParameterSchema) ToMapStrict ¶
func (ps *ParameterSchema) ToMapStrict() map[string]any
ToMapStrict converts the schema to a map with strict mode enabled. This wraps optional fields in anyOf with null and ensures all constraints are met.
func (*ParameterSchema) WithDescription ¶
func (ps *ParameterSchema) WithDescription(desc string) *ParameterSchema
WithDescription sets the parameter description
func (*ParameterSchema) WithEnum ¶
func (ps *ParameterSchema) WithEnum(values ...string) *ParameterSchema
WithEnum sets allowed values for the parameter
func (*ParameterSchema) WithProperty ¶
func (ps *ParameterSchema) WithProperty(name string, schema *ParameterSchema) *ParameterSchema
WithProperty adds a property to an object parameter schema.
type PendingFormatter ¶
PendingFormatter formats the display message when a tool is about to execute It receives the tool name and parsed arguments
type ProviderAdapter ¶
type ProviderAdapter struct {
// contains filtered or unexported fields
}
ProviderAdapter adapts the new Provider interface to the legacy LLMProvider interface. This maintains backward compatibility while allowing new code to use the cleaner Provider interface.
func (*ProviderAdapter) CreateResponse ¶
func (a *ProviderAdapter) CreateResponse(ctx context.Context, req ResponseRequest) (*ResponseObject, error)
CreateResponse implements LLMProvider using the new Provider interface.
func (*ProviderAdapter) CreateResponseStream ¶
func (a *ProviderAdapter) CreateResponseStream(ctx context.Context, req ResponseRequest) (ResponseStreamClient, error)
CreateResponseStream implements LLMProvider streaming using the new Provider interface.
type ReasoningEffort ¶
type ReasoningEffort string
ReasoningEffort controls the reasoning strength for reasoning models (o1/o3)
const ( // ReasoningEffortNone disables reasoning entirely (best for low-latency tasks) ReasoningEffortNone ReasoningEffort = "none" // ReasoningEffortMinimal generates very few reasoning tokens for fast time-to-first-token ReasoningEffortMinimal ReasoningEffort = "minimal" // ReasoningEffortLow favors speed and economical token usage ReasoningEffortLow ReasoningEffort = "low" // ReasoningEffortMedium provides balanced reasoning depth and speed (default) ReasoningEffortMedium ReasoningEffort = "medium" // ReasoningEffortHigh allocates significant computational depth for complex problems ReasoningEffortHigh ReasoningEffort = "high" // ReasoningEffortXHigh allocates the largest possible portion of tokens for reasoning (newest models) ReasoningEffortXHigh ReasoningEffort = "xhigh" )
type ResponseAnnotation ¶
type ResponseAnnotation struct {
Type string `json:"type"`
}
ResponseAnnotation represents an annotation in content
type ResponseContentItem ¶
type ResponseContentItem struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
ImageURL *ResponseImageURL `json:"image_url,omitempty"`
Annotations []ResponseAnnotation `json:"annotations,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
CallID string `json:"call_id,omitempty"`
Content string `json:"content,omitempty"`
Output string `json:"output,omitempty"`
}
ResponseContentItem represents a content item in input/output
type ResponseDelta ¶
type ResponseDelta struct {
Type string `json:"type"`
Index int `json:"index"`
Content []ResponseContentItem `json:"content,omitempty"`
ToolCalls []ResponseToolCall `json:"tool_calls,omitempty"`
}
ResponseDelta is deprecated - using event-based streaming instead
type ResponseError ¶
ResponseError represents an error in the response
type ResponseImageURL ¶
type ResponseImageURL struct {
URL string `json:"url"`
}
ResponseImageURL represents an image URL in content
type ResponseInput ¶
type ResponseInput struct {
Role string `json:"role"`
Content []ResponseContentItem `json:"content"`
}
ResponseInput represents input to the model
type ResponseObject ¶
type ResponseObject struct {
ID string `json:"id"`
Object string `json:"object"`
CreatedAt int64 `json:"created_at"`
Status string `json:"status"` // "completed", "failed", "in_progress", "cancelled", "queued", "incomplete"
Model string `json:"model"`
Output []ResponseOutputItem `json:"output"`
Usage ResponseUsage `json:"usage"`
Error *ResponseError `json:"error"`
PreviousResponseID string `json:"previous_response_id"`
Temperature float32 `json:"temperature"`
ParallelToolCalls bool `json:"parallel_tool_calls"`
ToolChoice any `json:"tool_choice"`
Tools []ResponseTool `json:"tools"`
}
ResponseObject represents a response from the API
type ResponseOutputItem ¶
type ResponseOutputItem struct {
Type string `json:"type"` // "message", "reasoning", "function_call", etc.
ID string `json:"id"`
Status string `json:"status"`
Role string `json:"role,omitempty"`
Name string `json:"name,omitempty"` // For function_call type
CallID string `json:"call_id,omitempty"` // For function_call type
Arguments string `json:"arguments,omitempty"` // For function_call type
Content []ResponseContentItem `json:"content,omitempty"`
Summary []ResponseContentItem `json:"summary,omitempty"`
ToolCalls []ResponseToolCall `json:"tool_calls,omitempty"` // Deprecated - function_call items are separate
}
ResponseOutputItem represents an item in the output array
type ResponseReasoning ¶
type ResponseReasoning struct {
Effort ReasoningEffort `json:"effort,omitempty"`
Summary string `json:"summary,omitempty"`
}
ResponseReasoning represents reasoning configuration for reasoning models
type ResponseRequest ¶
type ResponseRequest struct {
Model string `json:"model"`
Input any `json:"input,omitempty"` // string or []ResponseInput
Instructions string `json:"instructions,omitempty"`
Temperature float32 `json:"temperature,omitempty"` // For GPT models only (ignored for o1/o3)
MaxOutputTokens int `json:"max_output_tokens,omitempty"`
Tools []ResponseTool `json:"tools,omitempty"`
ToolChoice any `json:"tool_choice,omitempty"` // string or ResponseToolChoice
Stream bool `json:"stream,omitempty"`
Store bool `json:"store,omitempty"`
PreviousResponseID string `json:"previous_response_id,omitempty"`
ParallelToolCalls bool `json:"parallel_tool_calls,omitempty"`
TopP float32 `json:"top_p,omitempty"`
Text *ResponseTextConfig `json:"text,omitempty"`
Metadata map[string]string `json:"metadata,omitempty"`
Reasoning *ResponseReasoning `json:"reasoning,omitempty"` // For reasoning models (gpt-5/o-series): use ResponseReasoning with effort
}
ResponseRequest represents a request to create a response. Note: For reasoning models (gpt-5, o-series), set Reasoning.Effort instead of Temperature. Temperature is ignored for reasoning models.
type ResponseStream ¶
type ResponseStream struct {
// contains filtered or unexported fields
}
ResponseStream wraps a streaming response
func (*ResponseStream) ReadChunk ¶
func (s *ResponseStream) ReadChunk() (*ResponseStreamChunk, error)
ReadChunk is an alias for Recv() to satisfy ResponseStreamClient interface
func (*ResponseStream) Recv ¶
func (s *ResponseStream) Recv() (*ResponseStreamChunk, error)
Recv receives the next chunk from the stream
type ResponseStreamChunk ¶
type ResponseStreamChunk struct {
Type string `json:"type"` // Event type
SequenceNumber int `json:"sequence_number,omitempty"`
ResponseID string `json:"response_id,omitempty"`
Response *ResponseObject `json:"response,omitempty"`
Error *ResponseError `json:"error,omitempty"` // For error events
ItemID string `json:"item_id,omitempty"`
OutputIndex int `json:"output_index,omitempty"`
Delta string `json:"delta,omitempty"` // For delta events
Text string `json:"text,omitempty"` // For done events with text
Item *ResponseOutputItem `json:"item,omitempty"` // For added/done events
Name string `json:"name,omitempty"` // For function_call_arguments.done
Arguments string `json:"arguments,omitempty"` // For function_call_arguments.done
Status string `json:"status,omitempty"`
Output []ResponseOutputItem `json:"output,omitempty"` // For response.done
Usage *ResponseUsage `json:"usage,omitempty"`
Obfuscation string `json:"obfuscation,omitempty"` // Sent by API, purpose unclear
}
ResponseStreamChunk represents a streaming response chunk. The Responses API uses event-based streaming with specific event types: - response.output_item.added: New output item started - response.output_text.delta: Text content delta - response.function_call_arguments.delta: Function arguments delta - response.function_call_arguments.done: Function call complete (includes name and arguments) - response.done: Response complete
type ResponseStreamClient ¶
type ResponseStreamClient interface {
ReadChunk() (*ResponseStreamChunk, error)
Close() error
}
ResponseStreamClient defines the interface for streaming responses
type ResponseTextConfig ¶
type ResponseTextConfig struct {
Format ResponseTextFormat `json:"format"`
Verbosity string `json:"verbosity,omitempty"`
}
ResponseTextConfig represents text configuration
type ResponseTextFormat ¶
type ResponseTextFormat struct {
Type string `json:"type"` // "text" or "json_schema"
JSONSchema any `json:"json_schema,omitempty"`
}
ResponseTextFormat represents text format configuration
type ResponseTokensDetails ¶
type ResponseTokensDetails struct {
CachedTokens int `json:"cached_tokens,omitempty"`
ReasoningTokens int `json:"reasoning_tokens,omitempty"`
}
ResponseTokensDetails represents detailed token information
type ResponseTool ¶
type ResponseTool struct {
Type string `json:"type"`
Name string `json:"name"`
Description string `json:"description,omitempty"`
Parameters map[string]any `json:"parameters,omitempty"`
Strict bool `json:"strict,omitempty"`
}
ResponseTool represents a tool definition for the Responses API. Note: In the Responses API, name/description/parameters are at the top level, not nested.
type ResponseToolCall ¶
type ResponseToolCall struct {
ID string `json:"id"`
CallID string `json:"call_id"`
Type string `json:"type"` // "function_call"
Name string `json:"name"`
Arguments string `json:"arguments"`
}
ResponseToolCall represents a tool call in output. For the Responses API, tool calls are of type "function_call", not "tool_call".
type ResponseToolChoice ¶
type ResponseToolChoice struct {
Type string `json:"type"` // "auto", "required", "none", or "function"
Function *ResponseToolFunction `json:"function,omitempty"`
}
ResponseToolChoice represents tool choice configuration
type ResponseToolFunction ¶
type ResponseToolFunction struct {
Name string `json:"name"`
}
ResponseToolFunction represents a specific function choice
type ResponseUsage ¶
type ResponseUsage struct {
InputTokens int `json:"input_tokens"`
OutputTokens int `json:"output_tokens"`
ReasoningTokens int `json:"reasoning_tokens,omitempty"` // For reasoning models (o1, o3)
TotalTokens int `json:"total_tokens"`
InputTokensDetails ResponseTokensDetails `json:"input_tokens_details"`
OutputTokensDetails ResponseTokensDetails `json:"output_tokens_details"`
}
ResponseUsage represents token usage
type ResponsesClient ¶
type ResponsesClient struct {
// contains filtered or unexported fields
}
ResponsesClient wraps OpenAI's Responses API
func NewResponsesClient ¶
func NewResponsesClient(apiKey string, logger *slog.Logger) *ResponsesClient
NewResponsesClient creates a new Responses API client
func (*ResponsesClient) CreateResponse ¶
func (c *ResponsesClient) CreateResponse(ctx context.Context, req ResponseRequest) (*ResponseObject, error)
CreateResponse creates a non-streaming response
func (*ResponsesClient) CreateResponseStream ¶
func (c *ResponsesClient) CreateResponseStream(ctx context.Context, req ResponseRequest) (ResponseStreamClient, error)
CreateResponseStream creates a streaming response
type ResultFormatter ¶
ResultFormatter formats the display message when a tool completes It receives the tool name and the result returned by the handler
type SpanConfig ¶
type SpanConfig struct {
// Type specifies the span type (span, generation, event, tool, retrieval)
Type SpanType
// Input is the input data for this operation
Input any
// Metadata stores arbitrary key-value data
Metadata map[string]any
// Level specifies the log level (DEBUG, DEFAULT, WARNING, ERROR)
Level LogLevel
}
SpanConfig holds configuration for a span
type SpanOption ¶
type SpanOption func(*SpanConfig)
SpanOption configures span creation
func WithLogLevel ¶
func WithLogLevel(level LogLevel) SpanOption
func WithSpanInput ¶
func WithSpanInput(input any) SpanOption
func WithSpanMetadata ¶
func WithSpanMetadata(metadata map[string]any) SpanOption
func WithSpanType ¶
func WithSpanType(spanType SpanType) SpanOption
Option functions for span configuration
type SpanType ¶
type SpanType string
SpanType represents the type of observation
const ( // SpanTypeSpan is a generic span for non-LLM operations SpanTypeSpan SpanType = "span" // SpanTypeGeneration tracks LLM calls SpanTypeGeneration SpanType = "generation" // SpanTypeEvent tracks point-in-time events SpanTypeEvent SpanType = "event" // SpanTypeTool tracks tool/function calls SpanTypeTool SpanType = "tool" // SpanTypeRetrieval tracks RAG retrieval steps SpanTypeRetrieval SpanType = "retrieval" )
type SystemPromptFunc ¶
SystemPromptFunc builds the system prompt from context.
type TimeoutConfig ¶
type TimeoutConfig = timeout.TimeoutConfig
Type aliases for internal package types
type Tool ¶
type Tool struct {
// contains filtered or unexported fields
}
Tool represents an agent tool with its metadata and handler
func (*Tool) FormatPending ¶
FormatPending formats the pending message for this tool
func (*Tool) FormatResult ¶
FormatResult formats the result message for this tool
func (*Tool) ToOpenAI ¶
func (t *Tool) ToOpenAI() interface{}
ToOpenAI converts the tool to OpenAI function definition. DEPRECATED: This method is kept for backward compatibility but couples the tool to OpenAI. New code should use ToToolDefinition() instead.
func (*Tool) ToToolDefinition ¶
func (t *Tool) ToToolDefinition() providers.ToolDefinition
ToToolDefinition converts the tool to a provider-agnostic ToolDefinition.
type ToolBuilder ¶
type ToolBuilder struct {
// contains filtered or unexported fields
}
ToolBuilder helps construct tools with a fluent API
func NewStructTool ¶
func NewStructTool[T any](name string, handler func(context.Context, T) (any, error)) (*ToolBuilder, error)
NewStructTool creates a tool builder using a struct type for schema and decoding.
func (*ToolBuilder) WithConcurrency ¶
func (tb *ToolBuilder) WithConcurrency(mode ConcurrencyMode) *ToolBuilder
WithConcurrency controls whether a tool can run in parallel.
func (*ToolBuilder) WithDescription ¶
func (tb *ToolBuilder) WithDescription(desc string) *ToolBuilder
WithDescription sets the tool description
func (*ToolBuilder) WithHandler ¶
func (tb *ToolBuilder) WithHandler(handler ToolHandler) *ToolBuilder
WithHandler sets the tool handler function
func (*ToolBuilder) WithJSONSchema ¶
func (tb *ToolBuilder) WithJSONSchema(schema map[string]any) *ToolBuilder
WithJSONSchema sets the full JSON schema for complex tools.
func (*ToolBuilder) WithParameter ¶
func (tb *ToolBuilder) WithParameter(name string, schema *ParameterSchema) *ToolBuilder
WithParameter adds a parameter to the tool
func (*ToolBuilder) WithPendingFormatter ¶
func (tb *ToolBuilder) WithPendingFormatter(formatter PendingFormatter) *ToolBuilder
WithPendingFormatter sets the formatter for pending tool execution messages
func (*ToolBuilder) WithRawParameters ¶
func (tb *ToolBuilder) WithRawParameters(params map[string]any) *ToolBuilder
WithRawParameters sets the full parameters schema for complex tools.
func (*ToolBuilder) WithResultFormatter ¶
func (tb *ToolBuilder) WithResultFormatter(formatter ResultFormatter) *ToolBuilder
WithResultFormatter sets the formatter for tool result messages
func (*ToolBuilder) WithStrictMode ¶
func (tb *ToolBuilder) WithStrictMode(strict bool) *ToolBuilder
WithStrictMode enables or disables OpenAI Structured Outputs for this tool. When true (default), the tool schema uses strict JSON Schema validation, ensuring the model output always matches the schema exactly. Disable only if you need to use JSON Schema features not supported by strict mode.
type ToolCall ¶
ToolCall represents a tool call for testing purposes. This is an alias for providers.ToolCall to maintain backward compatibility.
type ToolHandler ¶
ToolHandler is a function that executes a tool
type TraceConfig ¶
type TraceConfig struct {
// UserID identifies the end-user
UserID string
// SessionID groups related traces (e.g., conversation thread)
SessionID string
// Tags categorize the trace
Tags []string
// Metadata stores arbitrary key-value data
Metadata map[string]any
// Input is the initial input for the trace
Input any
// Version tracks the application version
Version string
// Environment specifies the deployment environment (production, staging, etc.)
Environment string
// Release identifies the release version
Release string
// StartTime is the explicit start time for the trace (optional)
StartTime *time.Time
}
TraceConfig holds configuration for a trace
type TraceOption ¶
type TraceOption func(*TraceConfig)
TraceOption configures trace creation
func WithEnvironment ¶
func WithEnvironment(env string) TraceOption
func WithMetadata ¶
func WithMetadata(metadata map[string]any) TraceOption
func WithRelease ¶
func WithRelease(release string) TraceOption
func WithSessionID ¶
func WithSessionID(sessionID string) TraceOption
func WithTags ¶
func WithTags(tags ...string) TraceOption
func WithTraceInput ¶
func WithTraceInput(input any) TraceOption
func WithTraceStartTime ¶
func WithTraceStartTime(startTime time.Time) TraceOption
WithTraceStartTime sets an explicit start time for the trace
func WithUserID ¶
func WithUserID(userID string) TraceOption
Option functions for trace configuration
func WithVersion ¶
func WithVersion(version string) TraceOption
type Tracer ¶
type Tracer interface {
// StartTrace creates a new trace context for the agent run
// Returns a context with the trace attached and a function to end the trace
StartTrace(ctx context.Context, name string, opts ...TraceOption) (context.Context, func())
// StartSpan creates a new span within the current trace
// Spans represent individual operations like tool calls or LLM generations
StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, func())
// LogGeneration records an LLM generation event
LogGeneration(ctx context.Context, opts GenerationOptions) error
// LogEvent records a simple event within the trace
LogEvent(ctx context.Context, name string, attributes map[string]any) error
// SetTraceAttributes sets attributes on the current trace
SetTraceAttributes(ctx context.Context, attributes map[string]any) error
// SetSpanOutput sets the output on the current span (observation)
SetSpanOutput(ctx context.Context, output any) error
// SetSpanAttributes sets attributes on the current span as observation metadata
SetSpanAttributes(ctx context.Context, attributes map[string]any) error
// Flush ensures all pending traces are sent (important for short-lived applications)
Flush(ctx context.Context) error
}
Tracer defines the interface for tracing LLM operations. This interface allows for multiple tracing backend implementations.
Source Files
¶
Directories
¶
| Path | Synopsis |
|---|---|
|
internal
|
|
|
testutil
Package testutil provides testing utilities for AgentKit.
|
Package testutil provides testing utilities for AgentKit. |
|
Package providers defines provider-agnostic interfaces and domain models for LLM interactions.
|
Package providers defines provider-agnostic interfaces and domain models for LLM interactions. |
|
mock
Package mock implements a mock Provider for testing.
|
Package mock implements a mock Provider for testing. |
|
openai
Package openai implements the Provider interface for OpenAI's Responses API.
|
Package openai implements the Provider interface for OpenAI's Responses API. |
|
tracing
|
|
|
langfuse
Package langfuse provides Langfuse tracing implementation via OpenTelemetry
|
Package langfuse provides Langfuse tracing implementation via OpenTelemetry |