llm

package
v0.1.0
Published: Aug 9, 2025 License: MIT Imports: 11 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type Choice

type Choice struct {
	Index        int       `json:"index"`
	Message      Message   `json:"message"`
	Delta        Message   `json:"delta,omitempty"`
	FinishReason string    `json:"finish_reason"`
	Logprobs     *Logprobs `json:"logprobs,omitempty"`
}

Choice represents a choice in the completion response

type CompletionRequest

type CompletionRequest struct {
	Messages      []Message        `json:"messages"`
	Model         string           `json:"model,omitempty"`
	Temperature   float64          `json:"temperature,omitempty"`
	MaxTokens     int              `json:"max_tokens,omitempty"`
	Tools         []ToolDefinition `json:"tools,omitempty"`
	ToolChoice    interface{}      `json:"tool_choice,omitempty"`
	Stream        bool             `json:"stream,omitempty"`
	SystemPrompt  string           `json:"system_prompt,omitempty"`
	StopSequences []string         `json:"stop_sequences,omitempty"`
}

CompletionRequest represents a request for completion
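
A minimal sketch of building a request. The model name and sampling values are illustrative assumptions, not package defaults:

req := CompletionRequest{
	Model: "llama3", // assumed model name; prefer one reported by GetModels
	Messages: []Message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Summarize the Go memory model in one sentence."},
	},
	Temperature: 0.2,
	MaxTokens:   256,
}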

type CompletionResponse

type CompletionResponse struct {
	ID                string                 `json:"id"`
	Object            string                 `json:"object"`
	Created           int64                  `json:"created"`
	Model             string                 `json:"model"`
	Choices           []Choice               `json:"choices"`
	Usage             Usage                  `json:"usage"`
	SystemFingerprint string                 `json:"system_fingerprint,omitempty"`
	Metadata          map[string]interface{} `json:"metadata,omitempty"`
}

CompletionResponse represents a response from completion

type ConversationHistory

type ConversationHistory struct {
	// contains filtered or unexported fields
}

ConversationHistory manages conversation history

func NewConversationHistory

func NewConversationHistory() *ConversationHistory

NewConversationHistory creates a new conversation history

func (*ConversationHistory) AddMessage

func (ch *ConversationHistory) AddMessage(message Message)

AddMessage adds a message to the conversation history

func (*ConversationHistory) Clear

func (ch *ConversationHistory) Clear()

Clear clears the conversation history

func (*ConversationHistory) GetLastN

func (ch *ConversationHistory) GetLastN(n int) []Message

GetLastN returns the last N messages

func (*ConversationHistory) GetMessages

func (ch *ConversationHistory) GetMessages() []Message

GetMessages returns all messages in the conversation

func (*ConversationHistory) Size

func (ch *ConversationHistory) Size() int

Size returns the number of messages in the conversation
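
Putting the methods together, a short usage sketch (fmt is the standard library package):

ch := NewConversationHistory()
ch.AddMessage(Message{Role: "user", Content: "Hello"})
ch.AddMessage(Message{Role: "assistant", Content: "Hi! How can I help?"})

fmt.Println(ch.Size()) // 2
last := ch.GetLastN(1) // just the assistant reply
fmt.Println(last[0].Content)
ch.Clear() // history is empty again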

type Function

type Function struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Parameters  map[string]interface{} `json:"parameters"`
}

Function represents a function definition

type FunctionCall

type FunctionCall struct {
	Name      string `json:"name"`
	Arguments string `json:"arguments"`
}

FunctionCall represents a function call

type GeminiProvider

type GeminiProvider struct {
	// contains filtered or unexported fields
}

GeminiProvider implements the Provider interface for Google Gemini. This is a mock implementation for demonstration purposes.

func NewGeminiProvider

func NewGeminiProvider(config *ProviderConfig) (*GeminiProvider, error)

NewGeminiProvider creates a new Gemini provider

func (*GeminiProvider) Close

func (p *GeminiProvider) Close() error

Close closes the provider

func (*GeminiProvider) Complete

func (p *GeminiProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete generates a completion

func (*GeminiProvider) CompleteStream

func (p *GeminiProvider) CompleteStream(ctx context.Context, req CompletionRequest, callback StreamCallback) error

CompleteStream generates a streaming completion

func (*GeminiProvider) CompleteStreamWithMode

func (p *GeminiProvider) CompleteStreamWithMode(ctx context.Context, req CompletionRequest, callback StreamCallback, mode StreamMode) error

CompleteStreamWithMode generates a streaming completion with explicit mode

func (*GeminiProvider) CompleteWithMode

func (p *GeminiProvider) CompleteWithMode(ctx context.Context, req CompletionRequest, mode StreamMode) (*CompletionResponse, error)

CompleteWithMode generates a completion with explicit streaming mode

func (*GeminiProvider) GetConfig

func (p *GeminiProvider) GetConfig() map[string]interface{}

GetConfig returns provider configuration

func (*GeminiProvider) GetDefaultModels

func (p *GeminiProvider) GetDefaultModels() []string

GetDefaultModels returns the default Gemini models

func (*GeminiProvider) GetMaxTokens

func (p *GeminiProvider) GetMaxTokens(model string) int

GetMaxTokens returns the maximum tokens for a model

func (*GeminiProvider) GetModels

func (p *GeminiProvider) GetModels(ctx context.Context) ([]string, error)

GetModels returns available models

func (*GeminiProvider) GetName

func (p *GeminiProvider) GetName() string

GetName returns the provider name

func (*GeminiProvider) GetStreamingConfig

func (p *GeminiProvider) GetStreamingConfig() *StreamingConfig

GetStreamingConfig returns the current streaming configuration

func (*GeminiProvider) IsHealthy

func (p *GeminiProvider) IsHealthy(ctx context.Context) error

IsHealthy checks if the provider is healthy

func (*GeminiProvider) SetConfig

func (p *GeminiProvider) SetConfig(config map[string]interface{}) error

SetConfig updates provider configuration

func (*GeminiProvider) SetStreamingConfig

func (p *GeminiProvider) SetStreamingConfig(config *StreamingConfig) error

SetStreamingConfig updates the streaming configuration

func (*GeminiProvider) SupportsStreaming

func (p *GeminiProvider) SupportsStreaming() bool

SupportsStreaming returns whether the provider supports streaming

func (*GeminiProvider) SupportsToolCalls

func (p *GeminiProvider) SupportsToolCalls() bool

SupportsToolCalls returns whether the provider supports tool calls

type Logprobs

type Logprobs struct {
	Tokens        []string             `json:"tokens"`
	TokenLogprobs []float64            `json:"token_logprobs"`
	TopLogprobs   []map[string]float64 `json:"top_logprobs"`
}

Logprobs represents log probabilities

type Message

type Message struct {
	Role       string                 `json:"role"` // "system", "user", "assistant", "tool"
	Content    string                 `json:"content"`
	Name       string                 `json:"name,omitempty"`
	ToolCalls  []ToolCall             `json:"tool_calls,omitempty"`
	ToolCallID string                 `json:"tool_call_id,omitempty"`
	Metadata   map[string]interface{} `json:"metadata,omitempty"`
}

Message represents a message in a conversation
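
A hedged sketch of the tool-calling round trip these fields support: an assistant message carrying a ToolCall, then a "tool" message reporting the result via ToolCallID. The IDs and payloads are illustrative, and req is the request from the CompletionRequest sketch above:

assistantMsg := Message{
	Role: "assistant",
	ToolCalls: []ToolCall{{
		ID:   "call_1", // illustrative ID
		Type: "function",
		Function: FunctionCall{
			Name:      "get_weather",
			Arguments: `{"city":"Berlin"}`, // arguments travel as a JSON string
		},
	}},
}
toolResult := Message{
	Role:       "tool",
	ToolCallID: "call_1", // links the result back to the call
	Content:    `{"temp_c":18}`,
}
req.Messages = append(req.Messages, assistantMsg, toolResult)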

type OllamaMessage

type OllamaMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

OllamaMessage represents an Ollama message

type OllamaModelInfo

type OllamaModelInfo struct {
	Name       string    `json:"name"`
	ModifiedAt time.Time `json:"modified_at"`
	Size       int64     `json:"size"`
	Digest     string    `json:"digest"`
}

OllamaModelInfo represents information about an Ollama model

type OllamaModelsResponse

type OllamaModelsResponse struct {
	Models []OllamaModelInfo `json:"models"`
}

OllamaModelsResponse represents the response from the models endpoint

type OllamaOptions

type OllamaOptions struct {
	Temperature float64  `json:"temperature,omitempty"`
	TopP        float64  `json:"top_p,omitempty"`
	TopK        int      `json:"top_k,omitempty"`
	NumPredict  int      `json:"num_predict,omitempty"`
	Stop        []string `json:"stop,omitempty"`
}

OllamaOptions represents Ollama generation options

type OllamaProvider

type OllamaProvider struct {
	// contains filtered or unexported fields
}

OllamaProvider implements the Provider interface for Ollama

func NewOllamaProvider

func NewOllamaProvider(config *ProviderConfig) (*OllamaProvider, error)

NewOllamaProvider creates a new Ollama provider
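
A construction sketch. The Type, Endpoint, and Model values are assumptions (Ollama's conventional local endpoint and a typical model name); DefaultProviderConfig may already supply sensible values:

cfg := DefaultProviderConfig()
cfg.Type = "ollama"                     // assumed type string
cfg.Endpoint = "http://localhost:11434" // Ollama's default local endpoint
cfg.Model = "llama3"                    // assumed model name

p, err := NewOllamaProvider(cfg)
if err != nil {
	log.Fatal(err)
}
defer p.Close()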

func (*OllamaProvider) Close

func (p *OllamaProvider) Close() error

Close closes the provider and cleans up resources

func (*OllamaProvider) Complete

func (p *OllamaProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete generates a completion

func (*OllamaProvider) CompleteStream

func (p *OllamaProvider) CompleteStream(ctx context.Context, req CompletionRequest, callback StreamCallback) error

CompleteStream generates a streaming completion

func (*OllamaProvider) CompleteStreamWithMode

func (p *OllamaProvider) CompleteStreamWithMode(ctx context.Context, req CompletionRequest, callback StreamCallback, mode StreamMode) error

CompleteStreamWithMode generates a streaming completion with explicit mode

func (*OllamaProvider) CompleteWithMode

func (p *OllamaProvider) CompleteWithMode(ctx context.Context, req CompletionRequest, mode StreamMode) (*CompletionResponse, error)

CompleteWithMode generates a completion with explicit streaming mode

func (*OllamaProvider) DeleteModel

func (p *OllamaProvider) DeleteModel(ctx context.Context, model string) error

DeleteModel deletes a model from Ollama

func (*OllamaProvider) EstimateMessagesTokens

func (p *OllamaProvider) EstimateMessagesTokens(messages []Message) int

EstimateMessagesTokens estimates the number of tokens in messages

func (*OllamaProvider) EstimateTokens

func (p *OllamaProvider) EstimateTokens(text string) int

EstimateTokens estimates the number of tokens in a text

func (*OllamaProvider) GetConfig

func (p *OllamaProvider) GetConfig() map[string]interface{}

GetConfig returns provider configuration

func (*OllamaProvider) GetDefaultModels

func (p *OllamaProvider) GetDefaultModels() []string

GetDefaultModels returns commonly used Ollama models

func (*OllamaProvider) GetMaxTokens

func (p *OllamaProvider) GetMaxTokens(model string) int

GetMaxTokens returns the maximum tokens for a model

func (*OllamaProvider) GetModels

func (p *OllamaProvider) GetModels(ctx context.Context) ([]string, error)

GetModels returns available models

func (*OllamaProvider) GetName

func (p *OllamaProvider) GetName() string

GetName returns the provider name

func (*OllamaProvider) GetProviderInfo

func (p *OllamaProvider) GetProviderInfo() map[string]interface{}

GetProviderInfo returns information about the provider

func (*OllamaProvider) GetStreamingConfig

func (p *OllamaProvider) GetStreamingConfig() *StreamingConfig

GetStreamingConfig returns the current streaming configuration

func (*OllamaProvider) GetTokenLimit

func (p *OllamaProvider) GetTokenLimit(model string) int

GetTokenLimit returns the token limit for a model

func (*OllamaProvider) IsHealthy

func (p *OllamaProvider) IsHealthy(ctx context.Context) error

IsHealthy checks if the provider is healthy

func (*OllamaProvider) PullModel

func (p *OllamaProvider) PullModel(ctx context.Context, model string) error

PullModel pulls a model from the Ollama registry
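
A model-management sketch, reusing the provider p from the construction example above; the model name is an assumption:

ctx := context.Background()
if err := p.PullModel(ctx, "llama3"); err != nil {
	log.Fatal(err)
}
models, err := p.GetModels(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println(models) // should now include the pulled model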

func (*OllamaProvider) SetConfig

func (p *OllamaProvider) SetConfig(config map[string]interface{}) error

SetConfig updates provider configuration

func (*OllamaProvider) SetStreamingConfig

func (p *OllamaProvider) SetStreamingConfig(config *StreamingConfig) error

SetStreamingConfig updates the streaming configuration

func (*OllamaProvider) SupportsStreaming

func (p *OllamaProvider) SupportsStreaming() bool

SupportsStreaming returns true if the provider supports streaming

func (*OllamaProvider) SupportsToolCalls

func (p *OllamaProvider) SupportsToolCalls() bool

SupportsToolCalls returns true if the provider supports tool calls

func (*OllamaProvider) ValidateModel

func (p *OllamaProvider) ValidateModel(model string) error

ValidateModel checks if a model is valid for this provider

type OllamaRequest

type OllamaRequest struct {
	Model     string          `json:"model"`
	Messages  []OllamaMessage `json:"messages"`
	Stream    bool            `json:"stream,omitempty"`
	Options   OllamaOptions   `json:"options,omitempty"`
	Format    string          `json:"format,omitempty"`
	KeepAlive string          `json:"keep_alive,omitempty"`
}

OllamaRequest represents an Ollama API request

type OllamaResponse

type OllamaResponse struct {
	Model     string        `json:"model"`
	CreatedAt time.Time     `json:"created_at"`
	Message   OllamaMessage `json:"message"`
	Done      bool          `json:"done"`
	Error     string        `json:"error,omitempty"`
}

OllamaResponse represents an Ollama API response

type OpenAIProvider

type OpenAIProvider struct {
	// contains filtered or unexported fields
}

OpenAIProvider implements the Provider interface for OpenAI

func NewOpenAIProvider

func NewOpenAIProvider(config *ProviderConfig) (*OpenAIProvider, error)

NewOpenAIProvider creates a new OpenAI provider

func (*OpenAIProvider) Close

func (p *OpenAIProvider) Close() error

Close closes the provider and cleans up resources

func (*OpenAIProvider) Complete

func (p *OpenAIProvider) Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

Complete generates a completion

func (*OpenAIProvider) CompleteStream

func (p *OpenAIProvider) CompleteStream(ctx context.Context, req CompletionRequest, callback StreamCallback) error

CompleteStream generates a streaming completion

func (*OpenAIProvider) CompleteStreamWithMode

func (p *OpenAIProvider) CompleteStreamWithMode(ctx context.Context, req CompletionRequest, callback StreamCallback, mode StreamMode) error

CompleteStreamWithMode generates a streaming completion with explicit mode

func (*OpenAIProvider) CompleteWithMode

func (p *OpenAIProvider) CompleteWithMode(ctx context.Context, req CompletionRequest, mode StreamMode) (*CompletionResponse, error)

CompleteWithMode generates a completion with explicit streaming mode

func (*OpenAIProvider) EstimateMessagesTokens

func (p *OpenAIProvider) EstimateMessagesTokens(messages []Message) int

EstimateMessagesTokens estimates the number of tokens in messages

func (*OpenAIProvider) EstimateTokens

func (p *OpenAIProvider) EstimateTokens(text string) int

EstimateTokens estimates the number of tokens in a text

func (*OpenAIProvider) GetConfig

func (p *OpenAIProvider) GetConfig() map[string]interface{}

GetConfig returns provider configuration

func (*OpenAIProvider) GetDefaultModels

func (p *OpenAIProvider) GetDefaultModels() []string

GetDefaultModels returns commonly used OpenAI models

func (*OpenAIProvider) GetMaxTokens

func (p *OpenAIProvider) GetMaxTokens(model string) int

GetMaxTokens returns the maximum tokens for a model

func (*OpenAIProvider) GetModels

func (p *OpenAIProvider) GetModels(ctx context.Context) ([]string, error)

GetModels returns available models

func (*OpenAIProvider) GetName

func (p *OpenAIProvider) GetName() string

GetName returns the provider name

func (*OpenAIProvider) GetProviderInfo

func (p *OpenAIProvider) GetProviderInfo() map[string]interface{}

GetProviderInfo returns information about the provider

func (*OpenAIProvider) GetStreamingConfig

func (p *OpenAIProvider) GetStreamingConfig() *StreamingConfig

GetStreamingConfig returns the current streaming configuration

func (*OpenAIProvider) GetTokenLimit

func (p *OpenAIProvider) GetTokenLimit(model string) int

GetTokenLimit returns the token limit for a model

func (*OpenAIProvider) IsHealthy

func (p *OpenAIProvider) IsHealthy(ctx context.Context) error

IsHealthy checks if the provider is healthy

func (*OpenAIProvider) SetConfig

func (p *OpenAIProvider) SetConfig(config map[string]interface{}) error

SetConfig updates provider configuration

func (*OpenAIProvider) SetStreamingConfig

func (p *OpenAIProvider) SetStreamingConfig(config *StreamingConfig) error

SetStreamingConfig updates the streaming configuration

func (*OpenAIProvider) SupportsStreaming

func (p *OpenAIProvider) SupportsStreaming() bool

SupportsStreaming returns true if the provider supports streaming

func (*OpenAIProvider) SupportsToolCalls

func (p *OpenAIProvider) SupportsToolCalls() bool

SupportsToolCalls returns true if the provider supports tool calls

func (*OpenAIProvider) ValidateModel

func (p *OpenAIProvider) ValidateModel(model string) error

ValidateModel checks if a model is valid for this provider

type Provider

type Provider interface {
	// GetName returns the provider name
	GetName() string

	// GetModels returns available models
	GetModels(ctx context.Context) ([]string, error)

	// Complete generates a completion (auto-detects streaming based on request)
	Complete(ctx context.Context, req CompletionRequest) (*CompletionResponse, error)

	// CompleteStream generates a streaming completion
	CompleteStream(ctx context.Context, req CompletionRequest, callback StreamCallback) error

	// CompleteWithMode generates a completion with explicit streaming mode
	CompleteWithMode(ctx context.Context, req CompletionRequest, mode StreamMode) (*CompletionResponse, error)

	// CompleteStreamWithMode generates a streaming completion with explicit mode
	CompleteStreamWithMode(ctx context.Context, req CompletionRequest, callback StreamCallback, mode StreamMode) error

	// IsHealthy checks if the provider is healthy
	IsHealthy(ctx context.Context) error

	// GetConfig returns provider configuration
	GetConfig() map[string]interface{}

	// SetConfig updates provider configuration
	SetConfig(config map[string]interface{}) error

	// SupportsStreaming returns true if the provider supports streaming
	SupportsStreaming() bool

	// GetStreamingConfig returns the current streaming configuration
	GetStreamingConfig() *StreamingConfig

	// SetStreamingConfig updates the streaming configuration
	SetStreamingConfig(config *StreamingConfig) error

	// Close closes the provider and cleans up resources
	Close() error
}

Provider represents an LLM provider interface
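
Because every provider satisfies this interface, calling code can stay provider-agnostic. A sketch of a helper written against the interface alone:

// ask runs a single-turn completion against any Provider.
func ask(ctx context.Context, p Provider, prompt string) (string, error) {
	if err := p.IsHealthy(ctx); err != nil {
		return "", fmt.Errorf("%s unhealthy: %w", p.GetName(), err)
	}
	resp, err := p.Complete(ctx, CompletionRequest{
		Messages: []Message{{Role: "user", Content: prompt}},
	})
	if err != nil {
		return "", err
	}
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("empty response from %s", p.GetName())
	}
	return resp.Choices[0].Message.Content, nil
}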

type ProviderConfig

type ProviderConfig struct {
	Name        string                 `json:"name"`
	Type        string                 `json:"type"`
	Endpoint    string                 `json:"endpoint,omitempty"`
	APIKey      string                 `json:"api_key,omitempty"`
	Model       string                 `json:"model,omitempty"`
	Temperature float64                `json:"temperature,omitempty"`
	MaxTokens   int                    `json:"max_tokens,omitempty"`
	Timeout     time.Duration          `json:"timeout,omitempty"`
	RetryCount  int                    `json:"retry_count,omitempty"`
	RetryDelay  time.Duration          `json:"retry_delay,omitempty"`
	Headers     map[string]string      `json:"headers,omitempty"`
	Streaming   *StreamingConfig       `json:"streaming,omitempty"`
	Metadata    map[string]interface{} `json:"metadata,omitempty"`
}

ProviderConfig represents provider configuration

func DefaultProviderConfig

func DefaultProviderConfig() *ProviderConfig

DefaultProviderConfig returns default provider configuration

type ProviderManager

type ProviderManager struct {
	// contains filtered or unexported fields
}

ProviderManager manages multiple LLM providers

func NewProviderManager

func NewProviderManager() *ProviderManager

NewProviderManager creates a new provider manager

func (*ProviderManager) Close

func (pm *ProviderManager) Close() error

Close closes all providers

func (*ProviderManager) Complete

func (pm *ProviderManager) Complete(ctx context.Context, providerName string, req CompletionRequest) (*CompletionResponse, error)

Complete generates a completion using the specified provider (or default)

func (*ProviderManager) CompleteStream

func (pm *ProviderManager) CompleteStream(ctx context.Context, providerName string, req CompletionRequest, callback StreamCallback) error

CompleteStream generates a streaming completion using the specified provider (or default)

func (*ProviderManager) CompleteStreamWithMode

func (pm *ProviderManager) CompleteStreamWithMode(ctx context.Context, providerName string, req CompletionRequest, callback StreamCallback, mode StreamMode) error

CompleteStreamWithMode generates a streaming completion with explicit mode

func (*ProviderManager) CompleteWithMode

func (pm *ProviderManager) CompleteWithMode(ctx context.Context, providerName string, req CompletionRequest, mode StreamMode) (*CompletionResponse, error)

CompleteWithMode generates a completion with explicit streaming mode

func (*ProviderManager) DisableStreaming

func (pm *ProviderManager) DisableStreaming(providerName string) error

DisableStreaming disables streaming for a provider

func (*ProviderManager) EnableStreaming

func (pm *ProviderManager) EnableStreaming(providerName string) error

EnableStreaming enables streaming for a provider

func (*ProviderManager) GetAllModels

func (pm *ProviderManager) GetAllModels(ctx context.Context) (map[string][]string, error)

GetAllModels returns all available models from all providers

func (*ProviderManager) GetDefaultProvider

func (pm *ProviderManager) GetDefaultProvider() (Provider, error)

GetDefaultProvider returns the default provider

func (*ProviderManager) GetProvider

func (pm *ProviderManager) GetProvider(name string) (Provider, error)

GetProvider returns a provider by name

func (*ProviderManager) GetProviderModels

func (pm *ProviderManager) GetProviderModels(ctx context.Context, providerName string) ([]string, error)

GetProviderModels returns available models for a provider

func (*ProviderManager) HealthCheck

func (pm *ProviderManager) HealthCheck(ctx context.Context) map[string]error

HealthCheck checks the health of all providers

func (*ProviderManager) ListProviders

func (pm *ProviderManager) ListProviders() []string

ListProviders returns all registered provider names

func (*ProviderManager) RegisterProvider

func (pm *ProviderManager) RegisterProvider(name string, provider Provider) error

RegisterProvider registers a new provider

func (*ProviderManager) SetDefaultProvider

func (pm *ProviderManager) SetDefaultProvider(name string) error

SetDefaultProvider sets the default provider

func (*ProviderManager) SetStreamingMode

func (pm *ProviderManager) SetStreamingMode(providerName string, mode StreamMode) error

SetStreamingMode sets the streaming mode for a provider

func (*ProviderManager) UnregisterProvider

func (pm *ProviderManager) UnregisterProvider(name string) error

UnregisterProvider removes a provider
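
An end-to-end sketch: register a provider, make it the default, and complete through the manager. Passing an empty provider name to select the default is an assumption based on the "(or default)" wording in the doc comments:

ctx := context.Background()
pm := NewProviderManager()

ollama, err := NewOllamaProvider(DefaultProviderConfig())
if err != nil {
	log.Fatal(err)
}
if err := pm.RegisterProvider("ollama", ollama); err != nil {
	log.Fatal(err)
}
if err := pm.SetDefaultProvider("ollama"); err != nil {
	log.Fatal(err)
}
defer pm.Close()

// Assumed: an empty provider name falls through to the default provider.
resp, err := pm.Complete(ctx, "", CompletionRequest{
	Messages: []Message{{Role: "user", Content: "ping"}},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Choices[0].Message.Content)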

type SimpleTokenCounter

type SimpleTokenCounter struct{}

SimpleTokenCounter is a simple token counter implementation

func NewSimpleTokenCounter

func NewSimpleTokenCounter() *SimpleTokenCounter

NewSimpleTokenCounter creates a new simple token counter

func (*SimpleTokenCounter) CountMessagesTokens

func (stc *SimpleTokenCounter) CountMessagesTokens(messages []Message) (int, error)

CountMessagesTokens counts tokens in messages

func (*SimpleTokenCounter) CountTokens

func (stc *SimpleTokenCounter) CountTokens(text string) (int, error)

CountTokens counts tokens in text (rough approximation)
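
A usage sketch; per the doc comments, counts are rough approximations rather than exact tokenizer output:

var tc TokenCounter = NewSimpleTokenCounter()
n, err := tc.CountTokens("The quick brown fox jumps over the lazy dog")
if err != nil {
	log.Fatal(err)
}
fmt.Println(n)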

type StreamCallback

type StreamCallback func(chunk CompletionResponse) error

StreamCallback is called for each streaming chunk
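
A streaming sketch, assuming p is any Provider and req is a CompletionRequest. That streamed chunks carry their text in Delta rather than Message, and that a non-nil callback error aborts the stream, are assumptions drawn from the field names and type docs:

err := p.CompleteStream(ctx, req, func(chunk CompletionResponse) error {
	for _, c := range chunk.Choices {
		fmt.Print(c.Delta.Content) // assumed: incremental text arrives in Delta
	}
	return nil // assumed: returning an error would abort the stream
})
if err != nil {
	log.Fatal(err)
}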

type StreamMode

type StreamMode string

StreamMode represents the streaming mode for providers

const (
	StreamModeNone   StreamMode = "none"   // No streaming
	StreamModeAuto   StreamMode = "auto"   // Auto-detect based on request
	StreamModeForced StreamMode = "forced" // Always stream
)

type StreamingConfig

type StreamingConfig struct {
	Enabled    bool       `json:"enabled"`
	Mode       StreamMode `json:"mode"`
	ChunkSize  int        `json:"chunk_size,omitempty"`
	BufferSize int        `json:"buffer_size,omitempty"`
	FlushDelay int        `json:"flush_delay_ms,omitempty"` // milliseconds
	KeepAlive  bool       `json:"keep_alive,omitempty"`
}

StreamingConfig represents streaming configuration

func DefaultStreamingConfig

func DefaultStreamingConfig() *StreamingConfig

DefaultStreamingConfig returns default streaming configuration
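
A configuration sketch, assuming p is any Provider; the values are illustrative:

sc := DefaultStreamingConfig()
sc.Mode = StreamModeForced // always stream
sc.FlushDelay = 50         // milliseconds, per the field comment
if err := p.SetStreamingConfig(sc); err != nil {
	log.Fatal(err)
}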

type TokenCounter

type TokenCounter interface {
	CountTokens(text string) (int, error)
	CountMessagesTokens(messages []Message) (int, error)
}

TokenCounter interface for counting tokens

type ToolCall

type ToolCall struct {
	ID       string                 `json:"id"`
	Type     string                 `json:"type"`
	Function FunctionCall           `json:"function"`
	Metadata map[string]interface{} `json:"metadata,omitempty"`
}

ToolCall represents a tool call in a message

type ToolDefinition

type ToolDefinition struct {
	Type     string   `json:"type"`
	Function Function `json:"function"`
}

ToolDefinition represents a tool that can be called by the LLM
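
A definition sketch. Treating Parameters as a JSON Schema object is an assumption based on common LLM tool-calling APIs, not something this package documents:

weatherTool := ToolDefinition{
	Type: "function",
	Function: Function{
		Name:        "get_weather",
		Description: "Get the current weather for a city",
		Parameters: map[string]interface{}{ // assumed JSON Schema shape
			"type": "object",
			"properties": map[string]interface{}{
				"city": map[string]interface{}{"type": "string"},
			},
			"required": []string{"city"},
		},
	},
}
req.Tools = append(req.Tools, weatherTool)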

type Usage

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Usage represents token usage information
