Documentation
¶
Overview ¶
Package llm provides a lightweight interface for Large Language Model operations.
Index ¶
- Constants
- Variables
- func CountTokens(text string) int
- func EstimateMaxTokens(promptTokens, contextWindowSize int) int
- func Version() string
- type CompletionRequest
- type CompletionResponse
- type MockModel
- func (m *MockModel) Complete(ctx context.Context, request CompletionRequest) (CompletionResponse, error)
- func (m *MockModel) GetApiKey() string
- func (m *MockModel) GetMaxTokens() int
- func (m *MockModel) GetModel() string
- func (m *MockModel) GetOutputFormat() OutputFormat
- func (m *MockModel) GetProjectID() string
- func (m *MockModel) GetProvider() Provider
- func (m *MockModel) GetRegion() string
- func (m *MockModel) GetTemperature() float64
- func (m *MockModel) GetVerbose() bool
- type ModelInterface
- type ModelOptions
- type OutputFormat
- type Provider
Constants ¶
const ( GeminiModel1Pro = "gemini-pro" GeminiModel1Flash = "gemini-pro-flash" GeminiModel2Pro = "gemini-2-pro" GeminiModel2Flash = "gemini-2-flash" )
Gemini model constants
const ( OpenAIModelGPT35Turbo = "gpt-3.5-turbo" OpenAIModelGPT4 = "gpt-4" OpenAIModelGPT4Turbo = "gpt-4-turbo-preview" OpenAIModelGPT4Vision = "gpt-4-vision-preview" OpenAIModelGPT4OMini = "gpt-4o-mini" OpenAIModelGPT4O = "gpt-4o" )
OpenAI model constants
const ( VertexModelGemini20Flash = "gemini-2.0-flash-001" VertexModelGemini20FlashLite = "gemini-2.0-flash-lite-001" VertexModelGemini20FlashImageGen = "gemini-2.0-flash-exp-image-generation" VertexModelGemini25ProPreview = "gemini-2.5-pro-preview-03-25" VertexModelGemini15Pro = "gemini-1.5-pro" // supported but older VertexModelGemini15Flash = "gemini-1.5-flash" // supported but older )
Vertex AI model constants
Variables ¶
var ErrInvalidRequest = errors.New("invalid request")
ErrInvalidRequest is returned when a request is invalid
var ErrServiceUnavailable = errors.New("service unavailable")
ErrServiceUnavailable is returned when the LLM service is unavailable
Functions ¶
func CountTokens ¶
CountTokens provides a simple approximation of token counting. Note: this is a basic implementation and is not accurate for all models; production code should use model-specific tokenizers.
func EstimateMaxTokens ¶
EstimateMaxTokens estimates the maximum number of tokens that could be generated, given the model's context window size and the prompt length.
Types ¶
type CompletionRequest ¶
type CompletionRequest struct {
// SystemPrompt contains instructions for the LLM
SystemPrompt string `json:"system_prompt"`
// UserPrompt contains the actual query or content to process
UserPrompt string `json:"user_prompt"`
// MaxTokens is the maximum number of tokens to generate
MaxTokens int `json:"max_tokens"`
// Temperature controls randomness in generation (0.0-1.0)
Temperature float64 `json:"temperature"`
}
CompletionRequest represents a request to generate a completion
type CompletionResponse ¶
type CompletionResponse struct {
// Text is the generated completion text
Text string `json:"text"`
// TokensUsed is the number of tokens used for this request
TokensUsed int `json:"tokens_used"`
}
CompletionResponse represents a response from a completion request
type MockModel ¶
type MockModel struct {
// Response is the predefined response to return
Response CompletionResponse
// Error is the predefined error to return
Error error
// contains filtered or unexported fields
}
MockModel implements the ModelInterface for testing purposes
func NewMockModel ¶
func NewMockModel() *MockModel
NewMockModel creates a new mock model with a default response
func NewMockModelWithOptions ¶
func NewMockModelWithOptions(options ModelOptions) *MockModel
NewMockModelWithOptions creates a new mock model with the specified options
func (*MockModel) Complete ¶
func (m *MockModel) Complete(ctx context.Context, request CompletionRequest) (CompletionResponse, error)
Complete implements the ModelInterface
func (*MockModel) GetMaxTokens ¶
GetMaxTokens implements the ModelInterface
func (*MockModel) GetOutputFormat ¶
func (m *MockModel) GetOutputFormat() OutputFormat
GetOutputFormat implements the ModelInterface
func (*MockModel) GetProjectID ¶
GetProjectID implements the ModelInterface
func (*MockModel) GetProvider ¶
GetProvider implements the ModelInterface
func (*MockModel) GetTemperature ¶
GetTemperature implements the ModelInterface
func (*MockModel) GetVerbose ¶
GetVerbose implements the ModelInterface
type ModelInterface ¶
type ModelInterface interface {
// Complete generates a completion for the provided prompt
Complete(ctx context.Context, request CompletionRequest) (CompletionResponse, error)
// GetProvider returns the provider of the model
GetProvider() Provider
// GetOutputFormat returns the output format of the model
GetOutputFormat() OutputFormat
// GetApiKey returns the API key of the model
GetApiKey() string
// GetModel returns the model name (e.g. "gpt-4o")
GetModel() string
// GetMaxTokens returns the maximum number of tokens of the model
GetMaxTokens() int
// GetTemperature returns the temperature of the model
GetTemperature() float64
// GetProjectID returns the project ID of the model
GetProjectID() string
// GetRegion returns the region of the model
GetRegion() string
// GetVerbose reports whether verbose mode is enabled for the model
GetVerbose() bool
}
ModelInterface defines the interface for interacting with Large Language Models
func ImageModel ¶
func ImageModel(provider Provider) (ModelInterface, error)
ImageModel creates an LLM model for image output
func JSONModel ¶
func JSONModel(provider Provider) (ModelInterface, error)
JSONModel creates an LLM model for JSON output
func NewModel ¶
func NewModel(options ModelOptions) (ModelInterface, error)
NewModel creates a new LLM model based on the provided options
func TextModel ¶
func TextModel(provider Provider) (ModelInterface, error)
TextModel creates an LLM model for text output
type ModelOptions ¶
type ModelOptions struct {
Provider Provider
OutputFormat OutputFormat
ApiKey string
Model string
MaxTokens int
Temperature float64
ProjectID string
Region string
Verbose bool
}
ModelOptions contains configuration options for creating an LLM model
type OutputFormat ¶
type OutputFormat string
OutputFormat specifies the desired output format from the LLM
const ( OutputFormatText OutputFormat = "text" OutputFormatJSON OutputFormat = "json" OutputFormatXML OutputFormat = "xml" OutputFormatYAML OutputFormat = "yaml" OutputFormatEnum OutputFormat = "enum" OutputFormatImagePNG OutputFormat = "image/png" OutputFormatImageJPG OutputFormat = "image/jpeg" )