Documentation
¶
Index ¶
- func Float(f float64) *float64
- func Int(i int) *int
- type FullRequest
- type Gen
- type Generator
- func (b *Generator) AddTools(tool ...tools.Tool) *Generator
- func (b *Generator) FrequencyPenalty(freq float64) *Generator
- func (b *Generator) IncludeThinkingParts(thinkingParts bool) *Generator
- func (b *Generator) MaxTokens(maxTokens int) *Generator
- func (b *Generator) Model(model Model) *Generator
- func (b *Generator) Output(s *schema.JSON) *Generator
- func (b *Generator) PresencePenalty(prec float64) *Generator
- func (b *Generator) Prompt(prompts ...prompt.Prompt) (*Response, error)
- func (b *Generator) SetConfig(config Request) *Generator
- func (b *Generator) SetToolConfig(tool tools.Tool) *Generator
- func (b *Generator) SetTools(tool ...tools.Tool) *Generator
- func (b *Generator) StopAt(stop ...string) *Generator
- func (b *Generator) Stream(prompts ...prompt.Prompt) (<-chan *StreamResponse, error)
- func (b *Generator) StrictOutput(strict bool) *Generator
- func (b *Generator) System(prompt string) *Generator
- func (b *Generator) Temperature(temperature float64) *Generator
- func (b *Generator) ThinkingBudget(thinkingBudget int) *Generator
- func (b *Generator) Tools() []tools.Tool
- func (b *Generator) TopK(topK int) *Generator
- func (b *Generator) TopP(topP float64) *Generator
- func (b *Generator) WithContext(ctx context.Context) *Generator
- type Model
- type Option
- func WithContext(ctx context.Context) Option
- func WithFrequencyPenalty(freq float64) Option
- func WithMaxTokens(maxTokens int) Option
- func WithModel(model Model) Option
- func WithOutput(s *schema.JSON) Option
- func WithPresencePenalty(presence float64) Option
- func WithRequest(req Request) Option
- func WithStopAt(stop ...string) Option
- func WithStrictOutput(strict bool) Option
- func WithSystem(prompt string) Option
- func WithTemperature(temperature float64) Option
- func WithThinkingBudget(thinkingBudget int) Option
- func WithThinkingParts(thinkingParts bool) Option
- func WithToolConfig(tool tools.Tool) Option
- func WithTools(tools ...tools.Tool) Option
- func WithTopK(topK int) Option
- func WithTopP(topP float64) Option
- type Prompter
- type Request
- type Response
- type StreamResponse
- type StreamResponseError
- type StreamingResponseType
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
Types ¶
type FullRequest ¶
type Generator ¶
func (*Generator) FrequencyPenalty ¶ added in v0.6.0
func (*Generator) IncludeThinkingParts ¶ added in v0.11.9
func (*Generator) PresencePenalty ¶ added in v0.6.0
func (*Generator) Stream ¶ added in v0.11.7
func (b *Generator) Stream(prompts ...prompt.Prompt) (<-chan *StreamResponse, error)
func (*Generator) StrictOutput ¶ added in v0.8.0
func (*Generator) Temperature ¶
func (*Generator) ThinkingBudget ¶ added in v0.11.9
ThinkingBudget sets the thinking budget for the generator. For models that do not accept a token count as the thinking budget, the number of tokens is translated into the enums "low", "medium", and "high", where "low" is fewer than 2,000 tokens, "medium" is 2,000–10,000 tokens, and "high" is 10,001 tokens or more.
type Model ¶
// Model identifies a concrete language model at a provider, together with
// optional descriptive metadata and capability flags.
type Model struct {
	// Provider is the name of the service hosting the model.
	Provider string `json:"provider"`
	// Name is the provider-specific model identifier.
	Name string `json:"name"`
	// Config carries additional, provider-specific configuration values.
	Config map[string]any `json:"config,omitempty"`
	// Description is a human-readable description of the model.
	Description string `json:"description,omitempty"`
	// InputContentTypes lists the content types the model accepts as input
	// (presumably MIME-like type names — TODO confirm against providers).
	InputContentTypes []string `json:"input_content_types,omitempty"`
	// InputMaxToken is the maximum number of input tokens the model accepts.
	InputMaxToken int `json:"input_max_token,omitempty"`
	// OutputMaxToken is the maximum number of output tokens the model produces.
	OutputMaxToken int `json:"output_max_token,omitempty"`
	// SupportTools reports whether the model supports tool calling.
	SupportTools bool `json:"support_tools,omitempty"`
	// SupportStructuredOutput reports whether the model supports
	// schema-constrained (structured) output.
	SupportStructuredOutput bool `json:"support_structured_output,omitempty"`
}
type Option ¶
func WithContext ¶ added in v0.6.0
func WithFrequencyPenalty ¶ added in v0.6.0
func WithMaxTokens ¶
func WithOutput ¶
func WithPresencePenalty ¶ added in v0.6.0
func WithRequest ¶
func WithStopAt ¶
func WithStrictOutput ¶ added in v0.8.0
func WithSystem ¶
func WithTemperature ¶
func WithThinkingBudget ¶ added in v0.11.9
func WithThinkingParts ¶ added in v0.11.9
func WithToolConfig ¶
type Request ¶
// Request holds the full set of generation parameters for a model call.
// Pointer fields distinguish "unset" (nil, omitted from JSON) from an
// explicit zero value.
type Request struct {
	// Context carries cancellation/deadline for the request. It is never
	// serialized (json:"-").
	// NOTE(review): storing a Context in a struct is discouraged by Go
	// guidelines (contexts should be passed as parameters) — presumably kept
	// here because Request doubles as a config object; confirm.
	Context context.Context `json:"-"`
	// Stream selects streaming delivery of the response.
	Stream bool `json:"stream"`
	// Model selects which model handles the request.
	Model Model `json:"model"`
	// SystemPrompt is the system instruction sent with the request.
	SystemPrompt string `json:"system_prompt,omitempty"`
	// OutputSchema, when non-nil, constrains the response to this JSON schema.
	OutputSchema *schema.JSON `json:"output_schema,omitempty"`
	// StrictOutput requests strict adherence to OutputSchema.
	StrictOutput bool `json:"output_strict,omitempty"`
	// Tools lists the tools made available to the model.
	Tools []tools.Tool `json:"tools,omitempty"`
	// ToolConfig, when non-nil, supplies a specific tool configuration
	// (serialized under the singular "tool" key).
	ToolConfig *tools.Tool `json:"tool,omitempty"`
	// ThinkingBudget is the thinking-token budget; nil leaves the provider default.
	ThinkingBudget *int `json:"thinking_budget,omitempty"`
	// ThinkingParts controls whether thinking parts are included in responses.
	ThinkingParts *bool `json:"thinking_parts,omitempty"`
	// TopP is the nucleus-sampling probability mass; nil leaves the provider default.
	TopP *float64 `json:"top_p,omitempty"`
	// TopK is the top-k sampling cutoff; nil leaves the provider default.
	TopK *int `json:"top_k,omitempty"`
	// Temperature is the sampling temperature; nil leaves the provider default.
	Temperature *float64 `json:"temperature,omitempty"`
	// MaxTokens caps the number of generated tokens; nil leaves the provider default.
	MaxTokens *int `json:"max_tokens,omitempty"`
	// FrequencyPenalty penalizes frequent tokens; nil leaves the provider default.
	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
	// PresencePenalty penalizes already-present tokens; nil leaves the provider default.
	PresencePenalty *float64 `json:"presence_penalty,omitempty"`
	// StopSequences lists strings at which generation stops.
	StopSequences []string `json:"stop_sequences,omitempty"`
}
type Response ¶
type StreamResponse ¶ added in v0.11.7
// StreamResponse is a single event delivered on the channel returned by
// Generator.Stream. Type determines which of the other fields are meaningful.
type StreamResponse struct {
	// Type discriminates the event kind (delta, thinking_delta, metadata, EOF, ERROR).
	Type StreamingResponseType `json:"type"`
	// Role is the role associated with this chunk of the response.
	Role prompt.Role `json:"role"`
	// Index is the position of this chunk within the stream — presumably a
	// running chunk/message index; confirm against the stream implementation.
	Index int `json:"index"`
	// Content is the text payload of the chunk.
	Content string `json:"content"`
	ToolCall *tools.Call `json:"tool_call,omitempty"` // Only for TYPE_DELTA
	// Metadata carries model/usage metadata, presumably populated on
	// TYPE_METADATA events — confirm.
	Metadata *models.Metadata `json:"metadata,omitempty"`
}
func (StreamResponse) Error ¶ added in v0.11.7
func (r StreamResponse) Error() error
type StreamResponseError ¶ added in v0.11.7
// StreamResponseError is a string-based error type for stream failures;
// it implements the error interface via its Error method.
type StreamResponseError string
func (StreamResponseError) Error ¶ added in v0.11.7
func (s StreamResponseError) Error() string
type StreamingResponseType ¶ added in v0.11.7
// StreamingResponseType identifies the kind of payload carried by a
// StreamResponse.
// NOTE(review): ALL_CAPS constant names below deviate from Go's MixedCaps
// convention (e.g. TypeDelta); renaming would break the public API, so they
// are only flagged here.
type StreamingResponseType string

// TYPE_DELTA is an incremental content chunk; ToolCall is only set on this type.
const TYPE_DELTA StreamingResponseType = "delta"

// TYPE_EOF signals the end of the stream.
const TYPE_EOF StreamingResponseType = "EOF"

// TYPE_ERROR signals a stream error.
const TYPE_ERROR StreamingResponseType = "ERROR"

// TYPE_METADATA carries response metadata (see StreamResponse.Metadata).
const TYPE_METADATA StreamingResponseType = "metadata"

// TYPE_THINKING_DELTA is an incremental chunk of the model's thinking output.
const TYPE_THINKING_DELTA StreamingResponseType = "thinking_delta"
Click to show internal directories.
Click to hide internal directories.