gen

package
v1.0.10
Published: Jan 27, 2026 License: MIT Imports: 9 Imported by: 1

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func Float added in v0.6.0

func Float(f float64) *float64

func Int added in v0.6.0

func Int(i int) *int

Types

type FullRequest

type FullRequest struct {
	Request
	Prompts []prompt.Prompt `json:"prompts"`
}

type Gen

type Gen interface {
	Provider() string
	Generator(options ...Option) *Generator
}

type Generator

type Generator struct {
	Prompter Prompter
	Request  Request
}

func (*Generator) AddTools

func (b *Generator) AddTools(tool ...tools.Tool) *Generator

func (*Generator) FrequencyPenalty added in v0.6.0

func (b *Generator) FrequencyPenalty(freq float64) *Generator

func (*Generator) IncludeThinkingParts added in v0.11.9

func (b *Generator) IncludeThinkingParts(thinkingParts bool) *Generator

func (*Generator) MaxTokens

func (b *Generator) MaxTokens(maxTokens int) *Generator

func (*Generator) Model

func (b *Generator) Model(model Model) *Generator

func (*Generator) Output added in v0.6.0

func (b *Generator) Output(s *schema.JSON) *Generator

func (*Generator) PresencePenalty added in v0.6.0

func (b *Generator) PresencePenalty(prec float64) *Generator

func (*Generator) Prompt

func (b *Generator) Prompt(prompts ...prompt.Prompt) (*Response, error)

func (*Generator) SetConfig

func (b *Generator) SetConfig(config Request) *Generator

func (*Generator) SetToolConfig

func (b *Generator) SetToolConfig(tool tools.Tool) *Generator

func (*Generator) SetTools

func (b *Generator) SetTools(tool ...tools.Tool) *Generator

func (*Generator) StopAt

func (b *Generator) StopAt(stop ...string) *Generator

func (*Generator) Stream added in v0.11.7

func (b *Generator) Stream(prompts ...prompt.Prompt) (<-chan *StreamResponse, error)

func (*Generator) StrictOutput added in v0.8.0

func (b *Generator) StrictOutput(strict bool) *Generator

func (*Generator) System

func (b *Generator) System(prompt string) *Generator

func (*Generator) Temperature

func (b *Generator) Temperature(temperature float64) *Generator

func (*Generator) ThinkingBudget added in v0.11.9

func (b *Generator) ThinkingBudget(thinkingBudget int) *Generator

ThinkingBudget sets the thinking budget for the generator. For models that do not accept a token count as the thinking budget, the number of tokens is translated into the enums "low", "medium", and "high", where "low" is fewer than 2,000 tokens, "medium" is 2,000–10,000, and "high" is 10,001 or more.
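
As a rough sketch of that documented translation (the helper below is hypothetical and not part of this package; the real mapping happens inside the provider adapters):

// thinkingBudgetLevel mirrors the documented token-to-enum mapping.
// Hypothetical helper for illustration only.
func thinkingBudgetLevel(tokens int) string {
	switch {
	case tokens < 2000:
		return "low"
	case tokens <= 10000:
		return "medium"
	default:
		return "high"
	}
}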

func (*Generator) Tools

func (b *Generator) Tools() []tools.Tool

func (*Generator) TopK added in v0.6.0

func (b *Generator) TopK(topK int) *Generator

func (*Generator) TopP

func (b *Generator) TopP(topP float64) *Generator

func (*Generator) WithContext added in v0.6.0

func (b *Generator) WithContext(ctx context.Context) *Generator
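
The Generator is a fluent builder: each setter returns the receiver, and Prompt (or Stream) fires the request. A minimal usage sketch; the import paths below are placeholders, not the package's real module path:

package genexample

import (
	"context"

	"example.com/yourmod/gen"    // placeholder import path; use the real module path
	"example.com/yourmod/prompt" // placeholder import path
)

// ask configures a Generator with the builder methods listed above and runs
// a single Prompt call, returning the response text.
func ask(g *gen.Generator, prompts ...prompt.Prompt) (string, error) {
	resp, err := g.
		WithContext(context.Background()).
		System("You are a terse assistant.").
		Temperature(0.2).
		MaxTokens(1024).
		Prompt(prompts...)
	if err != nil {
		return "", err
	}
	return resp.AsText()
}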

type Model

type Model struct {
	Provider string `json:"provider"`
	Name     string `json:"name"`

	Config map[string]any `json:"config,omitempty"`

	Description string `json:"description,omitempty"`

	InputContentTypes []string `json:"input_content_types,omitempty"`

	InputMaxToken  int `json:"input_max_token,omitempty"`
	OutputMaxToken int `json:"output_max_token,omitempty"`

	SupportTools            bool `json:"support_tools,omitempty"`
	SupportStructuredOutput bool `json:"support_structured_output,omitempty"`
}

func ToModel added in v0.11.1

func ToModel(fqn string) (Model, error)

func (Model) FQN

func (m Model) FQN() string

func (Model) String

func (m Model) String() string
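
A short sketch of round-tripping a Model through ToModel and FQN; the "someprovider/some-model" FQN format is an assumption, and the gen import is the placeholder from the sketch above:

// parseModel round-trips a fully qualified model name. The "provider/name"
// format shown here is illustrative, not confirmed by this page.
func parseModel() error {
	m, err := gen.ToModel("someprovider/some-model") // hypothetical FQN
	if err != nil {
		return err
	}
	_ = m.FQN()    // back to the fully qualified name
	_ = m.String() // human-readable form
	return nil
}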

type Option

type Option func(generator *Generator) *Generator

func WithContext added in v0.6.0

func WithContext(ctx context.Context) Option

func WithFrequencyPenalty added in v0.6.0

func WithFrequencyPenalty(freq float64) Option

func WithMaxTokens

func WithMaxTokens(maxTokens int) Option

func WithModel

func WithModel(model Model) Option

func WithOutput

func WithOutput(s *schema.JSON) Option

func WithPresencePenalty added in v0.6.0

func WithPresencePenalty(presence float64) Option

func WithRequest

func WithRequest(req Request) Option

func WithStopAt

func WithStopAt(stop ...string) Option

func WithStrictOutput added in v0.8.0

func WithStrictOutput(strict bool) Option

func WithSystem

func WithSystem(prompt string) Option

func WithTemperature

func WithTemperature(temperature float64) Option

func WithThinkingBudget added in v0.11.9

func WithThinkingBudget(thinkingBudget int) Option

func WithThinkingParts added in v0.11.9

func WithThinkingParts(thinkingParts bool) Option

func WithToolConfig

func WithToolConfig(tool tools.Tool) Option

func WithTools

func WithTools(tools ...tools.Tool) Option

func WithTopK added in v0.6.0

func WithTopK(topK int) Option

func WithTopP

func WithTopP(topP float64) Option
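
Options apply the same settings up front when a Generator is obtained from a provider that implements the Gen interface. A sketch, with the same placeholder imports as above; someModel is any gen.Model value:

// newGenerator builds a preconfigured Generator from any provider client
// implementing gen.Gen.
func newGenerator(client gen.Gen, someModel gen.Model) *gen.Generator {
	return client.Generator(
		gen.WithModel(someModel),
		gen.WithSystem("You are a terse assistant."),
		gen.WithTemperature(0.2),
		gen.WithMaxTokens(1024),
		gen.WithStopAt("\n\n"),
	)
}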

type Prompter

type Prompter interface {
	SetRequest(request Request)
	Prompt(prompts ...prompt.Prompt) (*Response, error)
	Stream(prompts ...prompt.Prompt) (<-chan *StreamResponse, error)
}
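
Prompter is the hook a provider backend implements behind a Generator. A minimal stub, e.g. for tests, is sketched below (hypothetical, not shipped with the package; placeholder imports as above):

// stubPrompter satisfies gen.Prompter and echoes a canned response.
type stubPrompter struct {
	req gen.Request
}

func (s *stubPrompter) SetRequest(request gen.Request) { s.req = request }

func (s *stubPrompter) Prompt(prompts ...prompt.Prompt) (*gen.Response, error) {
	return &gen.Response{Texts: []string{"stub reply"}}, nil
}

func (s *stubPrompter) Stream(prompts ...prompt.Prompt) (<-chan *gen.StreamResponse, error) {
	ch := make(chan *gen.StreamResponse, 2)
	ch <- &gen.StreamResponse{Type: gen.TYPE_DELTA, Content: "stub reply"}
	ch <- &gen.StreamResponse{Type: gen.TYPE_EOF}
	close(ch)
	return ch, nil
}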

type Request

type Request struct {
	Context context.Context `json:"-"`

	Stream bool `json:"stream"`

	Model        Model  `json:"model"`
	SystemPrompt string `json:"system_prompt,omitempty"`

	OutputSchema *schema.JSON `json:"output_schema,omitempty"`
	StrictOutput bool         `json:"output_strict,omitempty"`

	Tools      []tools.Tool `json:"tools,omitempty"`
	ToolConfig *tools.Tool  `json:"tool,omitempty"`

	ThinkingBudget *int  `json:"thinking_budget,omitempty"`
	ThinkingParts  *bool `json:"thinking_parts,omitempty"`

	TopP             *float64 `json:"top_p,omitempty"`
	TopK             *int     `json:"top_k,omitempty"`
	Temperature      *float64 `json:"temperature,omitempty"`
	MaxTokens        *int     `json:"max_tokens,omitempty"`
	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
	PresencePenalty  *float64 `json:"presence_penalty,omitempty"`
	StopSequences    []string `json:"stop_sequences,omitempty"`
}
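
Request carries the resolved configuration; the optional tuning knobs are pointers so that "unset" is distinguishable from zero, which is what the package-level Float and Int helpers are for. A sketch of building one by hand (values are arbitrary):

// baseRequest shows the Float/Int helpers populating Request's optional
// pointer fields; nil pointers simply mean "use the provider default".
func baseRequest(m gen.Model) gen.Request {
	return gen.Request{
		Model:         m,
		SystemPrompt:  "You are a terse assistant.",
		Temperature:   gen.Float(0.2),
		TopP:          gen.Float(0.9),
		TopK:          gen.Int(40),
		MaxTokens:     gen.Int(1024),
		StopSequences: []string{"\n\n"},
	}
}

A Request built this way can be applied wholesale with Generator.SetConfig or the WithRequest option.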

type Response

type Response struct {
	Texts    []string     `json:"texts,omitempty"`
	Thinking []string     `json:"thinking,omitempty"` // Thinking parts, if any
	Tools    []tools.Call `json:"tools,omitempty"`

	Metadata models.Metadata `json:"metadata,omitempty"`
}

func (*Response) AsText

func (r *Response) AsText() (string, error)

func (*Response) AsTools

func (r *Response) AsTools() ([]tools.Call, error)

func (*Response) Eval

func (r *Response) Eval(ctx context.Context) (err error)

func (*Response) IsText

func (r *Response) IsText() bool

func (*Response) IsTools

func (r *Response) IsTools() bool

func (*Response) Unmarshal

func (r *Response) Unmarshal(ref any) error
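
A sketch of consuming a Response: branch on tool calls versus text, and use Unmarshal when an output schema was set. The Answer field below is a made-up example type:

// handle inspects a Response and extracts either tool calls or plain text.
func handle(r *gen.Response) error {
	if r.IsTools() {
		calls, err := r.AsTools()
		if err != nil {
			return err
		}
		_ = calls // dispatch tools.Call values here
		return nil
	}
	text, err := r.AsText()
	if err != nil {
		return err
	}
	_ = text // use the plain-text answer
	return nil
}

// handleStructured decodes a structured-output response into a caller-defined
// type; the Answer field is hypothetical.
func handleStructured(r *gen.Response) (string, error) {
	var out struct {
		Answer string `json:"answer"`
	}
	if err := r.Unmarshal(&out); err != nil {
		return "", err
	}
	return out.Answer, nil
}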

type StreamResponse added in v0.11.7

type StreamResponse struct {
	Type     StreamingResponseType `json:"type"`
	Role     prompt.Role           `json:"role"`
	Index    int                   `json:"index"`
	Content  string                `json:"content"`
	ToolCall *tools.Call           `json:"tool_call,omitempty"` // Only for TYPE_DELTA

	Metadata *models.Metadata `json:"metadata,omitempty"`
}

func (StreamResponse) Error added in v0.11.7

func (r StreamResponse) Error() error

type StreamResponseError added in v0.11.7

type StreamResponseError string

func (StreamResponseError) Error added in v0.11.7

func (s StreamResponseError) Error() string

type StreamingResponseType added in v0.11.7

type StreamingResponseType string
const TYPE_DELTA StreamingResponseType = "delta"
const TYPE_EOF StreamingResponseType = "EOF"
const TYPE_ERROR StreamingResponseType = "ERROR"
const TYPE_METADATA StreamingResponseType = "metadata"
const TYPE_THINKING_DELTA StreamingResponseType = "thinking_delta"
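
A sketch of draining a Stream channel, switching on the StreamingResponseType constants above (placeholder imports as in the earlier sketches):

// consume drains a streaming response, assembling text deltas and returning
// any error the stream reports.
func consume(g *gen.Generator, prompts ...prompt.Prompt) (string, error) {
	ch, err := g.Stream(prompts...)
	if err != nil {
		return "", err
	}
	var text string
	for r := range ch {
		switch r.Type {
		case gen.TYPE_DELTA:
			text += r.Content // streamed text fragment
		case gen.TYPE_THINKING_DELTA:
			// thinking tokens; skip or collect separately
		case gen.TYPE_METADATA:
			// r.Metadata carries usage details
		case gen.TYPE_ERROR:
			return text, r.Error()
		case gen.TYPE_EOF:
			return text, nil
		}
	}
	return text, nil
}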
