Documentation
¶
Overview ¶
Package openai implements an API client for the OpenAI platform API (https://platform.openai.com/docs/api-reference).
Index ¶
- func WithAudio(voice, format string) llm.Opt
- func WithAudioSpeed(v float64) llm.Opt
- func WithDimensions(v uint64) llm.Opt
- func WithDisableParallelToolCalls() llm.Opt
- func WithLogProbs() llm.Opt
- func WithLogitBias(token uint64, bias int64) llm.Opt
- func WithMetadata(k, v string) llm.Opt
- func WithModalities(v ...string) llm.Opt
- func WithQuality(v string) llm.Opt
- func WithReasoningEffort(v string) llm.Opt
- func WithServiceTier(v string) llm.Opt
- func WithSize(v string) llm.Opt
- func WithStore(v bool) llm.Opt
- func WithStreamOptions(fn func(llm.Completion), include_usage bool) llm.Opt
- func WithStyle(v string) llm.Opt
- func WithTopLogProbs(v uint64) llm.Opt
- type Audio
- type Client
- func (openai *Client) DeleteModel(ctx context.Context, model string) error
- func (openai *Client) GenerateEmbedding(ctx context.Context, model string, prompt []string, opts ...llm.Opt) (*embeddings, error)
- func (openai *Client) GetModel(ctx context.Context, model string) (*Model, error)
- func (openai *Client) ListModels(ctx context.Context) ([]Model, error)
- func (openai *Client) Model(ctx context.Context, name string) llm.Model
- func (openai *Client) Models(ctx context.Context) ([]llm.Model, error)
- func (*Client) Name() string
- type Completion
- type Completions
- type Content
- type Embedding
- type Embeddings
- type Format
- type Image
- type ImageResponse
- type Message
- type Metrics
- type Model
- type Prediction
- type Response
- type RoleContent
- type StreamOptions
- type Text
- type ToolCall
- type ToolCallArray
- type ToolCalls
- type ToolChoice
- type ToolResults
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func WithAudioSpeed ¶ added in v0.0.10
func WithAudioSpeed(v float64) llm.Opt
Set the playback speed for generated speech output.
func WithDimensions ¶
func WithDimensions(v uint64) llm.Opt
Embeddings: The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.
func WithDisableParallelToolCalls ¶
func WithDisableParallelToolCalls() llm.Opt
Disable parallel tool calling
func WithLogProbs ¶
func WithLogProbs() llm.Opt
Whether to return log probabilities of the output tokens or not.
func WithLogitBias ¶
Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
func WithMetadata ¶
func WithMetadata(k, v string) llm.Opt
Key-value pair that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.
func WithModalities ¶
func WithModalities(v ...string) llm.Opt
Output types that you would like the model to generate for this request. Supported values are: "text", "audio"
func WithReasoningEffort ¶
func WithReasoningEffort(v string) llm.Opt
Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
func WithServiceTier ¶
func WithServiceTier(v string) llm.Opt
Specifies the latency tier to use for processing the request. Values can be "auto" or "default".
func WithStore ¶
func WithStore(v bool) llm.Opt
Whether or not to store the output of this chat completion request for use in model distillation or evals products.
func WithStreamOptions ¶
func WithStreamOptions(fn func(llm.Completion), include_usage bool) llm.Opt
Enable streaming and include usage information in the streaming response
func WithTopLogProbs ¶
func WithTopLogProbs(v uint64) llm.Opt
An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.
Types ¶
type Audio ¶
type Client ¶
type Client struct { *client.Client *impl.ModelCache }
func (*Client) DeleteModel ¶
Delete a fine-tuned model. You must have the Owner role in your organization to delete a model.
func (*Client) GenerateEmbedding ¶
func (*Client) ListModels ¶
ListModels returns all the models
type Completion ¶
type Completion struct { Index uint64 `json:"index"` Message *Message `json:"message"` Delta *Message `json:"delta,omitempty"` // For streaming Reason string `json:"finish_reason,omitempty"` }
Completion is a single completion choice, carrying its index, the message (or streaming delta), and the finish reason.
func (Completion) String ¶
func (c Completion) String() string
type Completions ¶
type Completions []Completion
Completion choices
func (Completions) Attachment ¶ added in v0.0.10
func (c Completions) Attachment(index int) *llm.Attachment
Return attachment content for a specific completion
func (Completions) Choice ¶
func (c Completions) Choice(index int) llm.Completion
Return message for a specific completion
func (Completions) Text ¶
func (c Completions) Text(index int) string
Return the text content for a specific completion
type Content ¶
type Content struct { Type string `json:"type"` // text or content Content string `json:"content,omitempty"` // content content ;-) Text string `json:"text,omitempty"` // text content Audio *llm.Attachment `json:"audio,omitempty"` // audio content Image *Image `json:"image_url,omitempty"` // image content }
func NewContentString ¶
func NewImageData ¶
func NewImageData(image *llm.Attachment) *Content
func NewImageUrl ¶
func NewTextContext ¶
type Embedding ¶
type Embedding struct { Type string `json:"object"` Index uint64 `json:"index"` Vector []float64 `json:"embedding"` }
Embedding is a single vector
func (Embedding) MarshalJSON ¶
type Embeddings ¶
type Embeddings struct { Type string `json:"object"` Model string `json:"model"` Data []Embedding `json:"data"` Metrics }
Embeddings holds the generated embedding vectors for a request, together with the model name and usage metrics.
type Format ¶
type Format struct { // Supported response format types are text, json_object or json_schema Type string `json:"type"` }
type Image ¶
func (*Image) Attachment ¶ added in v0.0.10
func (r *Image) Attachment(index int) *llm.Attachment
Return media content for a specific completion
func (*Image) Choice ¶ added in v0.0.10
func (r *Image) Choice(index int) llm.Completion
Return message for a specific completion
type ImageResponse ¶ added in v0.0.10
func (ImageResponse) Attachment ¶ added in v0.0.10
func (r ImageResponse) Attachment(index int) *llm.Attachment
Return media content for a specific completion
func (ImageResponse) Choice ¶ added in v0.0.10
func (r ImageResponse) Choice(index int) llm.Completion
Return message for a specific completion
func (ImageResponse) Num ¶ added in v0.0.10
func (r ImageResponse) Num() int
Return the number of completions
func (ImageResponse) Role ¶ added in v0.0.10
func (r ImageResponse) Role() string
Return the role of the completion
func (ImageResponse) Text ¶ added in v0.0.10
func (r ImageResponse) Text(index int) string
Return the text content for a specific completion
type Message ¶
type Message struct { RoleContent Media *llm.Attachment `json:"audio,omitempty"` Calls ToolCalls `json:"tool_calls,omitempty"` *ToolResults }
Message with text or object content
func (*Message) Attachment ¶ added in v0.0.10
func (message *Message) Attachment(index int) *llm.Attachment
Return the audio
func (*Message) Choice ¶
func (message *Message) Choice(index int) llm.Completion
Return the completion
type Metrics ¶
type Metrics struct { PromptTokens uint64 `json:"prompt_tokens,omitempty"` CompletionTokens uint64 `json:"completion_tokens,omitempty"` TotalTokens uint64 `json:"total_tokens,omitempty"` PromptTokenDetails struct { CachedTokens uint64 `json:"cached_tokens,omitempty"` AudioTokens uint64 `json:"audio_tokens,omitempty"` } `json:"prompt_tokens_details,omitempty"` CompletionTokenDetails struct { ReasoningTokens uint64 `json:"reasoning_tokens,omitempty"` AcceptedPredictionTokens uint64 `json:"accepted_prediction_tokens,omitempty"` RejectedPredictionTokens uint64 `json:"rejected_prediction_tokens,omitempty"` } `json:"completion_tokens_details,omitempty"` }
Metrics
type Response ¶
type Response struct { Id string `json:"id"` Type string `json:"object"` Created uint64 `json:"created"` Model string `json:"model"` SystemFingerprint string `json:"system_fingerprint"` ServiceTier string `json:"service_tier"` Completions `json:"choices"` *Metrics `json:"usage,omitempty"` }
Completion Response
type RoleContent ¶
type StreamOptions ¶
type StreamOptions struct {
IncludeUsage bool `json:"include_usage"`
}
func NewStreamOptions ¶
func NewStreamOptions(include_usage bool) *StreamOptions
type ToolCall ¶
type ToolCall struct { Id string `json:"id,omitempty"` // tool id Type string `json:"type,omitempty"` // tool type (function) Index uint64 `json:"index,omitempty"` // tool index Function struct { Name string `json:"name,omitempty"` // tool name Arguments string `json:"arguments,omitempty"` // tool arguments } `json:"function"` }
type ToolChoice ¶
type ToolChoice struct { Type string `json:"type"` Function struct { Name string `json:"name"` } `json:"function"` }
func NewToolChoice ¶
func NewToolChoice(function string) *ToolChoice
type ToolResults ¶
type ToolResults struct {
Id string `json:"tool_call_id,omitempty"`
}