Documentation ¶
Index ¶
- Constants
- Variables
- type AutoToolChoice
- type ChatCompletion
- type ChatCompletionResponse
- type ChatCompletionResponseChoice
- type ChatCompletionTool
- type ChatCompletionUsage
- type ChatMessage
- type ChatMessageRole
- type ChatModelID
- type ChatSession
- func (session ChatSession) Clone() ChatSession
- func (session ChatSession) LastAssistantContent() string
- func (session ChatSession) LookupLastMessage() (ChatMessage, bool)
- func (session ChatSession) WithAssistantMessage(content string) ChatSession
- func (session ChatSession) WithFunction(fns ...Function) ChatSession
- func (session ChatSession) WithMessage(msgs ...ChatMessage) ChatSession
- func (session ChatSession) WithModel(model ChatModelID) ChatSession
- func (session ChatSession) WithSystemMessage(content string) ChatSession
- func (session ChatSession) WithUserMessage(content string) ChatSession
- type Client
- type FinishReason
- type Function
- type FunctionCall
- type FunctionExec
- type FunctionMapping
- type FunctionName
- type FunctionToolChoice
- type InstructModelID
- type JSONSchema
- type JSONSchemaItems
- type JSONSchemaProperty
- type NoneToolChoice
- type ResponseFormat
- type ToolCall
- type ToolCallID
- type ToolChoice
- type ToolChoiceID
- type ToolType
Constants ¶
const (
    ErrContextLengthExceeded errorkit.Error = "ErrContextLengthExceeded"
    ErrRateLimitExceeded     errorkit.Error = "ErrRateLimitExceeded"
)
const ExecInterrupt errorkit.Error = "ExecInterrupt"
const FixFunctionHallucination = `
Only use the functions you have been provided with.
You must use JSON format for the argument to make a function call.
`
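A minimal usage sketch, assuming the package is imported as openai and a ChatSession value named session exists from earlier code:

    // Append the guard prompt as a system message before sending the session,
    // to discourage the model from inventing functions it was never given.
    session = session.WithSystemMessage(openai.FixFunctionHallucination)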
Variables ¶
var DefaultRetryStrategy retry.Strategy[retry.FailureCount] = retry.ExponentialBackoff{
    MaxRetries:      16,
    BackoffDuration: time.Second,
}
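The strategy can be overridden per Client. A sketch, with illustrative values rather than recommendations:

    // Construct a client with a custom retry strategy; only exported fields
    // are set here, the unexported ones keep their zero values.
    client := openai.Client{
        APIKey: os.Getenv("OPENAI_API_KEY"),
        RetryStrategy: retry.ExponentialBackoff{
            MaxRetries:      5,
            BackoffDuration: 500 * time.Millisecond,
        },
    }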
Functions ¶
This section is empty.
Types ¶
type AutoToolChoice ¶
type AutoToolChoice struct{}
AutoToolChoice means the model can pick between generating a message or calling a function.
func (AutoToolChoice) MarshalJSON ¶
func (AutoToolChoice) MarshalJSON() ([]byte, error)
func (AutoToolChoice) ToolChoiceID ¶
func (AutoToolChoice) ToolChoiceID() ToolChoiceID
type ChatCompletion ¶
type ChatCompletion struct {
    // Model specifies the ID of the model to use (e.g., "gpt-3.5-turbo").
    Model ChatModelID `json:"model"`
    // Messages is an array of ChatMessage structs representing the conversation history.
    Messages []ChatMessage `json:"messages"`
    // MaxTokens specifies the maximum number of tokens for the message output.
    // Optional; if not provided, the API will use the model's maximum limit.
    MaxTokens *int `json:"max_tokens,omitempty"`
    // Temperature controls the randomness of the output, ranging from 0.0 to 1.0.
    // Optional; if not provided, the API will use a default value.
    Temperature *float64 `json:"temperature,omitempty"`
    // TopP controls the diversity of the output via nucleus sampling, ranging from 0.0 to 1.0.
    // Optional; if not provided, the API will use a default value.
    TopP *float64 `json:"top_p,omitempty"`
    // FrequencyPenalty alters the likelihood of tokens appearing based on their frequency, ranging from -2.0 to 2.0.
    // Optional; if not provided, the API will use a default value.
    FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
    // PresencePenalty alters the likelihood of tokens appearing based on their presence in the prompt, ranging from -2.0 to 2.0.
    // Optional; if not provided, the API will use a default value.
    PresencePenalty *float64 `json:"presence_penalty,omitempty"`
    // StopSequences is an array of strings that indicate the end of the generated content.
    // Optional; if not provided, the API will decide when to stop the output.
    StopSequences []string `json:"stop_sequences,omitempty"`
    // UserID is an optional identifier for the user, used for fine-tuned models.
    // Optional; if not provided, the API will not personalize the response.
    UserID string `json:"user,omitempty"`
    // SessionID is an optional identifier for the session, used for fine-tuned models.
    // Optional; if not provided, the API will not maintain context between API calls.
    SessionID string `json:"session_id,omitempty"`
    // Functions is an array of Function objects that describe the functions
    // available for the GPT model to call during the chat completion.
    // Each Function object should contain details like the function's name,
    // description, and parameters. The GPT model will use this information
    // to decide whether to call a function based on the user's query.
    // For example, you can define functions like send_email(to: string, body: string)
    // or get_current_weather(location: string, unit: 'celsius' | 'fahrenheit').
    // Note: Defining functions will count against the model's token limit.
    Functions []Function `json:"-"`
    // FunctionCall is a string that specifies the behavior of function calling
    // during the chat completion. It can have the following values:
    //   - "auto": The model decides whether to call a function and which function to call.
    //   - "none": Forces the model to not call any function.
    //   - { "name": "<function_name>" }: Forces the model to call a specific function by name.
    // This field allows you to control the model's decision-making process regarding
    // function calls, providing a way to either automate or manually control actions.
    // When left nil, it is interpreted as "auto" on the OpenAI API side.
    FunctionCall *FunctionCall `json:"function_call,omitempty"`
    // Tools is the list of enabled tooling for the assistant.
    // There can be a maximum of 128 tools per assistant.
    // Tools can be of types code_interpreter, retrieval, or function.
    //
    // example: [{ "type": "code_interpreter" }]
    Tools []ChatCompletionTool `json:"tools,omitempty"`
    // ToolChoice controls which (if any) function is called by the model.
    // "none" is the default when no functions are present (NoneToolChoice).
    // "auto" is the default if functions are present (AutoToolChoice).
    ToolChoice ToolChoice `json:"tool_choice,omitempty"`
    // ResponseFormat is an object specifying the format that the model must output.
    //
    // Setting to { "type": "json_object" } enables JSON mode (JSONResponseFormat),
    // which guarantees the message the model generates is valid JSON.
    ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
}
ChatCompletion represents the parameters for a chat completion request.
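As a rough sketch (the openai import name is an assumption of this example, and the field values are illustrative), a request could be assembled like so:

    // Optional knobs are pointers, so leaving them nil means "use the API default".
    temperature := 0.2
    maxTokens := 256
    cc := openai.ChatCompletion{
        Model: openai.GPT4,
        Messages: []openai.ChatMessage{
            {Role: openai.SystemChatMessage, Content: "You are a terse assistant."},
            {Role: openai.UserChatMessage, Content: "Summarise the design in one sentence."},
        },
        Temperature: &temperature,
        MaxTokens:   &maxTokens,
    }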
func (ChatCompletion) Clone ¶
func (cc ChatCompletion) Clone() ChatCompletion
func (ChatCompletion) MarshalJSON ¶
func (cc ChatCompletion) MarshalJSON() ([]byte, error)
type ChatCompletionResponse ¶
type ChatCompletionResponse struct {
    // ID is a unique identifier for the chat completion.
    // Example: "chatcmpl-abc123"
    ID string `json:"id"`
    // Object is the object type, always "chat.completion".
    // Example: "chat.completion"
    Object string `json:"object" enum:"chat.completion;"`
    // Created is the Unix timestamp (in seconds) of when the chat completion was created.
    // Example: 1677858242
    Created int `json:"created"`
    // Model is the model used for the chat completion.
    // Example: "gpt-3.5-turbo-0613"
    Model ChatModelID `json:"model"`
    // Usage contains usage statistics for the completion request.
    // Example: {"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20}
    Usage ChatCompletionUsage `json:"usage"`
    // Choices is a list of chat completion choices. Can be more than one if 'n' is greater than 1.
    // Example: [{"message": {"role": "assistant", "content": "This is a test!"}, "finish_reason": "stop", "index": 0}]
    Choices []ChatCompletionResponseChoice `json:"choices"`
}
ChatCompletionResponse represents the response from a chat completion request.
type ChatCompletionResponseChoice ¶
type ChatCompletionResponseChoice struct {
    // Message contains the role and content of the message.
    // Example: {"role": "assistant", "content": "This is a test!"}
    Message ChatMessage `json:"message"`
    // FinishReason is the reason the API stopped generating further tokens.
    // Common values are "stop", "length", etc.
    // Example: "stop"
    FinishReason FinishReason `json:"finish_reason"`
    // Index is the index of the choice in the array.
    // Example: 0
    Index int `json:"index"`
}
type ChatCompletionTool ¶
type ChatCompletionUsage ¶
type ChatMessage ¶
type ChatMessage struct {
    // Role specifies the role of the message sender, usually "system", "user", or "assistant".
    Role ChatMessageRole `json:"role" enum:"system;user;assistant;"`
    // Content contains the actual text of the message.
    Content string `json:"content"`
    // ToolCallID is the ID of the tool call that this message is responding to,
    // when ChatMessage is used as part of an API request.
    ToolCallID ToolCallID `json:"tool_call_id,omitempty"`
    // ToolCalls contains the tool calls generated by the model, such as function calls.
    ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}
ChatMessage represents a single message in the conversation history.
func MakeFunctionChatMessage ¶
func MakeFunctionChatMessage(tcID ToolCallID, contentDTO any) (ChatMessage, error)
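A sketch of answering a tool call with a typed payload; weatherResult is a hypothetical DTO, and toolCall is assumed to come from an earlier assistant reply:

    // weatherResult is a hypothetical DTO; MakeFunctionChatMessage is expected
    // to serialise it as the JSON content of a tool-role ChatMessage.
    type weatherResult struct {
        TemperatureCelsius float64 `json:"temperature_celsius"`
    }

    reply, err := openai.MakeFunctionChatMessage(toolCall.ID, weatherResult{TemperatureCelsius: 21.5})
    if err != nil {
        // handle the serialisation error
    }
    session = session.WithMessage(reply)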
type ChatMessageRole ¶
type ChatMessageRole string
const (
    // SystemChatMessage is a prompt meant to instrument the Assistant.
    SystemChatMessage ChatMessageRole = "system"
    // UserChatMessage is a user prompt input.
    UserChatMessage ChatMessageRole = "user"
    // AssistantChatMessage is a reply type.
    // GPT, for example, uses the AssistantChatMessage to reply back to its caller.
    AssistantChatMessage ChatMessageRole = "assistant"
    // FunctionChatMessage is used to respond back
    // to a function call request by the AssistantChatMessage.
    //
    // DEPRECATED: use ToolChatMessage instead.
    FunctionChatMessage ChatMessageRole = "function"
    // ToolChatMessage is used to respond back to a tool request, such as a function call.
    ToolChatMessage ChatMessageRole = "tool"
)
func (ChatMessageRole) From ¶
func (cmr ChatMessageRole) From(content string) ChatMessage
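From is a convenience for building a ChatMessage with a given role, for example:

    // Produces a ChatMessage with Role set to "user" and the given content.
    msg := openai.UserChatMessage.From("What is the capital of France?")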
type ChatModelID ¶
type ChatModelID string
ChatModelID represents the ID of the chat model to use.
const (
    GPT4         ChatModelID = GPT4_Preview
    GPT4_Vision  ChatModelID = "gpt-4-vision-preview"
    GPT4_Preview ChatModelID = "gpt-4-1106-preview"
    GPT4_Stable  ChatModelID = "gpt-4"
    // GPT4_32k is the GPT-4 model with 32k tokens.
    //
    // DEPRECATED: use GPT4
    GPT4_32k ChatModelID = "gpt-4-32k"
)
const (
    GPT3 ChatModelID = "gpt-3.5-turbo" // GPT-3.5 Turbo model
    // GPT3_16k is the GPT-3.5 Turbo model with 16k tokens.
    //
    // DEPRECATED: use GPT3 directly
    GPT3_16k ChatModelID = "gpt-3.5-turbo-16k"
)
type ChatSession ¶
type ChatSession struct{ ChatCompletion }
func (ChatSession) Clone ¶
func (session ChatSession) Clone() ChatSession
func (ChatSession) LastAssistantContent ¶
func (session ChatSession) LastAssistantContent() string
func (ChatSession) LookupLastMessage ¶
func (session ChatSession) LookupLastMessage() (ChatMessage, bool)
func (ChatSession) WithAssistantMessage ¶
func (session ChatSession) WithAssistantMessage(content string) ChatSession
func (ChatSession) WithFunction ¶
func (session ChatSession) WithFunction(fns ...Function) ChatSession
func (ChatSession) WithMessage ¶
func (session ChatSession) WithMessage(msgs ...ChatMessage) ChatSession
func (ChatSession) WithModel ¶
func (session ChatSession) WithModel(model ChatModelID) ChatSession
func (ChatSession) WithSystemMessage ¶
func (session ChatSession) WithSystemMessage(content string) ChatSession
func (ChatSession) WithUserMessage ¶
func (session ChatSession) WithUserMessage(content string) ChatSession
type Client ¶
type Client struct {
    BaseURL       string
    APIKey        string
    HTTPClient    *http.Client
    RetryStrategy retry.Strategy[retry.FailureCount]
    // contains filtered or unexported fields
}
Client represents the OpenAI API client.
func (*Client) ChatCompletion ¶
func (c *Client) ChatCompletion(ctx context.Context, cc ChatCompletion) (ChatCompletionResponse, error)
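A usage sketch; cc is a ChatCompletion value as built above, and matching the sentinel errors with errors.Is is an assumption based on the constants section:

    resp, err := client.ChatCompletion(ctx, cc)
    switch {
    case errors.Is(err, openai.ErrRateLimitExceeded):
        // back off and try again later
    case errors.Is(err, openai.ErrContextLengthExceeded):
        // trim the conversation history before retrying
    case err != nil:
        // handle any other transport or API error
    }
    if len(resp.Choices) > 0 {
        fmt.Println(resp.Choices[0].Message.Content)
    }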
func (*Client) ChatSession ¶
func (c *Client) ChatSession(ctx context.Context, session ChatSession) (ChatSession, error)
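A sketch of a full session round trip, assuming the zero ChatSession value is a valid starting point:

    session := openai.ChatSession{}.
        WithModel(openai.GPT4).
        WithSystemMessage("You answer in one sentence.").
        WithUserMessage("Why is the sky blue?")

    session, err := client.ChatSession(ctx, session)
    if err != nil {
        // handle error
    }
    // The assistant's reply is appended to the returned session.
    fmt.Println(session.LastAssistantContent())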
type FinishReason ¶
type FinishReason string
const (
    FinishReasonStop          FinishReason = "stop"
    FinishReasonLength        FinishReason = "length"
    FinishReasonFunctionCall  FinishReason = "function_call"
    FinishReasonToolCalls     FinishReason = "tool_calls"
    FinishReasonContentFilter FinishReason = "content_filter"
    FinishReasonNull          FinishReason = "null"
)
type Function ¶
type Function struct {
    Name        FunctionName `json:"name"`
    Description string       `json:"description"`
    Parameters  JSONSchema   `json:"parameters"`
    // Exec is used to execute the Function when GPT requests it.
    // If Exec is not supplied, automatic Function execution is disabled for this function.
    Exec FunctionExec `json:"-"`
}
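A sketch of describing a callable function to the model; it reuses the get_current_weather example from the ChatCompletion field docs. Exec is left nil here, since the FunctionExec signature is not shown on this page:

    getWeather := openai.Function{
        Name:        "get_current_weather",
        Description: "Returns the current weather for the given location.",
        Parameters: openai.JSONSchema{
            Type: "object",
            Properties: map[string]openai.JSONSchemaProperty{
                "location": {Type: "string", Description: "The city name", Required: true},
                "unit":     {Type: "string", Enum: []string{"celsius", "fahrenheit"}},
            },
            // Required is autopopulated from the properties flagged Required.
        },
        // Exec nil => automatic execution stays disabled for this function.
    }
    session = session.WithFunction(getWeather)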
type FunctionCall ¶
type FunctionCall struct {
    Name FunctionName `json:"name,omitempty"`
    // Arguments contains the arguments of the function call, encoded in JSON format.
    Arguments string `json:"arguments,omitempty"`
}
FunctionCall is the function call request that the Assistant asks the caller to complete.
type FunctionExec ¶
type FunctionMapping ¶
type FunctionMapping interface {
    GetParameters() JSONSchema
    Call(FunctionCall) (ChatMessage, error)
}
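A hypothetical implementation sketch, only to illustrate the interface shape:

    // echoFunction is a hypothetical FunctionMapping that replies
    // with the raw JSON arguments it was called with.
    type echoFunction struct{}

    func (echoFunction) GetParameters() openai.JSONSchema {
        return openai.JSONSchema{Type: "object"}
    }

    func (echoFunction) Call(fc openai.FunctionCall) (openai.ChatMessage, error) {
        return openai.ToolChatMessage.From(fc.Arguments), nil
    }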
type FunctionName ¶
type FunctionName string
type FunctionToolChoice ¶
type FunctionToolChoice struct {
    // Name of the function that needs to be executed.
    Name FunctionName
}
FunctionToolChoice will tell GPT to use a specific function from the supplied tooling.
func (FunctionToolChoice) MarshalJSON ¶
func (tc FunctionToolChoice) MarshalJSON() ([]byte, error)
func (FunctionToolChoice) ToolChoiceID ¶
func (tc FunctionToolChoice) ToolChoiceID() ToolChoiceID
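Forcing a specific function could look like this sketch, on a ChatCompletion value named cc:

    // Ask the model to call get_current_weather rather than answer in prose.
    // Alternatives: openai.AutoToolChoice{} (model decides) or openai.NoneToolChoice{} (no tool calls).
    cc.ToolChoice = openai.FunctionToolChoice{Name: "get_current_weather"}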
type InstructModelID ¶
type InstructModelID string
InstructModelID represents the ID of the instruct model to use.
const Babbage InstructModelID = "babbage-002"
Babbage is a GPT base model, which is not optimized for instruction-following and is less capable, but it can be effective when fine-tuned for narrow tasks. It is also cost-efficient to use for testing purposes. The Babbage model usage cost is $0.0004 / 1K tokens.
type JSONSchema ¶
type JSONSchema struct {
    Type       string                        `json:"type"`
    Properties map[string]JSONSchemaProperty `json:"properties"`
    Items      *JSONSchemaItems              `json:"items,omitempty"`
    // Required marks which properties are required.
    // It gets autopopulated from the properties flagged as "Required".
    Required []string `json:"required,omitempty"`
}
type JSONSchemaItems ¶
type JSONSchemaProperty ¶
type JSONSchemaProperty struct {
    Type        string           `json:"type"`
    Description string           `json:"description"`
    Enum        []string         `json:"enum,omitempty"`
    Items       *JSONSchemaItems `json:"items,omitempty"`
    Required    bool             `json:"-"`
}
type NoneToolChoice ¶
type NoneToolChoice struct{}
NoneToolChoice means the model will not call a function and instead generates a message.
func (NoneToolChoice) MarshalJSON ¶
func (NoneToolChoice) MarshalJSON() ([]byte, error)
func (NoneToolChoice) ToolChoiceID ¶
func (NoneToolChoice) ToolChoiceID() ToolChoiceID
type ResponseFormat ¶
type ResponseFormat struct {
Type string `json:"type"`
}
func JSONResponseFormat ¶
func JSONResponseFormat() *ResponseFormat
func TextResponseFormat ¶
func TextResponseFormat() *ResponseFormat
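Enabling JSON mode on a request is a one-liner, sketched here on a ChatCompletion value named cc:

    // Sets { "type": "json_object" }, which guarantees the generated message is valid JSON.
    cc.ResponseFormat = openai.JSONResponseFormat()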
type ToolCall ¶
type ToolCall struct {
    ID           ToolCallID    `json:"id"`
    Type         ToolType      `json:"type"`
    FunctionCall *FunctionCall `json:"function,omitempty"`
}
func (ToolCall) LookupFunctionCall ¶
func (tc ToolCall) LookupFunctionCall() (FunctionCall, bool)
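A sketch of inspecting the tool calls on an assistant reply, with resp as a ChatCompletionResponse:

    for _, tc := range resp.Choices[0].Message.ToolCalls {
        if fc, ok := tc.LookupFunctionCall(); ok {
            fmt.Printf("model requested %s with arguments %s\n", fc.Name, fc.Arguments)
        }
    }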
type ToolCallID ¶
type ToolCallID string
type ToolChoice ¶
type ToolChoice interface {
    ToolChoiceID() ToolChoiceID
    json.Marshaler
}
type ToolChoiceID ¶
type ToolChoiceID string