chat

package
v1.2.1 Latest
Published: Jan 17, 2026 License: MIT Imports: 5 Imported by: 0

Documentation

Overview

Package chat provides a wrapper for the OpenAI Chat API.

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type Image

type Image struct {
	URL    string `json:"image_url"` // URL or base64 in format "data:image/jpeg;base64,{base64_image}"
	Detail string `json:"detail"`    // "low"/"high", default "low"
}
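
For illustration, a minimal sketch of constructing an Image from raw file bytes in the data-URI format described in the URL comment (the chat import path here and in the sketches below is hypothetical, and raw is assumed to hold the bytes of a JPEG file):

// Requires "encoding/base64".
img := chat.Image{
	URL:    "data:image/jpeg;base64," + base64.StdEncoding.EncodeToString(raw),
	Detail: "high", // request full-resolution analysis instead of the "low" default
}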

type Message

type Message struct {
	Role    string `json:"role"`    // "system"/"developer"/"user"/"assistant"/"function"/"tool"
	Content string `json:"content"` // normally a string, but when images are included, it's an array of objects

	// The refusal message generated by the model.
	Refusal string `json:"refusal,omitempty"`

	// Images are included in Content if present.
	Images []Image `json:"-"`

	// Name is required for the "function" role, in which case it must contain the function name; otherwise it is optional.
	// [a-zA-Z0-9_]{1,64}
	Name string `json:"name,omitempty"`

	// FunctionCall can only be present if Role is "assistant".
	// Deprecated: use ToolCalls instead.
	FunctionCall openai.FunctionCallData `json:"function_call,omitempty"`

	// ToolCallID is required for the "tool" role, in which case it must contain the ID of the corresponding entry in ToolCalls.
	ToolCallID string `json:"tool_call_id,omitempty"`

	// ToolCalls can only be present if Role is "assistant".
	ToolCalls []openai.ToolCallData `json:"tool_calls,omitempty"`
}

Message represents a message in an API request or response.

func (Message) MarshalJSON

func (data Message) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaler interface. It encodes the chat message while ensuring that FunctionCall is omitted when empty. If Images are present, they are encoded into the Content array.
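
As a sketch of the round trip, the following program builds a message with an attached image and serializes it; the import path is hypothetical, and the exact shape of the resulting Content array is whatever MarshalJSON produces as described above:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"example.com/openai/chat" // hypothetical import path for this package
)

func main() {
	msg := chat.Message{
		Role:    "user",
		Content: "What is in this picture?",
		Images: []chat.Image{
			{URL: "https://example.com/cat.jpg", Detail: "low"},
		},
	}

	// MarshalJSON folds Images into the Content array automatically.
	b, err := json.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}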

type Request

type Request struct {
	Model    string    `json:"model"`    // model name, such as "gpt-3.5-turbo"
	Messages []Message `json:"messages"` // previous messages, including the "system" prompt, user input, and the whole history

	// What sampling temperature to use, between 0 and 2.
	// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	Temperature float64 `json:"temperature,omitempty"` // default 1

	// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
	// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	TopP float64 `json:"top_p,omitempty"` // default 1

	// How many chat completion choices to generate for each input message.
	N int `json:"n,omitempty"`

	// An object specifying the format that the model must output.
	// Is encoded as {"type": "json_object"}, or {"type": "text"},
	// or {"type": "json_schema", "json_schema": ...}.
	ResponseFormat ResponseFormatStr `json:"response_format,omitempty"` // default "text"

	// If set, partial message deltas will be sent, like in ChatGPT.
	// Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
	Stream bool `json:"stream,omitempty"` // default false

	// Up to 4 sequences where the API will stop generating further tokens.
	Stop []string `json:"stop,omitempty"` // default []

	// The maximum number of tokens allowed for the generated answer.
	// By default, the number of tokens the model can return will be (4096 - prompt tokens).
	//
	// Deprecated: Use MaxCompletionTokens instead.
	MaxTokens int `json:"max_tokens,omitempty"` // default 4096 - prompt tokens

	// The maximum number of tokens allowed for the generated answer.
	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`

	// Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	PresencePenalty float64 `json:"presence_penalty,omitempty"` // default 0

	// Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"` // default 0

	// Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
	// Mathematically, the bias is added to the logits generated by the model prior to sampling.
	// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
	// values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	//
	// Example:
	//  {"20185":-100,"9552":-100} // "20185" is "AI" and "9552" is " AI", use https://platform.openai.com/tokenizer
	LogitBias map[string]int `json:"logit_bias,omitempty"` // default {}

	// Function calls that the AI can request to be executed. Not all models support this.
	// Default is an empty slice.
	// Only the names of functions are listed here; the real payload is added automatically based on those names.
	// The API field "functions" is deprecated and replaced with "tools", but this field is kept as the internal list of functions.
	Functions []string `json:"-"` // default []

	// "none", "auto", or "function_name".
	// "none" prohibits function calls, "auto" allows them at the AI's discretion, and "function_name" forces the use of one specified function.
	// If a function name is given, it is encoded in tool format as {"type": "function", "function": {"name": "function_name"}}.
	ToolChoice tools.ToolChoiceOption `json:"tool_choice,omitempty"` // default "auto"

	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	User string `json:"user,omitempty"` // default ""

	// By default (false), function calls will be executed automatically and the request will be repeated with the results.
	// If set to true, function calls will be returned in the response as encoded JSON and must be executed manually.
	ReturnFunctionCalls bool `json:"-"` // default false
}

Request is the request body for the Chat API.
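
A sketch of a typical request built from the fields documented above; the model name mirrors the example in the Model comment, and the LogitBias values are the "AI"-banning pair from the comment on that field:

req := chat.Request{
	Model: "gpt-3.5-turbo",
	Messages: []chat.Message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Write a haiku about Go."},
	},
	Temperature:         0.2, // favor focused, deterministic output
	MaxCompletionTokens: 256,
	LogitBias:           map[string]int{"20185": -100, "9552": -100}, // ban "AI" / " AI"
	User:                "user-1234",
}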

type ResponseFormatStr

type ResponseFormatStr string

ResponseFormatStr represents a format that the model must output. Should be one of:

  • "text" (default, normal text)
  • "json_object" (deprecated, output is valid JSON but no specific schema)
  • JSON schema as a string (the output will match the schema, which must follow the supported subset of JSON Schema rules)

Is encoded as {"type": "json_object"}, or {"type": "text"}, or {"type": "json_schema", "json_schema": ...}.

func (ResponseFormatStr) MarshalJSON

func (rfs ResponseFormatStr) MarshalJSON() ([]byte, error)
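
A sketch of setting each variant on the request above; the plain strings are taken from the list above, while the schema string is illustrative and must follow the supported rule subset:

req.ResponseFormat = "text"        // encoded as {"type": "text"}
req.ResponseFormat = "json_object" // encoded as {"type": "json_object"} (deprecated)

// A JSON schema passed as a string; encoded as {"type": "json_schema", "json_schema": ...}.
req.ResponseFormat = chat.ResponseFormatStr(`{
	"type": "object",
	"properties": {"answer": {"type": "string"}},
	"required": ["answer"]
}`)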

type Service

type Service interface {
	// Send sends a request to the Chat API.
	Send(req Request) (string, error)

	// NewRequest creates a new empty request.
	NewRequest() *Request

	// NewMessage creates a new empty message.
	NewMessage() *Message
}

Service defines the methods used to operate on the Chat API.
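
A sketch of driving the API through Service; this package does not show how a concrete Service is obtained, so svc is assumed to be supplied by the surrounding client:

var svc chat.Service // assumed to be provided elsewhere, e.g. by the parent openai client

req := svc.NewRequest()
req.Model = "gpt-3.5-turbo"
req.Messages = append(req.Messages, chat.Message{Role: "user", Content: "Hello!"})

reply, err := svc.Send(*req) // Send takes the request by value
if err != nil {
	log.Fatal(err)
}
fmt.Println(reply)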
