Documentation
¶
Index ¶
- Constants
- Variables
- func String(s string) *string
- type APIError
- type Annotation
- type AnnotationType
- type CacheControl
- type ChatCompletionChoice
- type ChatCompletionMessage
- func AssistantMessage(content string) ChatCompletionMessage
- func SystemMessage(content string) ChatCompletionMessage
- func ToolMessage(callID string, content string) ChatCompletionMessage
- func UserMessage(content string) ChatCompletionMessage
- func UserMessageWithImage(text, imageURL string) ChatCompletionMessage
- func UserMessageWithPDF(text, filename, fileData string) ChatCompletionMessage
- func UserMessageWithPDFFromFile(text, filePath string) (ChatCompletionMessage, error)
- type ChatCompletionPlugin
- type ChatCompletionReasoning
- type ChatCompletionRequest
- type ChatCompletionResponse
- type ChatCompletionResponseFormat
- type ChatCompletionResponseFormatJSONSchema
- type ChatCompletionResponseFormatType
- type ChatCompletionStream
- type ChatCompletionStreamChoice
- type ChatCompletionStreamChoiceDelta
- type ChatCompletionStreamChoiceLogprobs
- type ChatCompletionStreamResponse
- type ChatCompletionTokenLogprob
- type ChatCompletionTokenLogprobTopLogprob
- type ChatMessageImageURL
- type ChatMessagePart
- type ChatMessagePartType
- type ChatProvider
- type Client
- type ClientConfig
- type CompletionTokenDetails
- type Content
- type ContentFilterResults
- type CostDetails
- type DataCollection
- type ErrorResponse
- type FileContent
- type FinishReason
- type FunctionCall
- type FunctionDefinition
- type HTTPDoer
- type HTTPRequestBuilder
- type Hate
- type ImageURLDetail
- type IncludeUsage
- type JSONMarshaller
- type JailBreak
- type LogProb
- type LogProbs
- type Marshaller
- type Metadata
- type Option
- type PDFEngine
- type PDFPlugin
- type PluginID
- type Profanity
- type PromptAnnotation
- type PromptFilterResult
- type PromptTokenDetails
- type ProviderSorting
- type RequestBuilder
- type RequestError
- type SearchContextSize
- type SelfHarm
- type Sexual
- type StreamOptions
- type Tool
- type ToolCall
- type ToolType
- type TopLogProbs
- type URLCitation
- type Usage
- type Violence
- type WebSearchOptions
Constants ¶
const ( GPT4o = "openai/chatgpt-4o-latest" DeepseekV3 = "deepseek/deepseek-chat" DeepseekR1 = "deepseek/deepseek-r1" DeepseekR1DistillLlama = "deepseek/deepseek-r1-distill-llama-70b" LiquidLFM7B = "liquid/lfm-7b" Phi3Mini = "microsoft/phi-3-mini-128k-instruct:free" GeminiFlashExp = "google/gemini-2.0-flash-exp:free" GeminiProExp = "google/gemini-pro-1.5-exp" GeminiFlash8B = "google/gemini-flash-1.5-8b" GPT4oMini = "openai/gpt-4o-mini" )
const ( ChatMessageRoleSystem = "system" ChatMessageRoleUser = "user" ChatMessageRoleAssistant = "assistant" ChatMessageRoleFunction = "function" ChatMessageRoleTool = "tool" )
Chat message role defined by the Openrouter API.
Variables ¶
var ( ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") //nolint:lll ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously") )
Functions ¶
Types ¶
type APIError ¶
type APIError struct { Code any `json:"code,omitempty"` Message string `json:"message"` Metadata *Metadata `json:"metadata,omitempty"` // Internal fields HTTPStatusCode int `json:"-"` }
APIError provides error information returned by the Openrouter API.
func (*APIError) UnmarshalJSON ¶
type Annotation ¶ added in v0.1.3
type Annotation struct { Type AnnotationType `json:"type"` URLCitation URLCitation `json:"url_citation"` }
type AnnotationType ¶ added in v0.1.3
type AnnotationType string
const (
AnnotationTypeUrlCitation AnnotationType = "url_citation"
)
type CacheControl ¶
type ChatCompletionChoice ¶
type ChatCompletionChoice struct { Index int `json:"index"` Message ChatCompletionMessage `json:"message"` // FinishReason // stop: API returned complete message, // or a message terminated by one of the stop sequences provided via the stop parameter // length: Incomplete model output due to max_tokens parameter or token limit // function_call: The model decided to call a function // content_filter: Omitted content due to a flag from our content filters // null: API response still in progress or incomplete FinishReason FinishReason `json:"finish_reason"` LogProbs *LogProbs `json:"logprobs,omitempty"` }
type ChatCompletionMessage ¶
type ChatCompletionMessage struct { Role string `json:"role"` Content Content `json:"content,omitzero"` Refusal string `json:"refusal,omitempty"` // This property is used for the "reasoning" feature supported by deepseek-reasoner // - https://api-docs.deepseek.com/api/create-chat-completion#responses ReasoningContent *string `json:"reasoning_content,omitempty"` // Reasoning Used by all the other models Reasoning *string `json:"reasoning,omitempty"` FunctionCall *FunctionCall `json:"function_call,omitempty"` // For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls. ToolCalls []ToolCall `json:"tool_calls,omitempty"` // For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool. ToolCallID string `json:"tool_call_id,omitempty"` // Web Search Annotations Annotations []Annotation `json:"annotations,omitempty"` }
func AssistantMessage ¶ added in v0.1.6
func AssistantMessage(content string) ChatCompletionMessage
AssistantMessage creates a new assistant message with the given text content.
func SystemMessage ¶ added in v0.1.6
func SystemMessage(content string) ChatCompletionMessage
SystemMessage creates a new system message with the given text content.
func ToolMessage ¶ added in v0.1.6
func ToolMessage(callID string, content string) ChatCompletionMessage
ToolMessage creates a new tool (response) message with a call ID and content.
func UserMessage ¶ added in v0.1.6
func UserMessage(content string) ChatCompletionMessage
UserMessage creates a new user message with the given text content.
func UserMessageWithImage ¶ added in v0.1.7
func UserMessageWithImage(text, imageURL string) ChatCompletionMessage
UserMessageWithImage creates a new user message with text and image URL.
func UserMessageWithPDF ¶ added in v0.1.7
func UserMessageWithPDF(text, filename, fileData string) ChatCompletionMessage
UserMessageWithPDF creates a new user message with text and PDF file content.
func UserMessageWithPDFFromFile ¶ added in v0.1.7
func UserMessageWithPDFFromFile(text, filePath string) (ChatCompletionMessage, error)
UserMessageWithPDFFromFile creates a user message with text and PDF content from a file. It reads the PDF file and creates a message with the embedded PDF data.
type ChatCompletionPlugin ¶ added in v0.1.6
func CreatePDFPlugin ¶ added in v0.1.7
func CreatePDFPlugin(engine PDFEngine) ChatCompletionPlugin
CreatePDFPlugin creates a completion plugin to process PDFs using the specified engine. The engine can be: "mistral-ocr" (for scanned documents/PDFs with images), "pdf-text" (for well-structured PDFs - free), or "native" (only for models that support file input).
type ChatCompletionReasoning ¶
type ChatCompletionReasoning struct { // Effort The level of reasoning effort to request. One of: high, medium, low. Effort *string `json:"prompt,omitempty"` // MaxTokens cannot be simultaneously used with effort. MaxTokens *int `json:"max_tokens,omitempty"` // Exclude defaults to false. Exclude *bool `json:"exclude,omitempty"` }
type ChatCompletionRequest ¶
type ChatCompletionRequest struct { Model string `json:"model,omitempty"` // Optional model fallbacks: https://openrouter.ai/docs/features/model-routing#the-models-parameter Models []string `json:"models,omitempty"` Provider *ChatProvider `json:"provider,omitempty"` Messages []ChatCompletionMessage `json:"messages"` Reasoning *ChatCompletionReasoning `json:"reasoning,omitempty"` Plugins []ChatCompletionPlugin `json:"plugins,omitempty"` // MaxTokens The maximum number of tokens that can be generated in the chat completion. // This value can be used to control costs for text generated via API. // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models. // refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens MaxTokens int `json:"max_tokens,omitempty"` // MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion, // including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` Temperature float32 `json:"temperature,omitempty"` TopP float32 `json:"top_p,omitempty"` TopK int `json:"top_k,omitempty"` TopA float32 `json:"top_a,omitempty"` N int `json:"n,omitempty"` Stream bool `json:"stream,omitempty"` Stop []string `json:"stop,omitempty"` PresencePenalty float32 `json:"presence_penalty,omitempty"` RepetitionPenalty float32 `json:"repetition_penalty,omitempty"` ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"` Seed *int `json:"seed,omitempty"` MinP float32 `json:"min_p,omitempty"` FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` // LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string. 
// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}` // refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias LogitBias map[string]int `json:"logit_bias,omitempty"` // LogProbs indicates whether to return log probabilities of the output tokens or not. // If true, returns the log probabilities of each output token returned in the content of message. // This option is currently not available on the gpt-4-vision-preview model. LogProbs bool `json:"logprobs,omitempty"` // TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each // token position, each with an associated log probability. // logprobs must be set to true if this parameter is used. TopLogProbs int `json:"top_logprobs,omitempty"` User string `json:"user,omitempty"` // Deprecated: use Tools instead. Functions []FunctionDefinition `json:"functions,omitempty"` // Deprecated: use ToolChoice instead. FunctionCall any `json:"function_call,omitempty"` Tools []Tool `json:"tools,omitempty"` // This can be either a string or an ToolChoice object. ToolChoice any `json:"tool_choice,omitempty"` // Options for streaming response. Only set this when you set stream: true. StreamOptions *StreamOptions `json:"stream_options,omitempty"` // Disable the default behavior of parallel tool calls by setting it: false. ParallelToolCalls any `json:"parallel_tool_calls,omitempty"` // Store can be set to true to store the output of this completion request for use in distillations and evals. // https://platform.openai.com/docs/api-reference/chat/create#chat-create-store Store bool `json:"store,omitempty"` // Metadata to store with the completion. 
Metadata map[string]string `json:"metadata,omitempty"` // Apply message transforms // https://openrouter.ai/docs/features/message-transforms Transforms []string `json:"transforms,omitempty"` // Optional web search options // https://openrouter.ai/docs/features/web-search#specifying-search-context-size WebSearchOptions WebSearchOptions `json:"web_search_options,omitempty"` Usage *IncludeUsage `json:"usage,omitempty"` }
type ChatCompletionResponse ¶
type ChatCompletionResponse struct { ID string `json:"id"` Object string `json:"object"` Created int64 `json:"created"` Model string `json:"model"` Choices []ChatCompletionChoice `json:"choices"` Citations []string `json:"citations"` Usage *Usage `json:"usage,omitempty"` SystemFingerprint string `json:"system_fingerprint"` }
ChatCompletionResponse represents a response structure for chat completion API.
type ChatCompletionResponseFormat ¶
type ChatCompletionResponseFormat struct { Type ChatCompletionResponseFormatType `json:"type,omitempty"` JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"` }
type ChatCompletionResponseFormatType ¶
type ChatCompletionResponseFormatType string
const ( ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object" ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema" ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text" )
type ChatCompletionStream ¶
type ChatCompletionStream struct {
// contains filtered or unexported fields
}
func (*ChatCompletionStream) Close ¶
func (s *ChatCompletionStream) Close()
Close terminates the stream and cleans up resources.
func (*ChatCompletionStream) Recv ¶
func (s *ChatCompletionStream) Recv() (ChatCompletionStreamResponse, error)
Recv reads the next chunk from the stream.
type ChatCompletionStreamChoice ¶
type ChatCompletionStreamChoice struct { Index int `json:"index"` Delta ChatCompletionStreamChoiceDelta `json:"delta"` Logprobs *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"` FinishReason FinishReason `json:"finish_reason"` ContentFilterResults *ContentFilterResults `json:"content_filter_results,omitempty"` }
type ChatCompletionStreamChoiceDelta ¶
type ChatCompletionStreamChoiceDelta struct { Content string `json:"content,omitempty"` Role string `json:"role,omitempty"` FunctionCall *FunctionCall `json:"function_call,omitempty"` ToolCalls []ToolCall `json:"tool_calls,omitempty"` Refusal string `json:"refusal,omitempty"` Reasoning *string `json:"reasoning,omitempty"` // This property is used for the "reasoning" feature supported by deepseek-reasoner // which is not in the official documentation. // the doc from deepseek: // - https://api-docs.deepseek.com/api/create-chat-completion#responses ReasoningContent string `json:"reasoning_content,omitempty"` }
type ChatCompletionStreamChoiceLogprobs ¶
type ChatCompletionStreamChoiceLogprobs struct { Content []ChatCompletionTokenLogprob `json:"content,omitempty"` Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"` }
type ChatCompletionStreamResponse ¶
type ChatCompletionStreamResponse struct { ID string `json:"id"` Object string `json:"object"` Created int64 `json:"created"` Model string `json:"model"` Choices []ChatCompletionStreamChoice `json:"choices"` SystemFingerprint string `json:"system_fingerprint"` PromptAnnotations []PromptAnnotation `json:"prompt_annotations,omitempty"` PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"` // An optional field that will only be present when you set stream_options: {"include_usage": true} in your request. // When present, it contains a null value except for the last chunk which contains the token usage statistics // for the entire request. Usage *Usage `json:"usage,omitempty"` }
type ChatCompletionTokenLogprob ¶
type ChatCompletionTokenLogprob struct { Token string `json:"token"` Bytes []int64 `json:"bytes,omitempty"` Logprob float64 `json:"logprob,omitempty"` TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"` }
type ChatMessageImageURL ¶
type ChatMessageImageURL struct { URL string `json:"url,omitempty"` Detail ImageURLDetail `json:"detail,omitempty"` }
type ChatMessagePart ¶
type ChatMessagePart struct { Type ChatMessagePartType `json:"type,omitempty"` Text string `json:"text,omitempty"` // Prompt caching // https://openrouter.ai/docs/features/prompt-caching CacheControl *CacheControl `json:"cache_control,omitempty"` ImageURL *ChatMessageImageURL `json:"image_url,omitempty"` File *FileContent `json:"file,omitempty"` }
type ChatMessagePartType ¶
type ChatMessagePartType string
const ( ChatMessagePartTypeText ChatMessagePartType = "text" ChatMessagePartTypeImageURL ChatMessagePartType = "image_url" ChatMessagePartTypeFile ChatMessagePartType = "file" )
type ChatProvider ¶
type ChatProvider struct { // The order of the providers in the list determines the order in which they are called. Order []string `json:"order"` // Allow fallbacks to other providers if the primary provider fails. AllowFallbacks bool `json:"allow_fallbacks"` // Only use providers that support all parameters in your request. RequireParameters bool `json:"require_parameters"` // Control whether to use providers that may store data. DataCollection DataCollection `json:"data_collection"` // List of provider slugs to allow for this request. Only []string `json:"only"` // List of provider slugs to skip for this request. Ignore []string `json:"ignore"` // List of quantization levels to filter by (e.g. ["int4", "int8"]). Quantizations []string `json:"quantizations"` // Sort providers by price or throughput. (e.g. "price" or "throughput"). Sort ProviderSorting `json:"sort"` }
Provider Routing: https://openrouter.ai/docs/features/provider-routing
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
func NewClientWithConfig ¶
func NewClientWithConfig(config ClientConfig) *Client
func (*Client) CreateChatCompletion ¶
func (c *Client) CreateChatCompletion( ctx context.Context, request ChatCompletionRequest, ) (response ChatCompletionResponse, err error)
CreateChatCompletion — API call to create a completion for the chat message.
func (*Client) CreateChatCompletionStream ¶
func (c *Client) CreateChatCompletionStream( ctx context.Context, request ChatCompletionRequest, ) (*ChatCompletionStream, error)
CreateChatCompletionStream — API call to create a completion for the chat message with streaming.
type ClientConfig ¶
type ClientConfig struct { BaseURL string OrgID string AssistantVersion string HTTPClient HTTPDoer HttpReferer string XTitle string EmptyMessagesLimit uint // contains filtered or unexported fields }
ClientConfig is a configuration for the openrouter client.
func DefaultConfig ¶
func DefaultConfig(authToken string) *ClientConfig
type CompletionTokenDetails ¶ added in v0.1.8
type CompletionTokenDetails struct {
ReasoningTokens int `json:"reasoning_tokens"`
}
type Content ¶
type Content struct { Text string Multi []ChatMessagePart }
Content handles both string and multi-part content.
func (Content) MarshalJSON ¶
MarshalJSON serializes ContentType as a string or array.
func (*Content) UnmarshalJSON ¶
UnmarshalJSON deserializes ContentType from a string or array.
type ContentFilterResults ¶
type ContentFilterResults struct { Hate Hate `json:"hate,omitempty"` SelfHarm SelfHarm `json:"self_harm,omitempty"` Sexual Sexual `json:"sexual,omitempty"` Violence Violence `json:"violence,omitempty"` JailBreak JailBreak `json:"jailbreak,omitempty"` Profanity Profanity `json:"profanity,omitempty"` }
type CostDetails ¶ added in v0.1.8
type CostDetails struct {
UpstreamInferenceCost float64 `json:"upstream_inference_cost"`
}
type DataCollection ¶ added in v0.1.6
type DataCollection string
const ( DataCollectionAllow DataCollection = "allow" DataCollectionDeny DataCollection = "deny" )
type ErrorResponse ¶
type ErrorResponse struct {
Error *APIError `json:"error,omitempty"`
}
type FileContent ¶ added in v0.1.7
FileContent represents file content for PDF processing.
type FinishReason ¶
type FinishReason string
const ( FinishReasonStop FinishReason = "stop" FinishReasonLength FinishReason = "length" FinishReasonFunctionCall FinishReason = "function_call" FinishReasonToolCalls FinishReason = "tool_calls" FinishReasonContentFilter FinishReason = "content_filter" FinishReasonNull FinishReason = "null" )
func (FinishReason) MarshalJSON ¶
func (r FinishReason) MarshalJSON() ([]byte, error)
type FunctionCall ¶
type FunctionDefinition ¶
type FunctionDefinition struct { Name string `json:"name"` Description string `json:"description,omitempty"` Strict bool `json:"strict,omitempty"` // Parameters is an object describing the function. // You can pass json.RawMessage to describe the schema, // or you can pass in a struct which serializes to the proper JSON schema. // The jsonschema package is provided for convenience, but you should // consider another specialized library if you require more complex schemas. Parameters any `json:"parameters"` }
type HTTPRequestBuilder ¶
type HTTPRequestBuilder struct {
// contains filtered or unexported fields
}
func NewRequestBuilder ¶
func NewRequestBuilder() *HTTPRequestBuilder
type ImageURLDetail ¶
type ImageURLDetail string
const ( ImageURLDetailHigh ImageURLDetail = "high" ImageURLDetailLow ImageURLDetail = "low" ImageURLDetailAuto ImageURLDetail = "auto" )
type IncludeUsage ¶
type IncludeUsage struct {
Include bool `json:"include"`
}
type JSONMarshaller ¶
type JSONMarshaller struct{}
type LogProb ¶
type LogProb struct { Token string `json:"token"` LogProb float64 `json:"logprob"` Bytes []byte `json:"bytes,omitempty"` // Omitting the field if it is null // TopLogProbs is a list of the most likely tokens and their log probability, at this token position. // In rare cases, there may be fewer than the number of requested top_logprobs returned. TopLogProbs []TopLogProbs `json:"top_logprobs"` }
LogProb represents the probability information for a token.
type LogProbs ¶
type LogProbs struct { // Content is a list of message content tokens with log probability information. Content []LogProb `json:"content"` }
LogProbs is the top-level structure containing the log probability information.
type Marshaller ¶
type PDFEngine ¶ added in v0.1.6
type PDFEngine string
const ( // Best for scanned documents or PDFs with images ($2 per 1,000 pages). PDFEngineMistralOCR PDFEngine = "mistral-ocr" // Best for well-structured PDFs with clear text content (Free). PDFEnginePDFText PDFEngine = "pdf-text" // Only available for models that support file input natively (charged as input tokens). PDFEngineNative PDFEngine = "native" )
type PluginID ¶ added in v0.1.6
type PluginID string
const ( // Processing PDFs: https://openrouter.ai/docs/features/images-and-pdfs#processing-pdfs PluginIDFileParser PluginID = "file-parser" // Web search plugin: https://openrouter.ai/docs/features/web-search PluginIDWeb PluginID = "web" )
type PromptAnnotation ¶
type PromptAnnotation struct { PromptIndex int `json:"prompt_index,omitempty"` ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"` }
type PromptFilterResult ¶
type PromptFilterResult struct { Index int `json:"index"` ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"` }
type PromptTokenDetails ¶ added in v0.1.8
type PromptTokenDetails struct {
CachedTokens int `json:"cached_tokens"`
}
type ProviderSorting ¶ added in v0.1.6
type ProviderSorting string
const ( ProviderSortingPrice ProviderSorting = "price" ProviderSortingThroughput ProviderSorting = "throughput" ProviderSortingLatency ProviderSorting = "latency" )
type RequestBuilder ¶
type RequestError ¶
RequestError provides information about generic request errors.
func (*RequestError) Error ¶
func (e *RequestError) Error() string
func (*RequestError) Unwrap ¶
func (e *RequestError) Unwrap() error
type SearchContextSize ¶ added in v0.1.3
type SearchContextSize string
const ( SearchContextSizeLow SearchContextSize = "low" SearchContextSizeMedium SearchContextSize = "medium" SearchContextSizeHigh SearchContextSize = "high" )
type StreamOptions ¶
type StreamOptions struct { // If set, an additional chunk will be streamed before the data: [DONE] message. // The usage field on this chunk shows the token usage statistics for the entire request, // and the choices field will always be an empty array. // All other chunks will also include a usage field, but with a null value. IncludeUsage bool `json:"include_usage,omitempty"` }
type Tool ¶
type Tool struct { Type ToolType `json:"type"` Function *FunctionDefinition `json:"function,omitempty"` }
type ToolCall ¶
type ToolCall struct { // Index is not nil only in chat completion chunk object Index *int `json:"index,omitempty"` ID string `json:"id,omitempty"` Type ToolType `json:"type"` Function FunctionCall `json:"function"` }
type TopLogProbs ¶
type URLCitation ¶ added in v0.1.3
type Usage ¶
type Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` CompletionTokenDetails CompletionTokenDetails `json:"completion_token_details"` TotalTokens int `json:"total_tokens"` Cost float64 `json:"cost"` CostDetails CostDetails `json:"cost_details"` PromptTokenDetails PromptTokenDetails `json:"prompt_token_details"` }
Usage represents the total token usage per request to OpenRouter.
type WebSearchOptions ¶ added in v0.1.3
type WebSearchOptions struct {
SearchContextSize SearchContextSize `json:"search_context_size"`
}
Source Files
¶
Directories
¶
Path | Synopsis
---|---
examples |
jsonschema | Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.