Documentation ¶
Index ¶
- Constants
- Variables
- type APIError
- type ChatCompletionChoice
- type ChatCompletionMessage
- type ChatCompletionRequest
- type ChatCompletionResponse
- type ChatCompletionResponseFormat
- type ChatCompletionResponseFormatJSONSchema
- type ChatCompletionResponseFormatType
- type ChatMessageImageURL
- type ChatMessagePart
- type ChatMessagePartType
- type ChatProvider
- type Client
- type ClientConfig
- type Content
- type ErrorResponse
- type FinishReason
- type FunctionCall
- type FunctionDefinition
- type HTTPDoer
- type HTTPRequestBuilder
- type ImageURLDetail
- type JSONMarshaller
- type LogProb
- type LogProbs
- type Marshaller
- type Metadata
- type Option
- type RequestBuilder
- type RequestError
- type StreamOptions
- type Tool
- type ToolCall
- type ToolType
- type TopLogProbs
- type Usage
Constants ¶
const (
	GPT4o                  = "openai/chatgpt-4o-latest"
	DeepseekV3             = "deepseek/deepseek-chat"
	DeepseekR1             = "deepseek/deepseek-r1"
	DeepseekR1DistillLlama = "deepseek/deepseek-r1-distill-llama-70b"
	LiquidLFM7B            = "liquid/lfm-7b"
	Phi3Mini               = "microsoft/phi-3-mini-128k-instruct:free"
	GeminiFlashExp         = "google/gemini-2.0-flash-exp:free"
	GeminiProExp           = "google/gemini-pro-1.5-exp"
	GeminiFlash8B          = "google/gemini-flash-1.5-8b"
	GPT4oMini              = "openai/gpt-4o-mini"
)
const (
	ChatMessageRoleSystem    = "system"
	ChatMessageRoleUser      = "user"
	ChatMessageRoleAssistant = "assistant"
	ChatMessageRoleFunction  = "function"
	ChatMessageRoleTool      = "tool"
)
Chat message roles defined by the OpenRouter API.
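As a minimal sketch of how these roles are used (assuming the package is imported under the name openrouter), a conversation is a slice of ChatCompletionMessage values:

messages := []openrouter.ChatCompletionMessage{
	{
		Role:    openrouter.ChatMessageRoleSystem,
		Content: openrouter.Content{Text: "You are a terse assistant."},
	},
	{
		Role:    openrouter.ChatMessageRoleUser,
		Content: openrouter.Content{Text: "Summarize the plot of Hamlet in one sentence."},
	},
}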
Variables ¶
var (
	ErrChatCompletionInvalidModel       = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll
	ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream")              //nolint:lll
	ErrContentFieldsMisused             = errors.New("can't use both Content and MultiContent properties simultaneously")
)
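A hedged sketch of checking these sentinel errors with errors.Is; client, ctx, and req are assumed to exist from earlier setup:

resp, err := client.CreateChatCompletion(ctx, req)
if err != nil {
	if errors.Is(err, openrouter.ErrChatCompletionStreamNotSupported) {
		// req had Stream: true; use the streaming client method instead.
	}
	return err
}
_ = resp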
Functions ¶
This section is empty.
Types ¶
type APIError ¶
type APIError struct {
	Code     any       `json:"code,omitempty"`
	Message  string    `json:"message"`
	Metadata *Metadata `json:"metadata,omitempty"`

	// Internal fields
	HTTPStatusCode int `json:"-"`
}
APIError provides error information returned by the OpenRouter API.
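Because APIError is an error type, it can be recovered from a failed client call with errors.As; a minimal sketch (err is assumed to come from a client method):

var apiErr *openrouter.APIError
if errors.As(err, &apiErr) {
	log.Printf("status=%d code=%v message=%s", apiErr.HTTPStatusCode, apiErr.Code, apiErr.Message)
}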
func (*APIError) UnmarshalJSON ¶
func (e *APIError) UnmarshalJSON(data []byte) error
type ChatCompletionChoice ¶
type ChatCompletionChoice struct {
	Index   int                   `json:"index"`
	Message ChatCompletionMessage `json:"message"`

	// FinishReason
	// stop: API returned complete message,
	// or a message terminated by one of the stop sequences provided via the stop parameter
	// length: Incomplete model output due to max_tokens parameter or token limit
	// function_call: The model decided to call a function
	// content_filter: Omitted content due to a flag from our content filters
	// null: API response still in progress or incomplete
	FinishReason FinishReason `json:"finish_reason"`
	LogProbs     *LogProbs    `json:"logprobs,omitempty"`
}
type ChatCompletionMessage ¶
type ChatCompletionMessage struct {
	Role    string  `json:"role"`
	Content Content `json:"content,omitzero"`
	Refusal string  `json:"refusal,omitempty"`

	FunctionCall *FunctionCall `json:"function_call,omitempty"`

	// For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`

	// For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
	ToolCallID string `json:"tool_call_id,omitempty"`
}
type ChatCompletionRequest ¶
type ChatCompletionRequest struct {
	Model    string                  `json:"model"`
	Provider *ChatProvider           `json:"provider,omitempty"`
	Messages []ChatCompletionMessage `json:"messages"`

	// MaxTokens is the maximum number of tokens that can be generated in the chat completion.
	// This value can be used to control costs for text generated via API.
	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
	MaxTokens int `json:"max_tokens,omitempty"`

	// MaxCompletionTokens is an upper bound for the number of tokens that can be generated for a completion,
	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`

	Temperature       float32                       `json:"temperature,omitempty"`
	TopP              float32                       `json:"top_p,omitempty"`
	TopK              int                           `json:"top_k,omitempty"`
	TopA              float32                       `json:"top_a,omitempty"`
	N                 int                           `json:"n,omitempty"`
	Stream            bool                          `json:"stream,omitempty"`
	Stop              []string                      `json:"stop,omitempty"`
	PresencePenalty   float32                       `json:"presence_penalty,omitempty"`
	RepetitionPenalty float32                       `json:"repetition_penalty,omitempty"`
	ResponseFormat    *ChatCompletionResponseFormat `json:"response_format,omitempty"`
	Seed              *int                          `json:"seed,omitempty"`
	MinP              float32                       `json:"min_p,omitempty"`
	FrequencyPenalty  float32                       `json:"frequency_penalty,omitempty"`

	// LogitBias keys must be token IDs (as assigned by the tokenizer), not word strings.
	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"`

	// LogProbs indicates whether to return log probabilities of the output tokens or not.
	// If true, returns the log probabilities of each output token returned in the content of message.
	// This option is currently not available on the gpt-4-vision-preview model.
	LogProbs bool `json:"logprobs,omitempty"`

	// TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each
	// token position, each with an associated log probability.
	// logprobs must be set to true if this parameter is used.
	TopLogProbs int `json:"top_logprobs,omitempty"`

	User string `json:"user,omitempty"`

	// Deprecated: use Tools instead.
	Functions []FunctionDefinition `json:"functions,omitempty"`
	// Deprecated: use ToolChoice instead.
	FunctionCall any `json:"function_call,omitempty"`

	Tools []Tool `json:"tools,omitempty"`
	// This can be either a string or a ToolChoice object.
	ToolChoice any `json:"tool_choice,omitempty"`

	// Options for streaming response. Only set this when you set stream: true.
	StreamOptions *StreamOptions `json:"stream_options,omitempty"`

	// Disable the default behavior of parallel tool calls by setting it to false.
	ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`

	// Store can be set to true to store the output of this completion request for use in distillations and evals.
	// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
	Store bool `json:"store,omitempty"`

	// Metadata to store with the completion.
	Metadata map[string]string `json:"metadata,omitempty"`

	// Transforms applies message transforms.
	// https://openrouter.ai/docs/features/message-transforms
	Transforms []string `json:"transforms,omitempty"`
}
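A minimal request sketch illustrating a few of these fields; messages is the slice built earlier, and note that TopLogProbs only takes effect when LogProbs is true:

req := openrouter.ChatCompletionRequest{
	Model:       openrouter.GPT4oMini,
	Messages:    messages,
	Temperature: 0.2,
	MaxTokens:   256,
	// TopLogProbs requires LogProbs to be set to true.
	LogProbs:    true,
	TopLogProbs: 3,
}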
type ChatCompletionResponse ¶
type ChatCompletionResponse struct {
	ID                string                 `json:"id"`
	Object            string                 `json:"object"`
	Created           int64                  `json:"created"`
	Model             string                 `json:"model"`
	Choices           []ChatCompletionChoice `json:"choices"`
	Usage             Usage                  `json:"usage"`
	SystemFingerprint string                 `json:"system_fingerprint"`
}
ChatCompletionResponse represents the response structure for the chat completion API.
type ChatCompletionResponseFormat ¶
type ChatCompletionResponseFormat struct {
	Type       ChatCompletionResponseFormatType        `json:"type,omitempty"`
	JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"`
}
type ChatCompletionResponseFormatType ¶
type ChatCompletionResponseFormatType string
const (
	ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object"
	ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema"
	ChatCompletionResponseFormatTypeText       ChatCompletionResponseFormatType = "text"
)
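For example, JSON-object mode can be requested like this (a sketch assuming req is a ChatCompletionRequest; as with the upstream OpenAI API, the prompt itself should also instruct the model to emit JSON):

req.ResponseFormat = &openrouter.ChatCompletionResponseFormat{
	Type: openrouter.ChatCompletionResponseFormatTypeJSONObject,
}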
type ChatMessageImageURL ¶
type ChatMessageImageURL struct {
	URL    string         `json:"url,omitempty"`
	Detail ImageURLDetail `json:"detail,omitempty"`
}
type ChatMessagePart ¶
type ChatMessagePart struct {
	Type     ChatMessagePartType  `json:"type,omitempty"`
	Text     string               `json:"text,omitempty"`
	ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
}
type ChatMessagePartType ¶
type ChatMessagePartType string
const (
	ChatMessagePartTypeText     ChatMessagePartType = "text"
	ChatMessagePartTypeImageURL ChatMessagePartType = "image_url"
)
type ChatProvider ¶
type Client ¶
type Client struct {
	// contains filtered or unexported fields
}
func NewClientWithConfig ¶
func NewClientWithConfig(config ClientConfig) *Client
func (*Client) CreateChatCompletion ¶
func (c *Client) CreateChatCompletion(
	ctx context.Context,
	request ChatCompletionRequest,
) (response ChatCompletionResponse, err error)
CreateChatCompletion is an API call that creates a completion for the chat messages.
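A minimal end-to-end sketch; the import path and the OPENROUTER_API_KEY environment variable name are assumptions, not part of this documentation:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	openrouter "github.com/revrost/go-openrouter" // import path assumed
)

func main() {
	config := openrouter.DefaultConfig(os.Getenv("OPENROUTER_API_KEY"))
	// DefaultConfig returns *ClientConfig, while NewClientWithConfig takes a value.
	client := openrouter.NewClientWithConfig(*config)

	resp, err := client.CreateChatCompletion(context.Background(), openrouter.ChatCompletionRequest{
		Model: openrouter.GPT4oMini,
		Messages: []openrouter.ChatCompletionMessage{
			{Role: openrouter.ChatMessageRoleUser, Content: openrouter.Content{Text: "Hello!"}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Choices[0].Message.Content.Text)
}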
type ClientConfig ¶
type ClientConfig struct {
	BaseURL          string
	OrgID            string
	AssistantVersion string
	HTTPClient       HTTPDoer
	HttpReferer      string
	XTitle           string

	EmptyMessagesLimit uint
	// contains filtered or unexported fields
}
ClientConfig is a configuration object for the OpenRouter client.
func DefaultConfig ¶
func DefaultConfig(authToken string) *ClientConfig
type Content ¶
type Content struct {
	Text  string
	Multi []ChatMessagePart
}
Content handles both string and multi-part content.
func (Content) MarshalJSON ¶
func (c Content) MarshalJSON() ([]byte, error)
MarshalJSON serializes Content as a string or an array.
func (*Content) UnmarshalJSON ¶
func (c *Content) UnmarshalJSON(data []byte) error
UnmarshalJSON deserializes Content from a string or an array.
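Text and Multi are mutually exclusive (see ErrContentFieldsMisused above). A sketch of a multi-part user message mixing text with an image URL (the URL is illustrative only):

msg := openrouter.ChatCompletionMessage{
	Role: openrouter.ChatMessageRoleUser,
	Content: openrouter.Content{
		Multi: []openrouter.ChatMessagePart{
			{Type: openrouter.ChatMessagePartTypeText, Text: "What is in this image?"},
			{
				Type: openrouter.ChatMessagePartTypeImageURL,
				ImageURL: &openrouter.ChatMessageImageURL{
					URL:    "https://example.com/cat.png",
					Detail: openrouter.ImageURLDetailAuto,
				},
			},
		},
	},
}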
type ErrorResponse ¶
type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}
type FinishReason ¶
type FinishReason string
const (
	FinishReasonStop          FinishReason = "stop"
	FinishReasonLength        FinishReason = "length"
	FinishReasonFunctionCall  FinishReason = "function_call"
	FinishReasonToolCalls     FinishReason = "tool_calls"
	FinishReasonContentFilter FinishReason = "content_filter"
	FinishReasonNull          FinishReason = "null"
)
func (FinishReason) MarshalJSON ¶
func (r FinishReason) MarshalJSON() ([]byte, error)
type FunctionCall ¶
type FunctionDefinition ¶
type FunctionDefinition struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
	Strict      bool   `json:"strict,omitempty"`

	// Parameters is an object describing the function.
	// You can pass json.RawMessage to describe the schema,
	// or you can pass in a struct which serializes to the proper JSON schema.
	// The jsonschema package is provided for convenience, but you should
	// consider another specialized library if you require more complex schemas.
	Parameters any `json:"parameters"`
}
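A sketch of a tool definition using json.RawMessage for the schema. The get_weather name and its schema are illustrative only, and the "function" tool type string assumes ToolType is a string type (its constants are not shown in this section):

weatherTool := openrouter.Tool{
	Type: "function", // assumed: ToolType is a string type
	Function: &openrouter.FunctionDefinition{
		Name:        "get_weather",
		Description: "Get the current weather for a city.",
		Parameters: json.RawMessage(`{
			"type": "object",
			"properties": {
				"city": {"type": "string"}
			},
			"required": ["city"]
		}`),
	},
}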
type HTTPRequestBuilder ¶
type HTTPRequestBuilder struct {
	// contains filtered or unexported fields
}
func NewRequestBuilder ¶
func NewRequestBuilder() *HTTPRequestBuilder
type ImageURLDetail ¶
type ImageURLDetail string
const (
	ImageURLDetailHigh ImageURLDetail = "high"
	ImageURLDetailLow  ImageURLDetail = "low"
	ImageURLDetailAuto ImageURLDetail = "auto"
)
type JSONMarshaller ¶
type JSONMarshaller struct{}
type LogProb ¶
type LogProb struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []byte  `json:"bytes,omitempty"` // Omitting the field if it is null

	// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
	// In rare cases, there may be fewer than the number of requested top_logprobs returned.
	TopLogProbs []TopLogProbs `json:"top_logprobs"`
}
LogProb represents the probability information for a token.
type LogProbs ¶
type LogProbs struct {
	// Content is a list of message content tokens with log probability information.
	Content []LogProb `json:"content"`
}
LogProbs is the top-level structure containing the log probability information.
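A sketch of reading the returned log probabilities; resp is assumed to come from a request with LogProbs set to true:

if lp := resp.Choices[0].LogProbs; lp != nil {
	for _, tok := range lp.Content {
		fmt.Printf("%q -> %.4f\n", tok.Token, tok.LogProb)
	}
}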
type Marshaller ¶
type Metadata ¶
type Metadata struct {
	// Common fields
	ProviderName string `json:"provider_name,omitempty"`

	// Provider-specific fields
	Raw json.RawMessage `json:"raw,omitempty"` // Raw error from provider

	// Moderation-specific fields
	Reasons      []string `json:"reasons,omitempty"`       // Why input was flagged
	FlaggedInput string   `json:"flagged_input,omitempty"` // Truncated flagged text
	ModelSlug    string   `json:"model_slug,omitempty"`    // Model that flagged input
}
Metadata provides additional information about the error.
func (*Metadata) IsModerationError ¶
IsModerationError reports whether this is a moderation error.
func (*Metadata) IsProviderError ¶
IsProviderError reports whether this is a provider error.
func (*Metadata) ModerationError ¶
ModerationError returns moderation-specific error details.
func (*Metadata) ProviderError ¶
func (m *Metadata) ProviderError() (string, json.RawMessage)
ProviderError returns provider-specific error details.
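Assuming the Is* helpers report a boolean (their full signatures are not shown in this section), inspecting a failed call might look like:

var apiErr *openrouter.APIError
if errors.As(err, &apiErr) && apiErr.Metadata != nil {
	switch {
	case apiErr.Metadata.IsModerationError():
		fmt.Println("input flagged:", apiErr.Metadata.Reasons)
	case apiErr.Metadata.IsProviderError():
		name, raw := apiErr.Metadata.ProviderError()
		fmt.Println("provider", name, "returned:", string(raw))
	}
}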
type RequestBuilder ¶
type RequestError ¶
RequestError provides information about generic request errors.
func (*RequestError) Error ¶
func (e *RequestError) Error() string
func (*RequestError) Unwrap ¶
func (e *RequestError) Unwrap() error
type StreamOptions ¶
type StreamOptions struct {
	// If set, an additional chunk will be streamed before the data: [DONE] message.
	// The usage field on this chunk shows the token usage statistics for the entire request,
	// and the choices field will always be an empty array.
	// All other chunks will also include a usage field, but with a null value.
	IncludeUsage bool `json:"include_usage,omitempty"`
}
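A sketch of enabling usage reporting on a streamed request; note that CreateChatCompletion rejects Stream: true (see ErrChatCompletionStreamNotSupported above), so a request like this would go through the streaming client method instead:

req := openrouter.ChatCompletionRequest{
	Model:    openrouter.DeepseekV3,
	Messages: messages,
	Stream:   true,
	StreamOptions: &openrouter.StreamOptions{
		IncludeUsage: true, // final chunk carries token usage for the whole request
	},
}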
type Tool ¶
type Tool struct {
	Type     ToolType            `json:"type"`
	Function *FunctionDefinition `json:"function,omitempty"`
}
type ToolCall ¶
type ToolCall struct {
	// Index is not nil only in chat completion chunk object
	Index    *int         `json:"index,omitempty"`
	ID       string       `json:"id,omitempty"`
	Type     ToolType     `json:"type"`
	Function FunctionCall `json:"function"`
}
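A sketch of answering tool calls by ID; runTool is a hypothetical helper that executes the requested tool, and resp/req are assumed from earlier calls:

choice := resp.Choices[0]
if choice.FinishReason == openrouter.FinishReasonToolCalls {
	// Echo the assistant turn back, then answer each tool call by its ID.
	req.Messages = append(req.Messages, choice.Message)
	for _, tc := range choice.Message.ToolCalls {
		result := runTool(tc) // hypothetical helper
		req.Messages = append(req.Messages, openrouter.ChatCompletionMessage{
			Role:       openrouter.ChatMessageRoleTool,
			ToolCallID: tc.ID,
			Content:    openrouter.Content{Text: result},
		})
	}
}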
type TopLogProbs ¶
Source Files ¶
Directories ¶
Path | Synopsis
---|---
examples |
jsonschema | Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.