Documentation
¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
View Source
var ( ErrApiKeyIsRequired = errors.New("api key is required") ErrPromptIsRequired = errors.New("prompt is required") ErrParameterOnlyForOpenAIModels = errors.New("response format or seed parameter is only available for OpenAI models") ErrMaxTokenOutOfRange = errors.New("max token should be between 1 and context length") ErrTemperatureOutOfRange = errors.New("temperature should be between 0.0 and 2.0") ErrTopPOutOfRange = errors.New("top p should be between 0.0 and 1.0") ErrTopKOutOfRange = errors.New("top k should be between 0 and infinity") ErrFrequencyPenaltyOutOfRange = errors.New("frequency penalty should be between -2 and 2") ErrPresencePenaltyOutOfRange = errors.New("presence penalty should be between -2 and 2") ErrRepetitionPenaltyOutOfRange = errors.New("repetition penalty should be between 0.0 and 2.0") )
Functions ¶
This section is empty.
Types ¶
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
func (*Client) Completions ¶
func (c *Client) Completions(ctx context.Context, req CompletionsRequest) (resp CompletionsResponse, err error)
type ClientOptions ¶
type ClientOptions struct { AppURL string // Optional, for including your app on openrouter.ai rankings. AppName string // Optional. Shows in rankings on openrouter.ai. APITimeoutInSeconds int // Optional. Default is 30 seconds. HttpClient *http.Client // Optional. Default is http.DefaultClient. BaseURL string // Optional. Default is "https://openrouter.ai". }
type CompletionsRequest ¶
type CompletionsRequest struct { // Prompt is the input text to generate completions from // Required Prompt string `json:"prompt"` // Model is the model to use for the request // See list of models here: // https://openrouter.ai/docs#models // Optional // If not provided, the default model set on your openrouter account will be used Model *string `json:"model,omitempty"` // ResponseFormat is the format of the response // Can be used for OpenAI models only // Optional ResponseFormat *struct{ Type ResponseFormat } `json:"response_format,omitempty"` // Seed is the random seed to use for the request // Can be used for OpenAI models only // Optional Seed *int `json:"seed,omitempty"` // Stop is the stop sequence for the request // Optional Stop *[]string `json:"stop,omitempty"` // Below are the LLM parameters that can be used // For more details, see https://openrouter.ai/docs#llm-parameters MaxToken *int `json:"max_token,omitempty"` // 1 - Context Length Temperature *float32 `json:"temperature,omitempty"` // 0.0 - 2.0 TopP *float32 `json:"top_p,omitempty"` // 0.0 - 1.0 TopK *int `json:"top_k,omitempty"` // 0 - Infinity. Not available for OpenAI models FrequencyPenalty *float32 `json:"frequency_penalty,omitempty"` // -2 - 2 PresencePenalty *float32 `json:"presence_penalty,omitempty"` // -2 - 2 RepetitionPenalty *float32 `json:"repetition_penalty,omitempty"` // 0.0 - 2.0 // Transform // For more detail, see https://openrouter.ai/docs#transforms Transforms *[]string `json:"transforms,omitempty"` // FallbackModels // For more detail, see https://openrouter.ai/docs#oauth FallbackModels *[]string `json:"models,omitempty"` }
type CompletionsResponse ¶
type CompletionsResponse struct { ID string `json:"id"` // Depending on whether you set "stream" to "true" and // whether you passed in "messages" or a "prompt", you // will get a different output shape // OpenRouter provides four different response formats // NonStreamingChoice | StreamingChoice | NonChatChoice | Error // But as of now, we do not support stream or messages yet, so we only support NonChatChoice and Error // See https://openrouter.ai/docs#response Choices []NonChatChoice `json:"choices"` // Unix timestamp of when the request was created Created int64 `json:"created"` // Model used for the request Model string `json:"model"` }
type ErrStatusCode ¶
type ErrStatusCode int
var ( ErrBadRequest ErrStatusCode = 400 // Invalid or missing parameters, CORS ErrPaymentRequired ErrStatusCode = 402 // Your account or API Key has insufficient credits ErrForbidden ErrStatusCode = 403 // Your chosen model requires moderation and your input was flagged ErrTimeout ErrStatusCode = 408 // The request timed out ErrRateLimit ErrStatusCode = 429 // You have exceeded your rate limit ErrBadGateway ErrStatusCode = 502 // Your chosen model is down or we received an invalid response from it )
type ErrorResponse ¶
type NonChatChoice ¶
type ResponseFormat ¶
type ResponseFormat string
var ( ResponseFormatJSON ResponseFormat = "json_object" ResponseFormatText ResponseFormat = "text" )
Click to show internal directories.
Click to hide internal directories.