openrouter

package module
v0.1.8 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jun 30, 2025 License: Apache-2.0 Imports: 11 Imported by: 8

README

Go Openrouter

Go Reference Go Report Card codecov

This library provides an unofficial Go client for the OpenRouter API.

Installation

go get github.com/revrost/go-openrouter

Features

  • Chat Completion
  • Streaming
  • Tool calling
  • Structured outputs
  • Prompt caching

Usage

Deepseek V3 example usage:
package main

import (
	"context"
	"fmt"
	openrouter "github.com/revrost/go-openrouter"
)

// main demonstrates a minimal chat-completion request against the
// DeepSeek V3 model through the OpenRouter API.
func main() {
	// Optional headers identify the calling app to OpenRouter.
	client := openrouter.NewClient(
		"your token",
		openrouter.WithXTitle("My App"),
		openrouter.WithHTTPReferer("https://myapp.com"),
	)

	// Build the request separately for readability.
	request := openrouter.ChatCompletionRequest{
		Model: openrouter.DeepseekV3,
		Messages: []openrouter.ChatCompletionMessage{
			{
				Role:    openrouter.ChatMessageRoleUser,
				Content: openrouter.Content{Text: "Hello!"},
			},
		},
	}

	response, err := client.CreateChatCompletion(context.Background(), request)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(response.Choices[0].Message.Content)
}

Getting an Openrouter API Key:
  1. Visit the openrouter website at https://openrouter.ai/docs/quick-start.
  2. If you don't have an account, click on "Sign Up" to create one. If you do, click "Log In".
  3. Once logged in, navigate to your API key management page.
  4. Click on "Create new secret key".
  5. Enter a name for your new key, then click "Create secret key".
  6. Your new API key will be displayed. Use this key to interact with the openrouter API.

Note: Your API key is sensitive information. Do not share it with anyone.

For DeepSeek models, it's sometimes better to use OpenRouter's integration feature and supply your own API key in the control panel. OpenRouter will then use your key to make requests to the underlying model, which can avoid shared rate limits and improve performance.

Other examples:
Streaming Response
// main demonstrates streaming a chat completion chunk by chunk.
//
// Fixes over the previous example: it no longer calls require.NoError
// with a non-existent *testing.T (that code was copied from a test and
// does not compile in main), it reuses the ctx it declares instead of
// calling context.Background() twice, and it no longer shadows the
// encoding/json package with a local variable named json (which made
// json.MarshalIndent unresolvable on the next loop iteration).
func main() {
	ctx := context.Background()
	client := openrouter.NewClient(os.Getenv("OPENROUTER_API_KEY"))

	stream, err := client.CreateChatCompletionStream(
		ctx, openrouter.ChatCompletionRequest{
			Model: "qwen/qwq-32b:free",
			Messages: []openrouter.ChatCompletionMessage{
				{
					Role:    "user",
					Content: openrouter.Content{Text: "Hello, how are you?"},
				},
			},
			Stream: true,
		},
	)
	if err != nil {
		log.Fatalf("CreateChatCompletionStream error: %v", err)
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		// io.EOF signals the normal end of the stream.
		if errors.Is(err, io.EOF) {
			fmt.Println("EOF, stream finished")
			return
		}
		if err != nil {
			log.Fatalf("stream error: %v", err)
		}
		// Named "out", not "json", to avoid shadowing the package.
		out, err := json.MarshalIndent(response, "", "  ")
		if err != nil {
			log.Fatalf("MarshalIndent error: %v", err)
		}
		fmt.Println(string(out))
	}
}
JSON Schema for function calling
{
  "name": "get_current_weather",
  "description": "Get the current weather in a given location",
  "parameters": {
    "type": "object",
    "properties": {
      "location": {
        "type": "string",
        "description": "The city and state, e.g. San Francisco, CA"
      },
      "unit": {
        "type": "string",
        "enum": ["celsius", "fahrenheit"]
      }
    },
    "required": ["location"]
  }
}

Using the jsonschema package, this schema could be created using structs as such:

FunctionDefinition{
  Name: "get_current_weather",
  Parameters: jsonschema.Definition{
    Type: jsonschema.Object,
    Properties: map[string]jsonschema.Definition{
      "location": {
        Type: jsonschema.String,
        Description: "The city and state, e.g. San Francisco, CA",
      },
      "unit": {
        Type: jsonschema.String,
        Enum: []string{"celsius", "fahrenheit"},
      },
    },
    Required: []string{"location"},
  },
}

The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON).

Structured Outputs
// main demonstrates structured outputs: the model is constrained to
// answer with JSON matching a schema generated from a Go struct.
func main() {
	ctx := context.Background()
	client := openrouter.NewClient(os.Getenv("OPENROUTER_API_KEY"))

	// Result is the shape the model's JSON reply must conform to.
	type Result struct {
		Location    string  `json:"location"`
		Temperature float64 `json:"temperature"`
		Condition   string  `json:"condition"`
	}

	var result Result
	schema, err := jsonschema.GenerateSchemaForType(result)
	if err != nil {
		log.Fatalf("GenerateSchemaForType error: %v", err)
	}

	request := openrouter.ChatCompletionRequest{
		Model: openrouter.DeepseekV3,
		Messages: []openrouter.ChatCompletionMessage{
			{
				Role:    openrouter.ChatMessageRoleUser,
				Content: openrouter.Content{Text: "What's the weather like in London?"},
			},
		},
		// Ask for a strict JSON-schema-constrained response.
		ResponseFormat: &openrouter.ChatCompletionResponseFormat{
			Type: openrouter.ChatCompletionResponseFormatTypeJSONSchema,
			JSONSchema: &openrouter.ChatCompletionResponseFormatJSONSchema{
				Name:   "weather",
				Schema: schema,
				Strict: true,
			},
		},
	}

	// Log the outgoing request body for inspection.
	reqJSON, _ := json.MarshalIndent(request, "", "\t")
	fmt.Printf("request :\n %s\n", string(reqJSON))

	res, err := client.CreateChatCompletion(ctx, request)
	if err != nil {
		fmt.Println("error", err)
		return
	}

	resJSON, _ := json.MarshalIndent(res, "", "\t")
	fmt.Printf("response :\n %s", string(resJSON))
}
More examples in `examples/` folder.

Frequently Asked Questions

Contributing

Contributing Guidelines, we hope to see your contributions!

Documentation

Index

Constants

View Source
const (
	GPT4o                  = "openai/chatgpt-4o-latest"
	DeepseekV3             = "deepseek/deepseek-chat"
	DeepseekR1             = "deepseek/deepseek-r1"
	DeepseekR1DistillLlama = "deepseek/deepseek-r1-distill-llama-70b"
	LiquidLFM7B            = "liquid/lfm-7b"
	Phi3Mini               = "microsoft/phi-3-mini-128k-instruct:free"
	GeminiFlashExp         = "google/gemini-2.0-flash-exp:free"
	GeminiProExp           = "google/gemini-pro-1.5-exp"
	GeminiFlash8B          = "google/gemini-flash-1.5-8b"
	GPT4oMini              = "openai/gpt-4o-mini"
)
View Source
const (
	ChatMessageRoleSystem    = "system"
	ChatMessageRoleUser      = "user"
	ChatMessageRoleAssistant = "assistant"
	ChatMessageRoleFunction  = "function"
	ChatMessageRoleTool      = "tool"
)

Chat message role defined by the Openrouter API.

Variables

View Source
var (
	ErrChatCompletionInvalidModel       = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll
	ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream")              //nolint:lll
	ErrContentFieldsMisused             = errors.New("can't use both Content and MultiContent properties simultaneously")
)

Functions

func String

func String(s string) *string

String is a helper function that returns a pointer to the string value passed in.

Types

type APIError

type APIError struct {
	Code     any       `json:"code,omitempty"`
	Message  string    `json:"message"`
	Metadata *Metadata `json:"metadata,omitempty"`

	// Internal fields
	HTTPStatusCode int `json:"-"`
}

APIError provides error information returned by the Openrouter API.

func (*APIError) Error

func (e *APIError) Error() string

func (*APIError) UnmarshalJSON

func (e *APIError) UnmarshalJSON(data []byte) (err error)

type Annotation added in v0.1.3

type Annotation struct {
	Type        AnnotationType `json:"type"`
	URLCitation URLCitation    `json:"url_citation"`
}

type AnnotationType added in v0.1.3

type AnnotationType string
const (
	AnnotationTypeUrlCitation AnnotationType = "url_citation"
)

type CacheControl

type CacheControl struct {
	// Type only supports "ephemeral" for now.
	Type string `json:"type"`
	// TTL in  format of "5m" | "1h"
	TTL *string `json:"ttl,omitempty"`
}

type ChatCompletionChoice

type ChatCompletionChoice struct {
	Index   int                   `json:"index"`
	Message ChatCompletionMessage `json:"message"`
	// FinishReason
	// stop: API returned complete message,
	// or a message terminated by one of the stop sequences provided via the stop parameter
	// length: Incomplete model output due to max_tokens parameter or token limit
	// function_call: The model decided to call a function
	// content_filter: Omitted content due to a flag from our content filters
	// null: API response still in progress or incomplete
	FinishReason FinishReason `json:"finish_reason"`
	LogProbs     *LogProbs    `json:"logprobs,omitempty"`
}

type ChatCompletionMessage

type ChatCompletionMessage struct {
	Role    string  `json:"role"`
	Content Content `json:"content,omitzero"`
	Refusal string  `json:"refusal,omitempty"`

	// This property is used for the "reasoning" feature supported by deepseek-reasoner
	// - https://api-docs.deepseek.com/api/create-chat-completion#responses
	ReasoningContent *string `json:"reasoning_content,omitempty"`

	// Reasoning Used by all the other models
	Reasoning *string `json:"reasoning,omitempty"`

	FunctionCall *FunctionCall `json:"function_call,omitempty"`

	// For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`

	// For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
	ToolCallID string `json:"tool_call_id,omitempty"`

	// Web Search Annotations
	Annotations []Annotation `json:"annotations,omitempty"`
}

func AssistantMessage added in v0.1.6

func AssistantMessage(content string) ChatCompletionMessage

AssistantMessage creates a new assistant message with the given text content.

func SystemMessage added in v0.1.6

func SystemMessage(content string) ChatCompletionMessage

SystemMessage creates a new system message with the given text content.

func ToolMessage added in v0.1.6

func ToolMessage(callID string, content string) ChatCompletionMessage

ToolMessage creates a new tool (response) message with a call ID and content.

func UserMessage added in v0.1.6

func UserMessage(content string) ChatCompletionMessage

UserMessage creates a new user message with the given text content.

func UserMessageWithImage added in v0.1.7

func UserMessageWithImage(text, imageURL string) ChatCompletionMessage

UserMessageWithImage creates a new user message with text and image URL.

func UserMessageWithPDF added in v0.1.7

func UserMessageWithPDF(text, filename, fileData string) ChatCompletionMessage

UserMessageWithPDF creates a new user message with text and PDF file content.

func UserMessageWithPDFFromFile added in v0.1.7

func UserMessageWithPDFFromFile(text, filePath string) (ChatCompletionMessage, error)

UserMessageWithPDFFromFile creates a user message with text and PDF content from a file. It reads the PDF file and creates a message with the embedded PDF data.

type ChatCompletionPlugin added in v0.1.6

type ChatCompletionPlugin struct {
	ID  PluginID  `json:"id"`
	PDF PDFPlugin `json:"pdf,omitempty"`
}

func CreatePDFPlugin added in v0.1.7

func CreatePDFPlugin(engine PDFEngine) ChatCompletionPlugin

CreatePDFPlugin creates a completion plugin to process PDFs using the specified engine. The engine can be: "mistral-ocr" (for scanned documents/PDFs with images), "pdf-text" (for well-structured PDFs - free), or "native" (only for models that support file input).

type ChatCompletionReasoning

type ChatCompletionReasoning struct {
	// Effort The prompt that was used to generate the reasoning. [high, medium, low]
	Effort *string `json:"prompt,omitempty"`

	// MaxTokens cannot be simultaneously used with effort.
	MaxTokens *int `json:"max_tokens,omitempty"`

	// Exclude defaults to false.
	Exclude *bool `json:"exclude,omitempty"`
}

type ChatCompletionRequest

type ChatCompletionRequest struct {
	Model string `json:"model,omitempty"`
	// Optional model fallbacks: https://openrouter.ai/docs/features/model-routing#the-models-parameter
	Models   []string                `json:"models,omitempty"`
	Provider *ChatProvider           `json:"provider,omitempty"`
	Messages []ChatCompletionMessage `json:"messages"`

	Reasoning *ChatCompletionReasoning `json:"reasoning,omitempty"`

	Plugins []ChatCompletionPlugin `json:"plugins,omitempty"`

	// MaxTokens The maximum number of tokens that can be generated in the chat completion.
	// This value can be used to control costs for text generated via API.
	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
	MaxTokens int `json:"max_tokens,omitempty"`
	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
	MaxCompletionTokens int                           `json:"max_completion_tokens,omitempty"`
	Temperature         float32                       `json:"temperature,omitempty"`
	TopP                float32                       `json:"top_p,omitempty"`
	TopK                int                           `json:"top_k,omitempty"`
	TopA                float32                       `json:"top_a,omitempty"`
	N                   int                           `json:"n,omitempty"`
	Stream              bool                          `json:"stream,omitempty"`
	Stop                []string                      `json:"stop,omitempty"`
	PresencePenalty     float32                       `json:"presence_penalty,omitempty"`
	RepetitionPenalty   float32                       `json:"repetition_penalty,omitempty"`
	ResponseFormat      *ChatCompletionResponseFormat `json:"response_format,omitempty"`
	Seed                *int                          `json:"seed,omitempty"`
	MinP                float32                       `json:"min_p,omitempty"`
	FrequencyPenalty    float32                       `json:"frequency_penalty,omitempty"`
	// LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"`
	// LogProbs indicates whether to return log probabilities of the output tokens or not.
	// If true, returns the log probabilities of each output token returned in the content of message.
	// This option is currently not available on the gpt-4-vision-preview model.
	LogProbs bool `json:"logprobs,omitempty"`
	// TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each
	// token position, each with an associated log probability.
	// logprobs must be set to true if this parameter is used.
	TopLogProbs int    `json:"top_logprobs,omitempty"`
	User        string `json:"user,omitempty"`
	// Deprecated: use Tools instead.
	Functions []FunctionDefinition `json:"functions,omitempty"`
	// Deprecated: use ToolChoice instead.
	FunctionCall any    `json:"function_call,omitempty"`
	Tools        []Tool `json:"tools,omitempty"`
	// This can be either a string or an ToolChoice object.
	ToolChoice any `json:"tool_choice,omitempty"`
	// Options for streaming response. Only set this when you set stream: true.
	StreamOptions *StreamOptions `json:"stream_options,omitempty"`
	// Disable the default behavior of parallel tool calls by setting it: false.
	ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
	// Store can be set to true to store the output of this completion request for use in distillations and evals.
	// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
	Store bool `json:"store,omitempty"`
	// Metadata to store with the completion.
	Metadata map[string]string `json:"metadata,omitempty"`
	// Apply message transforms
	// https://openrouter.ai/docs/features/message-transforms
	Transforms []string `json:"transforms,omitempty"`
	// Optional web search options
	// https://openrouter.ai/docs/features/web-search#specifying-search-context-size
	WebSearchOptions WebSearchOptions `json:"web_search_options,omitempty"`

	Usage *IncludeUsage `json:"usage,omitempty"`
}

type ChatCompletionResponse

type ChatCompletionResponse struct {
	ID                string                 `json:"id"`
	Object            string                 `json:"object"`
	Created           int64                  `json:"created"`
	Model             string                 `json:"model"`
	Choices           []ChatCompletionChoice `json:"choices"`
	Citations         []string               `json:"citations"`
	Usage             *Usage                 `json:"usage,omitempty"`
	SystemFingerprint string                 `json:"system_fingerprint"`
}

ChatCompletionResponse represents a response structure for chat completion API.

type ChatCompletionResponseFormat

type ChatCompletionResponseFormat struct {
	Type       ChatCompletionResponseFormatType        `json:"type,omitempty"`
	JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"`
}

type ChatCompletionResponseFormatJSONSchema

type ChatCompletionResponseFormatJSONSchema struct {
	Name        string         `json:"name"`
	Description string         `json:"description,omitempty"`
	Schema      json.Marshaler `json:"schema"`
	Strict      bool           `json:"strict"`
}

type ChatCompletionResponseFormatType

type ChatCompletionResponseFormatType string
const (
	ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object"
	ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema"
	ChatCompletionResponseFormatTypeText       ChatCompletionResponseFormatType = "text"
)

type ChatCompletionStream

type ChatCompletionStream struct {
	// contains filtered or unexported fields
}

func (*ChatCompletionStream) Close

func (s *ChatCompletionStream) Close()

Close terminates the stream and cleans up resources.

func (*ChatCompletionStream) Recv

Recv reads the next chunk from the stream.

type ChatCompletionStreamChoice

type ChatCompletionStreamChoice struct {
	Index                int                                 `json:"index"`
	Delta                ChatCompletionStreamChoiceDelta     `json:"delta"`
	Logprobs             *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"`
	FinishReason         FinishReason                        `json:"finish_reason"`
	ContentFilterResults *ContentFilterResults               `json:"content_filter_results,omitempty"`
}

type ChatCompletionStreamChoiceDelta

type ChatCompletionStreamChoiceDelta struct {
	Content      string        `json:"content,omitempty"`
	Role         string        `json:"role,omitempty"`
	FunctionCall *FunctionCall `json:"function_call,omitempty"`
	ToolCalls    []ToolCall    `json:"tool_calls,omitempty"`
	Refusal      string        `json:"refusal,omitempty"`
	Reasoning    *string       `json:"reasoning,omitempty"`

	// This property is used for the "reasoning" feature supported by deepseek-reasoner
	// which is not in the official documentation.
	// the doc from deepseek:
	// - https://api-docs.deepseek.com/api/create-chat-completion#responses
	ReasoningContent string `json:"reasoning_content,omitempty"`
}

type ChatCompletionStreamChoiceLogprobs

type ChatCompletionStreamChoiceLogprobs struct {
	Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
	Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
}

type ChatCompletionStreamResponse

type ChatCompletionStreamResponse struct {
	ID                  string                       `json:"id"`
	Object              string                       `json:"object"`
	Created             int64                        `json:"created"`
	Model               string                       `json:"model"`
	Choices             []ChatCompletionStreamChoice `json:"choices"`
	SystemFingerprint   string                       `json:"system_fingerprint"`
	PromptAnnotations   []PromptAnnotation           `json:"prompt_annotations,omitempty"`
	PromptFilterResults []PromptFilterResult         `json:"prompt_filter_results,omitempty"`
	// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
	// When present, it contains a null value except for the last chunk which contains the token usage statistics
	// for the entire request.
	Usage *Usage `json:"usage,omitempty"`
}

type ChatCompletionTokenLogprob

type ChatCompletionTokenLogprob struct {
	Token       string                                 `json:"token"`
	Bytes       []int64                                `json:"bytes,omitempty"`
	Logprob     float64                                `json:"logprob,omitempty"`
	TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
}

type ChatCompletionTokenLogprobTopLogprob

type ChatCompletionTokenLogprobTopLogprob struct {
	Token   string  `json:"token"`
	Bytes   []int64 `json:"bytes"`
	Logprob float64 `json:"logprob"`
}

type ChatMessageImageURL

type ChatMessageImageURL struct {
	URL    string         `json:"url,omitempty"`
	Detail ImageURLDetail `json:"detail,omitempty"`
}

type ChatMessagePart

type ChatMessagePart struct {
	Type ChatMessagePartType `json:"type,omitempty"`
	Text string              `json:"text,omitempty"`
	// Prompt caching
	// https://openrouter.ai/docs/features/prompt-caching
	CacheControl *CacheControl `json:"cache_control,omitempty"`

	ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
	File     *FileContent         `json:"file,omitempty"`
}

type ChatMessagePartType

type ChatMessagePartType string
const (
	ChatMessagePartTypeText     ChatMessagePartType = "text"
	ChatMessagePartTypeImageURL ChatMessagePartType = "image_url"
	ChatMessagePartTypeFile     ChatMessagePartType = "file"
)

type ChatProvider

type ChatProvider struct {
	// The order of the providers in the list determines the order in which they are called.
	Order []string `json:"order"`
	// Allow fallbacks to other providers if the primary provider fails.
	AllowFallbacks bool `json:"allow_fallbacks"`
	// Only use providers that support all parameters in your request.
	RequireParameters bool `json:"require_parameters"`
	// Control whether to use providers that may store data.
	DataCollection DataCollection `json:"data_collection"`
	// List of provider slugs to allow for this request.
	Only []string `json:"only"`
	// List of provider slugs to skip for this request.
	Ignore []string `json:"ignore"`
	// List of quantization levels to filter by (e.g. ["int4", "int8"]).
	Quantizations []string `json:"quantizations"`
	// Sort providers by price or throughput. (e.g. "price" or "throughput").
	Sort ProviderSorting `json:"sort"`
}

Provider Routing: https://openrouter.ai/docs/features/provider-routing

type Client

type Client struct {
	// contains filtered or unexported fields
}

func NewClient

func NewClient(auth string, opts ...Option) *Client

func NewClientWithConfig

func NewClientWithConfig(config ClientConfig) *Client

func (*Client) CreateChatCompletion

func (c *Client) CreateChatCompletion(
	ctx context.Context,
	request ChatCompletionRequest,
) (response ChatCompletionResponse, err error)

CreateChatCompletion — API call to Create a completion for the chat message.

func (*Client) CreateChatCompletionStream

func (c *Client) CreateChatCompletionStream(
	ctx context.Context,
	request ChatCompletionRequest,
) (*ChatCompletionStream, error)

CreateChatCompletionStream — API call to Create a completion for the chat message with streaming.

type ClientConfig

type ClientConfig struct {
	BaseURL          string
	OrgID            string
	AssistantVersion string
	HTTPClient       HTTPDoer
	HttpReferer      string
	XTitle           string

	EmptyMessagesLimit uint
	// contains filtered or unexported fields
}

ClientConfig is a configuration for the openrouter client.

func DefaultConfig

func DefaultConfig(authToken string) *ClientConfig

type CompletionTokenDetails added in v0.1.8

type CompletionTokenDetails struct {
	ReasoningTokens int `json:"reasoning_tokens"`
}

type Content

type Content struct {
	Text  string
	Multi []ChatMessagePart
}

Content handles both string and multi-part content.

func (Content) MarshalJSON

func (c Content) MarshalJSON() ([]byte, error)

MarshalJSON serializes ContentType as a string or array.

func (*Content) UnmarshalJSON

func (c *Content) UnmarshalJSON(data []byte) error

UnmarshalJSON deserializes ContentType from a string or array.

type ContentFilterResults

type ContentFilterResults struct {
	Hate      Hate      `json:"hate,omitempty"`
	SelfHarm  SelfHarm  `json:"self_harm,omitempty"`
	Sexual    Sexual    `json:"sexual,omitempty"`
	Violence  Violence  `json:"violence,omitempty"`
	JailBreak JailBreak `json:"jailbreak,omitempty"`
	Profanity Profanity `json:"profanity,omitempty"`
}

type CostDetails added in v0.1.8

type CostDetails struct {
	UpstreamInferenceCost float64 `json:"upstream_inference_cost"`
}

type DataCollection added in v0.1.6

type DataCollection string
const (
	DataCollectionAllow DataCollection = "allow"
	DataCollectionDeny  DataCollection = "deny"
)

type ErrorResponse

type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}

type FileContent added in v0.1.7

type FileContent struct {
	Filename string `json:"filename"`
	FileData string `json:"file_data"`
}

FileContent represents file content for PDF processing

type FinishReason

type FinishReason string
const (
	FinishReasonStop          FinishReason = "stop"
	FinishReasonLength        FinishReason = "length"
	FinishReasonFunctionCall  FinishReason = "function_call"
	FinishReasonToolCalls     FinishReason = "tool_calls"
	FinishReasonContentFilter FinishReason = "content_filter"
	FinishReasonNull          FinishReason = "null"
)

func (FinishReason) MarshalJSON

func (r FinishReason) MarshalJSON() ([]byte, error)

type FunctionCall

type FunctionCall struct {
	Name string `json:"name,omitempty"`
	// call function with arguments in JSON format
	Arguments string `json:"arguments,omitempty"`
}

type FunctionDefinition

type FunctionDefinition struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
	Strict      bool   `json:"strict,omitempty"`
	// Parameters is an object describing the function.
	// You can pass json.RawMessage to describe the schema,
	// or you can pass in a struct which serializes to the proper JSON schema.
	// The jsonschema package is provided for convenience, but you should
	// consider another specialized library if you require more complex schemas.
	Parameters any `json:"parameters"`
}

type HTTPDoer

type HTTPDoer interface {
	Do(req *http.Request) (*http.Response, error)
}

type HTTPRequestBuilder

type HTTPRequestBuilder struct {
	// contains filtered or unexported fields
}

func NewRequestBuilder

func NewRequestBuilder() *HTTPRequestBuilder

func (*HTTPRequestBuilder) Build

func (b *HTTPRequestBuilder) Build(
	ctx context.Context,
	method string,
	url string,
	body any,
	header http.Header,
) (req *http.Request, err error)

type Hate

type Hate struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type ImageURLDetail

type ImageURLDetail string
const (
	ImageURLDetailHigh ImageURLDetail = "high"
	ImageURLDetailLow  ImageURLDetail = "low"
	ImageURLDetailAuto ImageURLDetail = "auto"
)

type IncludeUsage

type IncludeUsage struct {
	Include bool `json:"include"`
}

type JSONMarshaller

type JSONMarshaller struct{}

func (*JSONMarshaller) Marshal

func (jm *JSONMarshaller) Marshal(value any) ([]byte, error)

type JailBreak

type JailBreak struct {
	Filtered bool `json:"filtered"`
	Detected bool `json:"detected"`
}

type LogProb

type LogProb struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []byte  `json:"bytes,omitempty"` // Omitting the field if it is null
	// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
	// In rare cases, there may be fewer than the number of requested top_logprobs returned.
	TopLogProbs []TopLogProbs `json:"top_logprobs"`
}

LogProb represents the probability information for a token.

type LogProbs

type LogProbs struct {
	// Content is a list of message content tokens with log probability information.
	Content []LogProb `json:"content"`
}

LogProbs is the top-level structure containing the log probability information.

type Marshaller

type Marshaller interface {
	Marshal(value any) ([]byte, error)
}

type Metadata

type Metadata map[string]any

Metadata provides additional information about the error.

type Option

type Option func(*ClientConfig)

func WithHTTPReferer

func WithHTTPReferer(referer string) Option

func WithXTitle

func WithXTitle(title string) Option

type PDFEngine added in v0.1.6

type PDFEngine string
const (
	// Best for scanned documents or PDFs with images ($2 per 1,000 pages).
	PDFEngineMistralOCR PDFEngine = "mistral-ocr"
	// Best for well-structured PDFs with clear text content (Free).
	PDFEnginePDFText PDFEngine = "pdf-text"
	// Only available for models that support file input natively (charged as input tokens).
	PDFEngineNative PDFEngine = "native"
)

type PDFPlugin added in v0.1.6

type PDFPlugin struct {
	Engine string `json:"engine"`
}

type PluginID added in v0.1.6

type PluginID string
const (
	// Processing PDFs: https://openrouter.ai/docs/features/images-and-pdfs#processing-pdfs
	PluginIDFileParser PluginID = "file-parser"
	// Web search plugin: https://openrouter.ai/docs/features/web-search
	PluginIDWeb PluginID = "web"
)

type Profanity

type Profanity struct {
	Filtered bool `json:"filtered"`
	Detected bool `json:"detected"`
}

type PromptAnnotation

type PromptAnnotation struct {
	PromptIndex          int                  `json:"prompt_index,omitempty"`
	ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}

type PromptFilterResult

type PromptFilterResult struct {
	Index                int                  `json:"index"`
	ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}

type PromptTokenDetails added in v0.1.8

type PromptTokenDetails struct {
	CachedTokens int `json:"cached_tokens"`
}

type ProviderSorting added in v0.1.6

type ProviderSorting string
const (
	ProviderSortingPrice      ProviderSorting = "price"
	ProviderSortingThroughput ProviderSorting = "throughput"
	ProviderSortingLatency    ProviderSorting = "latency"
)

type RequestBuilder

type RequestBuilder interface {
	Build(ctx context.Context, method, url string, body any, header http.Header) (*http.Request, error)
}

type RequestError

type RequestError struct {
	HTTPStatus     string
	HTTPStatusCode int
	Err            error
	Body           []byte
}

RequestError provides information about generic request errors.

func (*RequestError) Error

func (e *RequestError) Error() string

func (*RequestError) Unwrap

func (e *RequestError) Unwrap() error

type SearchContextSize added in v0.1.3

type SearchContextSize string
const (
	SearchContextSizeLow    SearchContextSize = "low"
	SearchContextSizeMedium SearchContextSize = "medium"
	SearchContextSizeHigh   SearchContextSize = "high"
)

type SelfHarm

type SelfHarm struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type Sexual

type Sexual struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type StreamOptions

type StreamOptions struct {
	// If set, an additional chunk will be streamed before the data: [DONE] message.
	// The usage field on this chunk shows the token usage statistics for the entire request,
	// and the choices field will always be an empty array.
	// All other chunks will also include a usage field, but with a null value.
	IncludeUsage bool `json:"include_usage,omitempty"`
}

type Tool

type Tool struct {
	Type     ToolType            `json:"type"`
	Function *FunctionDefinition `json:"function,omitempty"`
}

type ToolCall

type ToolCall struct {
	// Index is not nil only in chat completion chunk object
	Index    *int         `json:"index,omitempty"`
	ID       string       `json:"id,omitempty"`
	Type     ToolType     `json:"type"`
	Function FunctionCall `json:"function"`
}

type ToolType

type ToolType string
const (
	ToolTypeFunction ToolType = "function"
)

type TopLogProbs

type TopLogProbs struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []byte  `json:"bytes,omitempty"`
}

type URLCitation added in v0.1.3

type URLCitation struct {
	StartIndex int    `json:"start_index"`
	EndIndex   int    `json:"end_index"`
	Title      string `json:"title"`
	URL        string `json:"url"`
}

type Usage

type Usage struct {
	PromptTokens           int                    `json:"prompt_tokens"`
	CompletionTokens       int                    `json:"completion_tokens"`
	CompletionTokenDetails CompletionTokenDetails `json:"completion_token_details"`
	TotalTokens            int                    `json:"total_tokens"`

	Cost        float64     `json:"cost"`
	CostDetails CostDetails `json:"cost_details"`

	PromptTokenDetails PromptTokenDetails `json:"prompt_token_details"`
}

Usage Represents the total token usage per request to OpenAI.

type Violence

type Violence struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type WebSearchOptions added in v0.1.3

type WebSearchOptions struct {
	SearchContextSize SearchContextSize `json:"search_context_size"`
}

Directories

Path Synopsis
examples
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL