openrouter

package
v0.0.0-...-9409c51
Published: Jun 6, 2025 License: Unlicense Imports: 17 Imported by: 0

README

OpenRouter Integration

Comprehensive OpenRouter API integration with a dual-path execution system for universal LLM compatibility.

Features

Dual-Path Execution System ✅

Automatically routes requests based on model capabilities:

// Structured Path: For models supporting JSON schema
if capabilities.SupportsStructuredOutput {
    return executeWithStructuredOutput(goal, prompt, input)
} else {
    // Universal Path: prompt-based fallback for all other models
    return executeWithUniversalCompatibility(goal, prompt, input)
}
Model Capabilities Detection ✅

Intelligent model classification and capability detection:

capabilities := openrouter.GetModelCapabilities("openai/gpt-4")
// Returns: SupportsStructuredOutput, MaxContextLength, Provider, etc.
JSON Schema Generation ✅

Automatic schema generation from Go structs and JSON examples:

schema, err := GenerateSchemaFromJSONExample(jsonExample)
responseFormat, err := UseOpenRouterJsonFormat(outputExample, "SchemaName")
Universal Prompts ✅

Fallback system for non-structured-output models using enhanced prompting:

universalPrompt := CreateUniversalCompatibilityPrompt(systemMsg, schema, inputExample, outputExample)

Key Components

Status: ✅ Complete

Full OpenRouter integration with dual-path execution, automatic model detection, and universal LLM compatibility.
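The sketch below shows how these pieces are intended to combine: the model ID is checked with SupportsStructuredOutput, and either a JSON-schema response format or a universal compatibility prompt is attached to the request. The buildRequest helper and the schema name "Output" are illustrative, not part of the package.

func buildRequest(modelID, systemPrompt string, schema map[string]interface{}, inputExample, outputExample json.RawMessage) (*OpenRouterRequest, error) {
	req := &OpenRouterRequest{Model: &modelID}

	if SupportsStructuredOutput(modelID) {
		// Structured path: OpenRouter enforces the schema server-side.
		format, err := UseOpenRouterJsonFormatFromJSON(outputExample, "Output")
		if err != nil {
			return nil, err
		}
		req.ResponseFormat = format
		req.Messages = []Message{{Role: "system", Content: systemPrompt}}
	} else {
		// Universal path: fold schema instructions into an enhanced system prompt.
		universal := CreateUniversalCompatibilityPrompt(systemPrompt, schema, inputExample, outputExample)
		req.Messages = []Message{{Role: "system", Content: universal}}
	}
	return req, nil
}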

Documentation

Index

Constants

View Source
const ChatTemplate = `` /* 32197-byte string literal not displayed */

ChatTemplate holds the HTML template for the chat interface

Variables

View Source
var (
	ErrBadRequest          = errors.New("bad request (invalid or missing params, CORS)")
	ErrInvalidCredentials  = errors.New("invalid credentials")
	ErrInsufficientCredits = errors.New("insufficient credits")
	ErrModerationFlag      = errors.New("input flagged by moderation")
	ErrTimeout             = errors.New("request timed out")
	ErrRateLimited         = errors.New("rate limited")
	ErrModelDown           = errors.New("model unavailable")
	ErrNoProviders         = errors.New("no available providers meet requirements")
	ErrNoResponse          = errors.New("no message response received from API")
)

Standard error codes as defined in OpenRouter documentation
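If the client surfaces these sentinels (directly or wrapped), callers can branch on them with errors.Is. A minimal sketch, assuming client and req were set up as shown under OpenRouter below; the handling policy itself is illustrative:

resp, err := client.GenerateNonStreamingChatResponse(req)
switch {
case err == nil:
	// success: use resp
case errors.Is(err, ErrRateLimited), errors.Is(err, ErrModelDown):
	// transient: back off and retry, or route to a fallback model
case errors.Is(err, ErrInvalidCredentials), errors.Is(err, ErrInsufficientCredits):
	// not retryable for this API key: surface immediately
default:
	// unexpected failure
}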

View Source
var ErrGenerationIDNotFound = errors.New("generation information for this generation ID was not found, either this id is invalid or you requested the endpoint too soon")

ErrGenerationIDNotFound is returned when no generation information can be found for the given generation ID, either because the ID is invalid or because the stats endpoint was queried too soon

View Source
var JSONObjectResponseFormat = "json_object"
View Source
var JSONSchemaStringResponseFormat = "json_schema"
View Source
var StructuredOutputModels = map[string]struct{}{
	"qwen/qwen3-30b-a3b":                        {},
	"qwen/qwen3-32b":                            {},
	"qwen/qwen3-235b-a22b":                      {},
	"openai/o4-mini-high":                       {},
	"openai/o3":                                 {},
	"openai/o4-mini":                            {},
	"openai/gpt-4.1":                            {},
	"openai/gpt-4.1-mini":                       {},
	"openai/gpt-4.1-nano":                       {},
	"meta-llama/llama-4-maverick:free":          {},
	"meta-llama/llama-4-maverick":               {},
	"meta-llama/llama-4-scout:free":             {},
	"meta-llama/llama-4-scout":                  {},
	"google/gemini-2.5-pro-exp-03-25":           {},
	"mistralai/mistral-small-3.1-24b-instruct":  {},
	"google/gemma-3-4b-it:free":                 {},
	"google/gemma-3-12b-it:free":                {},
	"cohere/command-a":                          {},
	"openai/gpt-4o-mini-search-preview":         {},
	"openai/gpt-4o-search-preview":              {},
	"google/gemma-3-27b-it:free":                {},
	"qwen/qwq-32b":                              {},
	"openai/gpt-4.5-preview":                    {},
	"google/gemini-2.0-flash-lite-001":          {},
	"mistralai/mistral-saba":                    {},
	"openai/o3-mini-high":                       {},
	"google/gemini-2.0-flash-001":               {},
	"openai/o3-mini":                            {},
	"mistralai/mistral-small-24b-instruct-2501": {},
	"deepseek/deepseek-r1-distill-llama-70b":    {},
	"deepseek/deepseek-r1":                      {},
	"mistralai/codestral-2501":                  {},
	"deepseek/deepseek-chat":                    {},
	"openai/o1":                                 {},
	"cohere/command-r7b-12-2024":                {},
	"meta-llama/llama-3.3-70b-instruct":         {},
	"openai/gpt-4o-2024-11-20":                  {},
	"mistralai/mistral-large-2411":              {},
	"mistralai/mistral-large-2407":              {},
	"mistralai/pixtral-large-2411":              {},
	"qwen/qwen-2.5-7b-instruct":                 {},
	"google/gemini-flash-1.5-8b":                {},
	"qwen/qwen-2.5-72b-instruct":                {},
	"mistralai/pixtral-12b":                     {},
	"cohere/command-r-plus-08-2024":             {},
	"cohere/command-r-08-2024":                  {},
	"openai/chatgpt-4o-latest":                  {},
	"openai/gpt-4o-2024-08-06":                  {},
	"meta-llama/llama-3.1-405b-instruct":        {},
	"meta-llama/llama-3.1-70b-instruct":         {},
	"mistralai/mistral-nemo":                    {},
	"openai/gpt-4o-mini":                        {},
	"openai/gpt-4o-mini-2024-07-18":             {},
	"01-ai/yi-large":                            {},
	"openai/gpt-4o":                             {},
	"openai/gpt-4o:extended":                    {},
	"openai/gpt-4o-2024-05-13":                  {},
	"mistralai/mixtral-8x22b-instruct":          {},
	"google/gemini-pro-1.5":                     {},
	"cohere/command-r-plus":                     {},
	"cohere/command-r-plus-04-2024":             {},
	"cohere/command":                            {},
	"cohere/command-r":                          {},
	"cohere/command-r-03-2024":                  {},
	"mistralai/mistral-large":                   {},
	"openai/gpt-3.5-turbo-0613":                 {},
	"openai/gpt-4-turbo-preview":                {},
	"mistralai/mistral-medium":                  {},
	"mistralai/mistral-small":                   {},
	"mistralai/mistral-tiny":                    {},
	"openai/gpt-3.5-turbo-1106":                 {},
	"openai/gpt-4-1106-preview":                 {},
	"openai/gpt-4-32k-0314":                     {},
	"openai/gpt-4-0314":                         {},
}

StructuredOutputModels is a simple set of models that support structured output. If a model is in this set, it supports structured output; if not, it doesn't. This set determines which execution path (structured vs universal) to use

Functions

func AddStructuredOutputModel

func AddStructuredOutputModel(modelID string)

AddStructuredOutputModel allows adding new models that support structured output at runtime. This is useful for testing or for adding support for new models
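For example, a newly released model known to support structured output can be registered before requests are routed (the model ID below is hypothetical):

newModel := "example-provider/shiny-new-model" // hypothetical model ID
if !SupportsStructuredOutput(newModel) {
	AddStructuredOutputModel(newModel)
}
// Requests for newModel now take the structured path instead of the universal fallback.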

func CreateOpenRouterRouter

func CreateOpenRouterRouter(openRouter *OpenRouter) http.Handler

func CreateUniversalCompatibilityPrompt

func CreateUniversalCompatibilityPrompt(existingSystemPrompt string, schema map[string]interface{}, inputExample, outputExample json.RawMessage) string

CreateUniversalCompatibilityPrompt is the main function that creates a complete system prompt for universal LLM compatibility, merging existing prompts with JSON schema instructions
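A minimal sketch of preparing a universal prompt for a model without structured output support; the schema map and example payloads are illustrative:

schema := map[string]interface{}{
	"type": "object",
	"properties": map[string]interface{}{
		"sentiment": map[string]interface{}{"type": "string"},
		"score":     map[string]interface{}{"type": "number"},
	},
	"required": []string{"sentiment", "score"},
}
inputExample := json.RawMessage(`{"text": "I love this product"}`)
outputExample := json.RawMessage(`{"sentiment": "positive", "score": 0.97}`)

systemPrompt := CreateUniversalCompatibilityPrompt(
	"You are a sentiment analysis assistant.", // existing prompt is kept first
	schema, inputExample, outputExample,
)
// systemPrompt is then sent as the "system" message of an OpenRouterRequest.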

func FormatSchemaForPrompt

func FormatSchemaForPrompt(schema map[string]interface{}) string

FormatSchemaForPrompt converts a JSON schema into a human-readable format that's easy for LLMs to understand and follow

func GenerateUniversalSystemPrompt

func GenerateUniversalSystemPrompt(schema map[string]interface{}, inputExample, outputExample json.RawMessage) string

GenerateUniversalSystemPrompt creates a system prompt that guides ANY LLM to produce valid JSON output matching the provided schema, even without structured output support

func GetAllSupportedModels

func GetAllSupportedModels() []string

GetAllSupportedModels returns a copy of all models that support structured output. This is useful for debugging or API endpoints

func GetSupportedModels

func GetSupportedModels() []string

GetSupportedModels returns a list of all models that support structured output

func HasChoiceErrors

func HasChoiceErrors(choices []*BaseChoice) bool

HasChoiceErrors checks if any choices in a response contain errors

func IsNoChoicesError

func IsNoChoicesError(choicesCount int) bool

IsNoChoicesError checks if the response has no choices (which indicates an error)

func MergeSystemPrompts

func MergeSystemPrompts(existingSystemPrompt, universalPrompt string) string

MergeSystemPrompts implements the collision strategy for combining existing system prompts with universal JSON schema instructions. The original prompt comes first, then our instructions.

func PseudoStructuredResponseCleaner

func PseudoStructuredResponseCleaner(response string) string

func RemoveStructuredOutputModel

func RemoveStructuredOutputModel(modelID string)

RemoveStructuredOutputModel removes a model from the structured output support list. This is useful for testing

func ServeChatUI

func ServeChatUI(w http.ResponseWriter, r *http.Request)

ServeChatUI handles the request to show the chat UI

func SupportsStructuredOutput

func SupportsStructuredOutput(modelID string) bool

SupportsStructuredOutput checks if a model supports structured output. It simply checks whether the model is in the StructuredOutputModels set

func UseOpenRouterJsonFormat

func UseOpenRouterJsonFormat(exampleOutput any, schemaName string) (json.RawMessage, error)

UseOpenRouterJsonFormat creates a JSON schema response format for OpenRouter requests based on the provided example response object
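A sketch of deriving a response format from an example Go value and attaching it to an existing request; the Sentiment struct is illustrative:

type Sentiment struct {
	Label string  `json:"label"`
	Score float64 `json:"score"`
}

responseFormat, err := UseOpenRouterJsonFormat(Sentiment{Label: "positive", Score: 0.97}, "Sentiment")
if err != nil {
	// handle error
}
req.ResponseFormat = responseFormat // Parameters is embedded in OpenRouterRequest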

func UseOpenRouterJsonFormatFromJSON

func UseOpenRouterJsonFormatFromJSON(jsonExample json.RawMessage, schemaName string) (json.RawMessage, error)

UseOpenRouterJsonFormatFromJSON creates a JSON schema response format for OpenRouter requests specifically designed for JSON examples (not Go structs). This is optimized for the dual-path execution system where we work with JSON data throughout the pipeline.

func ValidateJSONAgainstSchema

func ValidateJSONAgainstSchema(data json.RawMessage, schema *Definition) error

ValidateJSONAgainstSchema validates JSON data against a schema definition

func ValidateJSONResponseFormat

func ValidateJSONResponseFormat(responseFormat json.RawMessage) error

ValidateJSONResponseFormat validates that a response format is properly structured for OpenRouter's JSON schema requirements

Types

type BaseChoice

type BaseChoice struct {
	FinishReason       *string `json:"finish_reason"`        // "stop", "length", "tool_calls", etc. or null
	NativeFinishReason *string `json:"native_finish_reason"` // Provider-specific reason or null
	Error              *struct {
		Code     int            `json:"code"`
		Message  string         `json:"message"`
		Metadata map[string]any `json:"metadata,omitempty"`
	} `json:"error,omitempty"`
}

BaseChoice holds fields common to all types of choices within a response.

type DataType

type DataType string
const (
	Object  DataType = "object"
	Number  DataType = "number"
	Integer DataType = "integer"
	String  DataType = "string"
	Array   DataType = "array"
	Null    DataType = "null"
	Boolean DataType = "boolean"
)

type Definition

type Definition struct {
	// Type specifies the data type of the schema.
	Type DataType `json:"type,omitempty"`
	// Description is the description of the schema.
	Description string `json:"description,omitempty"`
	// Enum is used to restrict a value to a fixed set of values. It must be an array with at least
	// one element, where each element is unique. You will probably only use this with strings.
	Enum []string `json:"enum,omitempty"`
	// Properties describes the properties of an object, if the schema type is Object.
	Properties map[string]Definition `json:"properties,omitempty"`
	// Required specifies which properties are required, if the schema type is Object.
	Required []string `json:"required,omitempty"`
	// Items specifies which data type an array contains, if the schema type is Array.
	Items *Definition `json:"items,omitempty"`
	// AdditionalProperties is used to control the handling of properties in an object
	// that are not explicitly defined in the properties section of the schema. example:
	// additionalProperties: true
	// additionalProperties: false
	// additionalProperties: jsonschema.Definition{Type: jsonschema.String}
	AdditionalProperties any `json:"additionalProperties,omitempty"`
}

Definition is a struct for describing a JSON Schema. It is fairly limited, and you may have better luck using a third-party library.

func GenerateSchemaForType

func GenerateSchemaForType(v any) (*Definition, error)

func GenerateSchemaFromJSONExample

func GenerateSchemaFromJSONExample(example json.RawMessage) (*Definition, error)

GenerateSchemaFromJSONExample creates a schema definition from a JSON example
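A sketch pairing schema inference with ValidateJSONAgainstSchema; the example payloads are illustrative:

example := json.RawMessage(`{"title": "Dune", "year": 1965, "tags": ["sci-fi"]}`)
schema, err := GenerateSchemaFromJSONExample(example)
if err != nil {
	// handle error
}

// Later, verify that a model's reply matches the inferred schema.
reply := json.RawMessage(`{"title": "Neuromancer", "year": 1984, "tags": ["cyberpunk"]}`)
if err := ValidateJSONAgainstSchema(reply, schema); err != nil {
	// reply does not conform; clean it up or re-prompt
}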

func (*Definition) MarshalJSON

func (d *Definition) MarshalJSON() ([]byte, error)

type ErrorResponse

type ErrorResponse struct {
	Details struct {
		Code     int                    `json:"code"`
		Message  string                 `json:"message"`
		Metadata map[string]interface{} `json:"metadata,omitempty"`
	} `json:"error"`
}

ErrorResponse matches the OpenRouter API error structure

func ExtractChoiceError

func ExtractChoiceError(choice *BaseChoice) *ErrorResponse

ExtractChoiceError extracts error information from a choice if present

func GetChoiceErrors

func GetChoiceErrors(choices []*BaseChoice) []*ErrorResponse

GetChoiceErrors extracts all errors from choices
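A sketch of inspecting per-choice errors on a non-streaming response (resp is assumed to come from GenerateNonStreamingChatResponse); because the helpers take []*BaseChoice, the embedded field is collected first:

var base []*BaseChoice
for i := range resp.Choices {
	base = append(base, &resp.Choices[i].BaseChoice)
}
if HasChoiceErrors(base) {
	for _, choiceErr := range GetChoiceErrors(base) {
		log.Printf("choice error: %v", choiceErr) // *ErrorResponse implements error
	}
}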

func (*ErrorResponse) Error

func (e *ErrorResponse) Error() string

Error implements the error interface for ErrorResponse

type FunctionDelta

type FunctionDelta struct {
	Name      *string `json:"name,omitempty"` // Function name (often only in first chunk)
	Arguments string  `json:"arguments"`      // Argument chunk (JSON string delta)
}

FunctionDelta represents the function details delta in a tool call.

type GenerationStats

type GenerationStats struct {
	ID                     string  `json:"id"`
	TotalCost              float64 `json:"total_cost"`
	CreatedAt              string  `json:"created_at"`
	Model                  string  `json:"model"`
	Origin                 string  `json:"origin"`
	Usage                  float64 `json:"usage"`
	IsByok                 bool    `json:"is_byok"`
	UpstreamID             string  `json:"upstream_id"`
	CacheDiscount          float64 `json:"cache_discount"`
	AppID                  int     `json:"app_id"`
	Streamed               bool    `json:"streamed"`
	Cancelled              bool    `json:"cancelled"`
	ProviderName           string  `json:"provider_name"`
	Latency                int     `json:"latency"`
	ModerationLatency      int     `json:"moderation_latency"`
	GenerationTime         int     `json:"generation_time"`
	FinishReason           string  `json:"finish_reason"`
	NativeFinishReason     string  `json:"native_finish_reason"`
	TokensPrompt           int     `json:"tokens_prompt"`
	TokensCompletion       int     `json:"tokens_completion"`
	NativeTokensPrompt     int     `json:"native_tokens_prompt"`
	NativeTokensCompletion int     `json:"native_tokens_completion"`
	NativeTokensReasoning  int     `json:"native_tokens_reasoning"`
	NumMediaPrompt         int     `json:"num_media_prompt"`
	NumMediaCompletion     int     `json:"num_media_completion"`
	NumSearchResults       int     `json:"num_search_results"`
}

GenerationStats contains detailed information about a generation

type GenerationStatsResponse

type GenerationStatsResponse struct {
	Data GenerationStats `json:"data"`
}

GenerationStatsResponse represents the response from the /api/v1/generation endpoint

type Message

type Message struct {
	Role       string  `json:"role"`    // "user", "assistant", "system", or "tool"
	Content    string  `json:"content"` // Simple string content
	Name       *string `json:"name,omitempty"`
	ToolCallID *string `json:"tool_call_id,omitempty"` // Required if role is "tool"
}

Message represents a single message in the chat conversation.

type ModerationErrorMetadata

type ModerationErrorMetadata struct {
	Reasons      []string `json:"reasons"`
	FlaggedInput string   `json:"flagged_input"`
	ProviderName string   `json:"provider_name"`
	ModelSlug    string   `json:"model_slug"`
}

ModerationErrorMetadata defines the structure for moderation-related errors

func IsModerationError

func IsModerationError(err error) (*ModerationErrorMetadata, bool)

IsModerationError checks if an error is a moderation error and returns parsed metadata
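A sketch of handling moderation failures distinctly from other errors, assuming client and req as set up elsewhere; the logging is illustrative:

resp, err := client.GenerateNonStreamingChatResponse(req)
if err != nil {
	if meta, ok := IsModerationError(err); ok {
		log.Printf("input flagged by %s (%s): reasons=%v flagged=%q",
			meta.ProviderName, meta.ModelSlug, meta.Reasons, meta.FlaggedInput)
	}
	return err
}
// use resp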

func (*ModerationErrorMetadata) Error

func (m *ModerationErrorMetadata) Error() string

Error implements the error interface for ModerationErrorMetadata

type NonStreamingChatChoice

type NonStreamingChatChoice struct {
	BaseChoice
	Message ResponseMessage `json:"message"`
}

NonStreamingChatChoice represents a choice in a standard chat completion response

type NonStreamingChatResponse

type NonStreamingChatResponse struct {
	OpenRouterBaseResponse
	Choices []NonStreamingChatChoice `json:"choices"`
}

NonStreamingChatResponse represents a standard (non-streamed) chat completion response.

func ValidateNonStreamingResponse

func ValidateNonStreamingResponse(respBody []byte, statusCode int) (*NonStreamingChatResponse, error)

ValidateNonStreamingResponse checks a non-streaming response for various error conditions and returns standardized errors when problems are found

type OpenRouter

type OpenRouter struct {
	ApiKey string
}

func CreateOpenRouter

func CreateOpenRouter(apiKey string) (*OpenRouter, error)

func (*OpenRouter) GenerateNonStreamingChatResponse

func (o *OpenRouter) GenerateNonStreamingChatResponse(request *OpenRouterRequest) (*NonStreamingChatResponse, error)

GenerateNonStreamingChatResponse sends a request expected to yield a standard chat response. Assumes request.Stream is false or nil, and request.Messages is used.
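A minimal non-streaming sketch; the apiKey variable, model ID, and messages are illustrative:

client, err := CreateOpenRouter(apiKey)
if err != nil {
	// handle error
}

model := "openai/gpt-4o-mini"
req := &OpenRouterRequest{
	Model: &model,
	Messages: []Message{
		{Role: "system", Content: "You are a concise assistant."},
		{Role: "user", Content: "Summarize the plot of Dune in one sentence."},
	},
}

resp, err := client.GenerateNonStreamingChatResponse(req)
if err != nil {
	// handle error
}
if len(resp.Choices) > 0 && resp.Choices[0].Message.Content != nil {
	fmt.Println(*resp.Choices[0].Message.Content)
}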

func (*OpenRouter) GeneratePromptCompletionResponse

func (o *OpenRouter) GeneratePromptCompletionResponse(request *OpenRouterRequest) (*PromptCompletionResponse, error)

GeneratePromptCompletionResponse sends a request expected to yield a simple prompt completion. Assumes request.Stream is false or nil, and request.Prompt is used.

func (*OpenRouter) GenerateStreamingChatResponse

func (o *OpenRouter) GenerateStreamingChatResponse(ctx context.Context, request *OpenRouterRequest) (<-chan *StreamingChatResponse, error)

GenerateStreamingChatResponse sends a request and returns a channel to receive streaming chat chunks. Assumes request.Stream is explicitly set to true. The caller MUST read from the channel until it is closed. Errors encountered during streaming will cause the channel to be closed prematurely. Context can be used to cancel the request and clean up resources.
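A sketch of consuming the stream; the request setup mirrors the non-streaming example above, and the channel is drained until it closes:

ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel() // cancelling stops the request and releases resources

streaming := true
req.Stream = &streaming // Stream must be explicitly set to true

chunks, err := client.GenerateStreamingChatResponse(ctx, req)
if err != nil {
	// handle error
}
for chunk := range chunks { // read until the channel is closed
	for _, choice := range chunk.Choices {
		if choice.Delta.Content != nil {
			fmt.Print(*choice.Delta.Content)
		}
	}
}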

func (*OpenRouter) GetGenerationStats

func (o *OpenRouter) GetGenerationStats(generationID string) (*GenerationStats, error)

GetGenerationStats retrieves detailed information about a generation by its ID. WARNING: you must wait around 400 ms before calling the generation stats endpoint, or you will get a 404 error
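A sketch that respects the delay noted above, assuming client and req as set up elsewhere; the 500 ms pause is an illustrative safety margin:

resp, err := client.GenerateNonStreamingChatResponse(req)
if err != nil {
	// handle error
}

// Give OpenRouter time to record the generation before querying its stats.
time.Sleep(500 * time.Millisecond)

stats, err := client.GetGenerationStats(resp.ID)
if err != nil {
	// handle error (ErrGenerationIDNotFound if queried too soon or the ID is invalid)
}
fmt.Printf("cost=%.6f, tokens=%d prompt / %d completion, provider=%s\n",
	stats.TotalCost, stats.TokensPrompt, stats.TokensCompletion, stats.ProviderName)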

type OpenRouterBaseResponse

type OpenRouterBaseResponse struct {
	ID                string         `json:"id"`
	Created           int64          `json:"created"`
	Model             string         `json:"model"`
	Object            string         `json:"object"` // "chat.completion" or "chat.completion.chunk"
	SystemFingerprint *string        `json:"system_fingerprint,omitempty"`
	Usage             *ResponseUsage `json:"usage,omitempty"` // Present in non-streaming responses or in the final stream chunk
}

OpenRouterBaseResponse holds fields common to all top-level response objects.

type OpenRouterParameters

type OpenRouterParameters struct {
	Temperature       float64 `json:"temperature,omitempty"`
	TopP              float64 `json:"top_p,omitempty"`
	FrequencyPenalty  float64 `json:"frequency_penalty,omitempty"`
	PresencePenalty   float64 `json:"presence_penalty,omitempty"`
	RepetitionPenalty float64 `json:"repetition_penalty,omitempty"`
	TopK              int     `json:"top_k,omitempty"`
}

type OpenRouterRequest

type OpenRouterRequest struct {
	// Either Messages or Prompt is required.
	Messages []Message `json:"messages,omitempty"`
	Prompt   *string   `json:"prompt,omitempty"`

	Model *string `json:"model,omitempty"` // Uses user's default if unspecified
	Parameters
}

OpenRouterRequest represents the request body sent to the OpenRouter API.

type OpenRouterRouter

type OpenRouterRouter struct {
	OpenRouter *OpenRouter
}

type Parameters

type Parameters struct {
	// Allows forcing the model to produce a specific output format
	// See models page and note on this docs page for which models support it
	ResponseFormat json.RawMessage `json:"response_format,omitempty"`

	Stop []string `json:"stop,omitempty"` // String(s) to stop generation at

	Stream    *bool `json:"stream,omitempty"`     // Enable streaming
	MaxTokens *int  `json:"max_tokens,omitempty"` // Range: [1, context_length)

	// Tool calling
	Tools []struct {
		Type     string `json:"type"` // Should be "function"
		Function struct {
			Description *string        `json:"description,omitempty"`
			Name        string         `json:"name"`
			Parameters  map[string]any `json:"parameters"` // JSON Schema object
		} `json:"function"`
	} `json:"tools,omitempty"`

	ToolChoice any `json:"tool_choice,omitempty"` // "none", "auto", or {"type": "function", "function": {"name": "..."}}

	// LLM Parameters (Optional)
	Temperature       *float64        `json:"temperature,omitempty"`        // Range: [0, 2]
	TopP              *float64        `json:"top_p,omitempty"`              // Range: (0, 1]
	TopK              *int            `json:"top_k,omitempty"`              // Range: [1, Infinity) Not available for OpenAI models
	FrequencyPenalty  *float64        `json:"frequency_penalty,omitempty"`  // Range: [-2, 2]
	PresencePenalty   *float64        `json:"presence_penalty,omitempty"`   // Range: [-2, 2]
	RepetitionPenalty *float64        `json:"repetition_penalty,omitempty"` // Range: (0, 2]
	Seed              *int            `json:"seed,omitempty"`               // Integer only
	LogitBias         map[int]float64 `json:"logit_bias,omitempty"`         // { token_id: bias }
	TopLogprobs       *int            `json:"top_logprobs,omitempty"`       // Integer only
	MinP              *float64        `json:"min_p,omitempty"`              // Range: [0, 1]
	TopA              *float64        `json:"top_a,omitempty"`              // Range: [0, 1]

	// OpenRouter-only parameters (Optional)
	Transforms []string `json:"transforms,omitempty"` // Prompt transforms
	Models     []string `json:"models,omitempty"`     // Model routing list
	Route      *string  `json:"route,omitempty"`      // Model routing strategy ("fallback")
	// Provider   *ProviderPreferences `json:"provider,omitempty"` // Inlined below
	ProviderOrder             []string `json:"provider_order,omitempty"`              // List of provider names to try in order
	ProviderAllowFallbacks    *bool    `json:"provider_allow_fallbacks,omitempty"`    // Default: true. Allow backup providers
	ProviderRequireParameters *bool    `json:"provider_require_parameters,omitempty"` // Default: false. Only use providers supporting all request parameters
	ProviderDataCollection    *string  `json:"provider_data_collection,omitempty"`    // Default: "allow". Control data storage ("allow" | "deny")
	ProviderIgnore            []string `json:"provider_ignore,omitempty"`             // List of provider names to skip
	ProviderQuantizations     []string `json:"provider_quantizations,omitempty"`      // List of quantization levels to filter by (e.g., ["int4", "int8"])
	ProviderSort              *string  `json:"provider_sort,omitempty"`               // Sort providers by "price" or "throughput"
}
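Because the optional fields are pointers, unset values are simply omitted from the request body. A sketch of setting a few of them on an existing request; the ptr helper is illustrative, not part of the package:

func ptr[T any](v T) *T { return &v }

req.Parameters = Parameters{
	Temperature:  ptr(0.2),
	MaxTokens:    ptr(512),
	Stop:         []string{"\n\n"},
	ProviderSort: ptr("price"),
}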

type PromptCompletionChoice

type PromptCompletionChoice struct {
	BaseChoice
	Text string `json:"text"` // The generated text
}

PromptCompletionChoice represents a choice when the input was a simple prompt string.

type PromptCompletionResponse

type PromptCompletionResponse struct {
	OpenRouterBaseResponse
	Choices []PromptCompletionChoice `json:"choices"`
}

PromptCompletionResponse represents a response when the input was a simple prompt string.

type ProviderErrorMetadata

type ProviderErrorMetadata struct {
	ProviderName string      `json:"provider_name"`
	Raw          interface{} `json:"raw"`
}

ProviderErrorMetadata defines the structure for provider-related errors

func IsProviderError

func IsProviderError(err error) (*ProviderErrorMetadata, bool)

IsProviderError checks if an error is a provider error and returns parsed metadata

func (*ProviderErrorMetadata) Error

func (p *ProviderErrorMetadata) Error() string

Error implements the error interface for ProviderErrorMetadata

type ResponseMessage

type ResponseMessage struct {
	Content   *string    `json:"content"` // Message content or null
	Role      string     `json:"role"`    // Usually "assistant"
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}

ResponseMessage represents the message content in a chat response

type ResponseUsage

type ResponseUsage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

ResponseUsage contains token usage information.

type StreamingChatChoice

type StreamingChatChoice struct {
	BaseChoice                    // Finish reasons usually null until the final chunk
	Delta      StreamingChatDelta `json:"delta"`
}

StreamingChatChoice represents a choice chunk in a streaming chat response.

type StreamingChatDelta

type StreamingChatDelta struct {
	Content   *string         `json:"content"`              // Content delta (token chunk) or null
	Role      *string         `json:"role,omitempty"`       // Usually present only in the first chunk
	ToolCalls []ToolCallDelta `json:"tool_calls,omitempty"` // Tool calls delta
}

StreamingChatDelta represents the delta changes in a streaming chat response.

type StreamingChatResponse

type StreamingChatResponse struct {
	OpenRouterBaseResponse
	Choices []StreamingChatChoice `json:"choices"` // Note: Final chunk might have empty Choices and only Usage in Base
}

StreamingChatResponse represents a streaming chat completion response chunk.

type Tool

type Tool struct {
}

Tool defines a tool (currently only "function" type is supported).

type ToolCall

type ToolCall struct {
	ID       string           `json:"id"`   // ID of the tool call
	Type     string           `json:"type"` // Should be "function"
	Function ToolCallFunction `json:"function"`
}

ToolCall represents a single tool call in a message

type ToolCallDelta

type ToolCallDelta struct {
	Index    *int          `json:"index,omitempty"` // Index for incremental tool call updates
	ID       string        `json:"id"`              // ID of the tool call
	Type     string        `json:"type"`            // Should be "function"
	Function FunctionDelta `json:"function"`
}

ToolCallDelta represents a single tool call delta in a streaming response.

type ToolCallFunction

type ToolCallFunction struct {
	Name      string `json:"name"`      // Function name
	Arguments string `json:"arguments"` // JSON string arguments
}

ToolCallFunction represents the function details in a tool call

type ToolChoiceFunction

type ToolChoiceFunction struct {
	Name string `json:"name"`
}

ToolChoiceFunction specifies a function to be called.
