Documentation
¶
Index ¶
- func ToLLMSResponse(resp *Response) *llm.GenerateResponse
- type Choice
- type Client
- type ClientOption
- type ContentItem
- type DeltaMessage
- type FunctionCall
- type FunctionCallDelta
- type ImageURL
- type Message
- type Request
- type Response
- type StreamChoice
- type StreamOptions
- type StreamResponse
- type Tool
- type ToolCall
- type ToolCallDelta
- type ToolDefinition
- type Usage
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func ToLLMSResponse ¶
func ToLLMSResponse(resp *Response) *llm.GenerateResponse
ToLLMSResponse converts a Response to an llm.GenerateResponse
Types ¶
type Choice ¶
type Choice struct {
Index int `json:"index"`
Message Message `json:"message"`
FinishReason string `json:"finish_reason"`
}
Choice represents a choice in the OpenAI API response
type Client ¶
Client represents an InceptionLabs API client
func NewClient ¶
func NewClient(apiKey, model string, options ...ClientOption) *Client
NewClient creates a new InceptionLabs client with the given API key and model
func (*Client) Generate ¶
func (c *Client) Generate(ctx context.Context, request *llm.GenerateRequest) (*llm.GenerateResponse, error)
Generate sends a chat request to the InceptionLabs API and returns the response
func (*Client) Implements ¶
type ClientOption ¶
type ClientOption func(*Client)
func WithBaseURL ¶
func WithBaseURL(baseURL string) ClientOption
func WithHTTPClient ¶
func WithHTTPClient(httpClient *http.Client) ClientOption
func WithModel ¶
func WithModel(model string) ClientOption
func WithUsageListener ¶
func WithUsageListener(l basecfg.UsageListener) ClientOption
WithUsageListener registers a callback to receive token usage information.
type ContentItem ¶
type ContentItem struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
ImageURL *ImageURL `json:"image_url,omitempty"`
}
ContentItem represents a single content item in a message for the OpenAI API
type DeltaMessage ¶
type DeltaMessage struct {
Role string `json:"role,omitempty"`
Content *string `json:"content,omitempty"`
ToolCalls []ToolCallDelta `json:"tool_calls,omitempty"`
}
type FunctionCall ¶
FunctionCall represents a function call in the OpenAI API
type FunctionCallDelta ¶
type Message ¶
type Message struct {
Role string `json:"role"`
Content interface{} `json:"content,omitempty"` // Can be string or []ContentItem
Name string `json:"name,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallId string `json:"tool_call_id,omitempty"`
}
Message represents a message in the OpenAI API request
type Request ¶
type Request struct {
Tools []Tool `json:"tools,omitempty"`
Model string `json:"model"`
Messages []Message `json:"messages"`
Temperature *float64 `json:"temperature,omitempty"`
MaxTokens int `json:"max_completion_tokens,omitempty"`
TopP float64 `json:"top_p,omitempty"`
N int `json:"n,omitempty"`
Stream bool `json:"stream,omitempty"`
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
// Reasoning enables configuration of internal chain-of-thought reasoning features.
Reasoning *llm.Reasoning `json:"reasoning,omitempty"`
ToolChoice interface{} `json:"tool_choice,omitempty"`
ParallelToolCalls bool `json:"parallel_tool_calls,omitempty"`
}
Request represents the request structure for OpenAI API
type Response ¶
type Response struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Choices []Choice `json:"choices"`
Usage Usage `json:"usage"`
}
Response represents the response structure from OpenAI API
type StreamChoice ¶
type StreamChoice struct {
Index int `json:"index"`
Delta DeltaMessage `json:"delta"`
FinishReason *string `json:"finish_reason"`
}
type StreamOptions ¶
type StreamOptions struct {
IncludeUsage bool `json:"include_usage,omitempty"`
}
StreamOptions controls additional streaming behavior.
type StreamResponse ¶
type StreamResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Choices []StreamChoice `json:"choices"`
}
StreamResponse represents a single Server-Sent Event chunk from OpenAI chat/completions endpoint when stream=true. The payload places partial deltas under choices[i].delta instead of choices[i].message.
type Tool ¶
type Tool struct {
Type string `json:"type"`
Function ToolDefinition `json:"function"`
}
Tool represents a tool in the OpenAI API
type ToolCall ¶
type ToolCall struct {
ID string `json:"id"`
Type string `json:"type"`
Function FunctionCall `json:"function"`
}
ToolCall represents a tool call in the OpenAI API
type ToolCallDelta ¶
type ToolCallDelta struct {
Index int `json:"index"`
ID string `json:"id,omitempty"`
Type string `json:"type,omitempty"`
Function FunctionCallDelta `json:"function,omitempty"`
}
ToolCallDelta mirrors the incremental tool call fields included in streaming deltas. Arguments are delivered as a concatenated string across multiple events.
type ToolDefinition ¶
type ToolDefinition struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
Parameters map[string]interface{} `json:"parameters,omitempty"`
Required []string `json:"required,omitempty"`
}
ToolDefinition represents a tool definition in the OpenAI API
type Usage ¶
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
PromptTokensDetails struct {
CachedTokens int `json:"cached_tokens"`
AudioTokens int `json:"audio_tokens"`
} `json:"prompt_tokens_details"`
CompletionTokensDetails struct {
ReasoningTokens int `json:"reasoning_tokens"`
AudioTokens int `json:"audio_tokens"`
AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
} `json:"completion_tokens_details"`
}
Usage represents token usage information in the OpenAI API response