Documentation ¶
Index ¶
- func IsAuthError(err error) bool
- func IsNotFoundError(err error) bool
- func IsRateLimitError(err error) bool
- type ChatCompletionStream
- type ChatService
- type Client
- type EmbeddingService
- type Error
- type GeminiService
- func (s *GeminiService) GenerateContent(ctx context.Context, model string, contents []*genai.Content, ...) (*genai.GenerateContentResponse, error)
- func (s *GeminiService) GenerateContentStream(ctx context.Context, model string, contents []*genai.Content, ...) iter.Seq2[*genai.GenerateContentResponse, error]
- func (s *GeminiService) GenerateImages(ctx context.Context, model string, prompt string, ...) (*genai.GenerateImagesResponse, error)
- func (s *GeminiService) GenerateVideos(ctx context.Context, model string, prompt string, image *genai.Image, ...) (*genai.GenerateVideosOperation, error)
- func (s *GeminiService) GetVideosOperation(ctx context.Context, op *genai.GenerateVideosOperation) (*genai.GenerateVideosOperation, error)
- type MessageService
- type MessageStream
- type Model
- type ModelList
- type ModelService
- type Option
- type Pricing
- type PricingConditions
- type Provider
- type ResponseService
- type ResponseStream
- type TokenRange
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func IsAuthError ¶
func IsNotFoundError ¶
func IsRateLimitError ¶
Types ¶
type ChatCompletionStream ¶
type ChatCompletionStream struct {
// contains filtered or unexported fields
}
ChatCompletionStream wraps a streaming chat completion response.
func (*ChatCompletionStream) Close ¶
func (s *ChatCompletionStream) Close() error
Close terminates the underlying stream.
func (*ChatCompletionStream) Current ¶
func (s *ChatCompletionStream) Current() openai.ChatCompletionChunk
Current returns the most recently decoded chunk.
func (*ChatCompletionStream) Err ¶
func (s *ChatCompletionStream) Err() error
Err returns the first error encountered during streaming, wrapped as a zenmux Error when applicable.
func (*ChatCompletionStream) Next ¶
func (s *ChatCompletionStream) Next() bool
Next advances to the next chunk in the stream. Returns false when the stream is exhausted or an error has occurred.
type ChatService ¶
type ChatService struct {
// contains filtered or unexported fields
}
ChatService provides access to the chat completions API.
func (*ChatService) Create ¶
func (s *ChatService) Create(ctx context.Context, params openai.ChatCompletionNewParams) (*openai.ChatCompletion, error)
Create sends a chat completion request and returns the result.
func (*ChatService) CreateStream ¶
func (s *ChatService) CreateStream(ctx context.Context, params openai.ChatCompletionNewParams) *ChatCompletionStream
CreateStream initiates a streaming chat completion request.
type Client ¶
type Client struct {
// Chat provides access to chat completion endpoints.
Chat *ChatService
// Responses provides access to the Responses API endpoints.
Responses *ResponseService
// Embeddings provides access to the embeddings API endpoints.
Embeddings *EmbeddingService
// Messages provides access to the Anthropic Messages API endpoints.
Messages *MessageService
// Gemini provides access to the Google Gemini API endpoints.
Gemini *GeminiService
// Models provides unified model listing across all providers.
Models *ModelService
// Platform provides access to the ZenMux Platform management API.
Platform *platform.Client
// contains filtered or unexported fields
}
Client is the top-level ZenMux SDK client. It exposes service objects that map to provider APIs.
func NewClient ¶
NewClient creates a new ZenMux client configured with the given API key and optional settings.
Example ¶
package main
import (
"fmt"
zenmux "github.com/0xCyberFred/zenmux-sdk-go"
)
func main() {
client := zenmux.NewClient("sk-your-zenmux-key",
zenmux.WithManagementKey("sk-mgmt-your-key"),
)
_ = client.Chat // OpenAI Chat Completions
_ = client.Responses // OpenAI Responses
_ = client.Embeddings // OpenAI Embeddings
_ = client.Messages // Anthropic Messages
_ = client.Gemini // Google Gemini
_ = client.Models // Unified model listing
_ = client.Platform // Platform management API
_ = client.OpenAI() // *openai.Client escape hatch
_ = client.Anthropic() // *anthropic.Client escape hatch
_ = client.Google() // *genai.Client escape hatch
fmt.Println("client created")
}
Output: client created
func (*Client) Anthropic ¶
Anthropic returns the underlying anthropic-sdk-go client for direct access to provider-specific functionality.
type EmbeddingService ¶
type EmbeddingService struct {
// contains filtered or unexported fields
}
EmbeddingService provides access to the embeddings API.
func (*EmbeddingService) Create ¶
func (s *EmbeddingService) Create(ctx context.Context, params openai.EmbeddingNewParams) (*openai.CreateEmbeddingResponse, error)
Create sends an embedding request and returns the result.
type GeminiService ¶
type GeminiService struct {
// contains filtered or unexported fields
}
GeminiService provides access to the Google Gemini API.
func (*GeminiService) GenerateContent ¶
func (s *GeminiService) GenerateContent(ctx context.Context, model string, contents []*genai.Content, config *genai.GenerateContentConfig) (*genai.GenerateContentResponse, error)
GenerateContent sends a content generation request and returns the result.
func (*GeminiService) GenerateContentStream ¶
func (s *GeminiService) GenerateContentStream(ctx context.Context, model string, contents []*genai.Content, config *genai.GenerateContentConfig) iter.Seq2[*genai.GenerateContentResponse, error]
GenerateContentStream initiates a streaming content generation request. It returns an iterator that yields response chunks.
func (*GeminiService) GenerateImages ¶
func (s *GeminiService) GenerateImages(ctx context.Context, model string, prompt string, config *genai.GenerateImagesConfig) (*genai.GenerateImagesResponse, error)
GenerateImages sends an image generation request and returns the result.
func (*GeminiService) GenerateVideos ¶
func (s *GeminiService) GenerateVideos(ctx context.Context, model string, prompt string, image *genai.Image, config *genai.GenerateVideosConfig) (*genai.GenerateVideosOperation, error)
GenerateVideos creates a long-running video generation operation.
func (*GeminiService) GetVideosOperation ¶
func (s *GeminiService) GetVideosOperation(ctx context.Context, op *genai.GenerateVideosOperation) (*genai.GenerateVideosOperation, error)
GetVideosOperation polls a long-running video generation operation.
The genai library's Operations.GetVideosOperation does not work with ZenMux because ZenMux's /api/vertex-ai endpoint speaks the Vertex AI protocol (POST <resource>:fetchPredictOperation) but expects API version v1beta rather than the Vertex default v1beta1. This method issues the correct request directly.
type MessageService ¶
type MessageService struct {
// contains filtered or unexported fields
}
MessageService provides access to the Anthropic Messages API.
func (*MessageService) Create ¶
func (s *MessageService) Create(ctx context.Context, params anthropic.MessageNewParams) (*anthropic.Message, error)
Create sends a message request and returns the result.
func (*MessageService) CreateStream ¶
func (s *MessageService) CreateStream(ctx context.Context, params anthropic.MessageNewParams) *MessageStream
CreateStream initiates a streaming message request.
type MessageStream ¶
type MessageStream struct {
// contains filtered or unexported fields
}
MessageStream wraps a streaming message response from the Anthropic API.
func (*MessageStream) Close ¶
func (s *MessageStream) Close() error
Close terminates the underlying stream.
func (*MessageStream) Current ¶
func (s *MessageStream) Current() anthropic.MessageStreamEventUnion
Current returns the most recently decoded stream event.
func (*MessageStream) Err ¶
func (s *MessageStream) Err() error
Err returns the first error encountered during streaming, wrapped as a zenmux Error when applicable.
func (*MessageStream) Next ¶
func (s *MessageStream) Next() bool
Next advances to the next event in the stream. Returns false when the stream is exhausted or an error has occurred.
type Model ¶
type Model struct {
ID string
DisplayName string
Provider Provider
InputModalities []string
OutputModalities []string
ContextLength int
Reasoning bool
Pricings map[string][]Pricing
}
Model is a unified representation of a model from any supported provider.
type ModelList ¶
type ModelList struct {
Models []Model
}
ModelList holds a collection of normalized models returned from a provider.
type ModelService ¶
type ModelService struct {
// contains filtered or unexported fields
}
ModelService provides access to model listing across providers.
type Option ¶
type Option func(*config)
func WithBaseURL ¶
func WithHTTPClient ¶
func WithManagementKey ¶
func WithMaxRetries ¶
func WithTimeout ¶
type Pricing ¶
type Pricing struct {
Value float64
Unit string
Currency string
Conditions *PricingConditions
}
Pricing describes a single pricing entry for a model.
type PricingConditions ¶
type PricingConditions struct {
PromptTokens *TokenRange `json:"prompt_tokens,omitempty"`
CompletionTokens *TokenRange `json:"completion_tokens,omitempty"`
}
PricingConditions specifies token-range conditions under which a pricing entry applies.
type ResponseService ¶
type ResponseService struct {
// contains filtered or unexported fields
}
ResponseService provides access to the Responses API.
func (*ResponseService) Create ¶
func (s *ResponseService) Create(ctx context.Context, params responses.ResponseNewParams) (*responses.Response, error)
Create sends a response request and returns the result.
func (*ResponseService) CreateStream ¶
func (s *ResponseService) CreateStream(ctx context.Context, params responses.ResponseNewParams) *ResponseStream
CreateStream initiates a streaming response request.
type ResponseStream ¶
type ResponseStream struct {
// contains filtered or unexported fields
}
ResponseStream wraps a streaming response from the Responses API.
func (*ResponseStream) Close ¶
func (s *ResponseStream) Close() error
Close terminates the underlying stream.
func (*ResponseStream) Current ¶
func (s *ResponseStream) Current() responses.ResponseStreamEventUnion
Current returns the most recently decoded stream event.
func (*ResponseStream) Err ¶
func (s *ResponseStream) Err() error
Err returns the first error encountered during streaming, wrapped as a zenmux Error when applicable.
func (*ResponseStream) Next ¶
func (s *ResponseStream) Next() bool
Next advances to the next event in the stream. Returns false when the stream is exhausted or an error has occurred.
Source Files ¶
Directories ¶
| Path | Synopsis |
|---|---|
| examples | |
| examples/chat (command) | |
| examples/chat-stream (command) | |
| examples/embeddings (command) | |
| examples/gemini (command) | |
| examples/gemini-image (command) | Generate images using OpenAI GPT-Image-2 via ZenMux's Vertex AI protocol. |
| examples/gemini-stream (command) | |
| examples/gemini-video (command) | Generate a video via ZenMux's Vertex AI protocol. |
| examples/messages (command) | |
| examples/messages-stream (command) | |
| examples/models (command) | |
| examples/platform (command) | |
| examples/responses (command) | |
| internal | |
| provider | |