mistral

package module
v0.0.3
Published: May 9, 2025 License: MIT Imports: 9 Imported by: 0

README

Mistral Go Client

This project is a fork of Gage-Technologies/mistral-go.

The Mistral Go Client is a Go library for the Mistral AI API, giving developers a robust set of tools for integrating AI-powered features into their applications. It supports Chat Completions, Chat Completions Streaming, and Embeddings, enabling seamless interaction with Mistral's language models.

Features

  • Chat Completions: Generate conversational responses and complete dialogue prompts using Mistral's language models.
  • Chat Completions Streaming: Establish a real-time stream of chat completions, ideal for applications requiring continuous interaction.
  • Embeddings: Obtain numerical vector representations of text, enabling semantic search, clustering, and other machine learning applications.
  • Vision: Vision capabilities for Pixtral models (official samples coming soon; an interim sketch appears at the end of the Usage section)
  • OCR: OCR capabilities for Mistral OCR models (official samples coming soon; see the same sketch)

Getting Started

To begin using the Mistral Go Client in your project, ensure you have Go installed on your system. This client library is compatible with Go 1.20 and higher.

Installation

To install the Mistral Go Client, run the following command:

go get github.com/AuxData-ai/mistral-go

Usage

To use the client in your Go application, you need to import the package and initialize a new client instance with your API key.

package main

import (
	"log"

	"github.com/AuxData-ai/mistral-go"
)

func main() {
	// If the API key is empty, it is loaded from the MISTRAL_API_KEY env var
	client := mistral.NewMistralClientDefault("your-api-key")

	// Example: Using Chat Completions
	chatRes, err := client.Chat("mistral-tiny", []mistral.ChatMessage{{Content: "Hello, world!", Role: mistral.RoleUser}}, nil)
	if err != nil {
		log.Fatalf("Error getting chat completion: %v", err)
	}
	log.Printf("Chat completion: %+v\n", chatRes)

	// Example: Using Chat Completions Stream
	chatResChan, err := client.ChatStream("mistral-tiny", []mistral.ChatMessage{{Content: "Hello, world!", Role: mistral.RoleUser}}, nil)
	if err != nil {
		log.Fatalf("Error getting chat completion stream: %v", err)
	}

	for chatResChunk := range chatResChan {
		if chatResChunk.Error != nil {
			log.Fatalf("Error while streaming response: %v", chatResChunk.Error)
		}
		log.Printf("Chat completion stream part: %+v\n", chatResChunk)
	}

	// Example: Using Embeddings
	embsRes, err := client.Embeddings("mistral-embed", []string{"Embed this sentence.", "As well as this one."})
	if err != nil {
		log.Fatalf("Error getting embeddings: %v", err)
	}

	log.Printf("Embeddings response: %+v\n", embsRes)
}
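
Vision and OCR samples are noted above as coming soon. In the meantime, here is a minimal, unverified sketch built only from the exported signatures (Vision and OCR on MistralClient) and types (VisionMessage, TextContent, VisionContent, Document, OcrParams). The model names "pixtral-12b-latest" and "mistral-ocr-latest" and the Type string values are assumptions mirroring Mistral's REST API, not constants defined by this package:

// Hypothetical Vision call: the model name and Type values are assumptions.
visionRes, err := client.Vision("pixtral-12b-latest", []mistral.VisionMessage{
	{
		Role: mistral.RoleUser,
		Content: []mistral.Content{
			mistral.TextContent{Type: "text", Text: "Describe this image."},
			mistral.VisionContent{Type: "image_url", ImageUrl: "https://example.com/photo.png"},
		},
	},
}, nil)
if err != nil {
	log.Fatalf("Error getting vision completion: %v", err)
}
log.Printf("Vision completion: %+v\n", visionRes)

// Hypothetical OCR call over a document URL.
ocrRes, err := client.OCR("mistral-ocr-latest", mistral.Document{
	Type:        "document_url",
	DocumentUrl: "https://example.com/invoice.pdf",
}, &mistral.OcrParams{IncludeImageBase64: false})
if err != nil {
	log.Fatalf("Error running OCR: %v", err)
}
for _, page := range ocrRes.Pages {
	log.Printf("Page %d markdown:\n%s\n", page.Index, page.Markdown)
}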

Documentation

For detailed documentation on the Mistral AI API and the available endpoints, please refer to the Mistral AI API Documentation.

Contributing

Contributions are welcome! If you would like to contribute to the project, please fork the repository and submit a pull request with your changes.

License

The Mistral Go Client is open-sourced software licensed under the MIT license.

Support

If you encounter any issues or require assistance, please file an issue on the GitHub repository issue tracker.

Documentation

Index

Constants

const (
	Endpoint          = "https://api.mistral.ai"
	CodestralEndpoint = "https://codestral.mistral.ai"
	DefaultMaxRetries = 5
	DefaultTimeout    = 120 * time.Second
)
const (
	ModelMistralLargeLatest  = "mistral-large-latest"
	ModelMistralMediumLatest = "mistral-medium-latest"
	ModelMistralSmallLatest  = "mistral-small-latest"
	ModelCodestralLatest     = "codestral-latest"

	ModelOpenMixtral8x7b  = "open-mixtral-8x7b"
	ModelOpenMixtral8x22b = "open-mixtral-8x22b"
	ModelOpenMistral7b    = "open-mistral-7b"

	ModelMistralLarge2402  = "mistral-large-2402"
	ModelMistralMedium2312 = "mistral-medium-2312"
	ModelMistralSmall2402  = "mistral-small-2402"
	ModelMistralSmall2312  = "mistral-small-2312"
	ModelMistralTiny       = "mistral-tiny-2312"
)
const (
	RoleUser      = "user"
	RoleAssistant = "assistant"
	RoleSystem    = "system"
	RoleTool      = "tool"
)
const (
	ToolChoiceAny  = "any"
	ToolChoiceAuto = "auto"
	ToolChoiceNone = "none"
)

Variables

var DefaultChatRequestParams = ChatRequestParams{
	Temperature: 1,
	TopP:        1,
	RandomSeed:  42069,
	MaxTokens:   4000,
	SafePrompt:  false,
}
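
A common pattern is to copy DefaultChatRequestParams and override only the fields you need. The sketch below assumes a client and messages constructed as in the README example:

// Copy the package defaults, then override selected fields.
params := mistral.DefaultChatRequestParams
params.Temperature = 0.2 // lower temperature for more deterministic output
params.MaxTokens = 512

chatRes, err := client.Chat(mistral.ModelMistralSmallLatest, messages, &params)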

Functions

This section is empty.

Types

type ChatCompletionResponse

type ChatCompletionResponse struct {
	ID      string                         `json:"id"`
	Object  string                         `json:"object"`
	Created int                            `json:"created"`
	Model   string                         `json:"model"`
	Choices []ChatCompletionResponseChoice `json:"choices"`
	Usage   UsageInfo                      `json:"usage"`
}

ChatCompletionResponse represents the response from the chat completion endpoint.

type ChatCompletionResponseChoice

type ChatCompletionResponseChoice struct {
	Index        int          `json:"index"`
	Message      ChatMessage  `json:"message"`
	FinishReason FinishReason `json:"finish_reason,omitempty"`
}

ChatCompletionResponseChoice represents a choice in the chat completion response.

type ChatCompletionResponseChoiceStream

type ChatCompletionResponseChoiceStream struct {
	Index        int          `json:"index"`
	Delta        DeltaMessage `json:"delta"`
	FinishReason FinishReason `json:"finish_reason,omitempty"`
}

ChatCompletionResponseChoiceStream represents a choice in the streamed chat completion response.

type ChatCompletionStreamResponse

type ChatCompletionStreamResponse struct {
	ID      string                               `json:"id"`
	Model   string                               `json:"model"`
	Choices []ChatCompletionResponseChoiceStream `json:"choices"`
	Created int                                  `json:"created,omitempty"`
	Object  string                               `json:"object,omitempty"`
	Usage   UsageInfo                            `json:"usage,omitempty"`
	Error   error                                `json:"error,omitempty"`
}

ChatCompletionStreamResponse represents the streamed response from the chat completion endpoint.
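
Each streamed chunk carries only a delta; to reconstruct the full message, accumulate Delta.Content as chunks arrive. A sketch assuming the channel returned by ChatStream and an extra import of "strings":

var sb strings.Builder
for chunk := range chatResChan {
	if chunk.Error != nil {
		log.Fatalf("stream error: %v", chunk.Error)
	}
	for _, choice := range chunk.Choices {
		sb.WriteString(choice.Delta.Content)
	}
}
log.Println("full response:", sb.String())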

type ChatMessage

type ChatMessage struct {
	Role      string     `json:"role"`
	Content   string     `json:"content"`
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}

ChatMessage represents a single message in a chat.

type ChatRequestParams

type ChatRequestParams struct {
	Temperature    float64        `json:"temperature"` // The temperature to use for sampling. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or TopP but not both.
	TopP           float64        `json:"top_p"`       // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or Temperature but not both.
	RandomSeed     int            `json:"random_seed"`
	MaxTokens      int            `json:"max_tokens"`
	SafePrompt     bool           `json:"safe_prompt"` // Adds a Mistral defined safety message to the system prompt to enforce guardrailing
	Tools          []Tool         `json:"tools"`
	ToolChoice     string         `json:"tool_choice"`
	ResponseFormat ResponseFormat `json:"response_format"`
}

ChatRequestParams represents the parameters for the Chat/ChatStream method of MistralClient.

type Content

type Content any

type DeltaMessage

type DeltaMessage struct {
	Role      string     `json:"role"`
	Content   string     `json:"content"`
	ToolCalls []ToolCall `json:"tool_calls"`
}

DeltaMessage represents the delta between the prior state of the message and the new state of the message when streaming responses.

type Dimensions

type Dimensions struct {
	DPI    int `json:"dpi"`
	Height int `json:"height"`
	Width  int `json:"width"`
}

type Document

type Document struct {
	Type        string `json:"type"`
	ImageUrl    string `json:"image_url"`
	DocumentUrl string `json:"document_url"`
}

type EmbeddingObject

type EmbeddingObject struct {
	Object    string    `json:"object"`
	Embedding []float64 `json:"embedding"`
	Index     int       `json:"index"`
}

EmbeddingObject represents an embedding object in the response.

type EmbeddingResponse

type EmbeddingResponse struct {
	ID     string            `json:"id"`
	Object string            `json:"object"`
	Data   []EmbeddingObject `json:"data"`
	Model  string            `json:"model"`
	Usage  UsageInfo         `json:"usage"`
}

EmbeddingResponse represents the response from the embeddings endpoint.
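
Each EmbeddingObject carries its vector in Embedding; a typical downstream step is cosine similarity between two inputs. The helper below is ordinary math, not part of this package (requires importing "math"):

// cosine returns the cosine similarity of two equal-length vectors.
func cosine(a, b []float64) float64 {
	var dot, na, nb float64
	for i := range a {
		dot += a[i] * b[i]
		na += a[i] * a[i]
		nb += b[i] * b[i]
	}
	return dot / (math.Sqrt(na) * math.Sqrt(nb))
}

// With res from client.Embeddings("mistral-embed", []string{s1, s2}):
sim := cosine(res.Data[0].Embedding, res.Data[1].Embedding)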

type FIMCompletionResponse

type FIMCompletionResponse struct {
	ID      string                        `json:"id"`
	Object  string                        `json:"object"`
	Created int                           `json:"created"`
	Model   string                        `json:"model"`
	Choices []FIMCompletionResponseChoice `json:"choices"`
	Usage   UsageInfo                     `json:"usage"`
}

FIMCompletionResponse represents the response from the FIM completion endpoint.

type FIMCompletionResponseChoice

type FIMCompletionResponseChoice struct {
	Index        int          `json:"index"`
	Message      ChatMessage  `json:"message"`
	FinishReason FinishReason `json:"finish_reason,omitempty"`
}

FIMCompletionResponseChoice represents a choice in the FIM completion response.

type FIMRequestParams

type FIMRequestParams struct {
	Model       string   `json:"model"`
	Prompt      string   `json:"prompt"`
	Suffix      string   `json:"suffix"`
	MaxTokens   int      `json:"max_tokens"`
	Temperature float64  `json:"temperature"`
	Stop        []string `json:"stop,omitempty"`
}

FIMRequestParams represents the parameters for the FIM method of MistralClient.
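
The method index below lists FIM but omits its signature; assuming it takes *FIMRequestParams and returns (*FIMCompletionResponse, error), which is what these types suggest, a Codestral fill-in-the-middle call might look like:

// Assumed signature: FIM(params *FIMRequestParams) (*FIMCompletionResponse, error).
client := mistral.NewCodestralClientDefault("") // falls back to CODESTRAL_API_KEY
fimRes, err := client.FIM(&mistral.FIMRequestParams{
	Model:       mistral.ModelCodestralLatest,
	Prompt:      "func add(a, b int) int {",
	Suffix:      "}",
	MaxTokens:   64,
	Temperature: 0,
})
if err != nil {
	log.Fatalf("FIM error: %v", err)
}
log.Println(fimRes.Choices[0].Message.Content)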

type FinishReason

type FinishReason string

FinishReason is the reason that a chat message finished.

const (
	FinishReasonStop   FinishReason = "stop"
	FinishReasonLength FinishReason = "length"
	FinishReasonError  FinishReason = "error"
)
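
Callers usually branch on FinishReason to detect truncation or errors, e.g. with a chatRes returned by Chat:

switch chatRes.Choices[0].FinishReason {
case mistral.FinishReasonLength:
	log.Println("response hit the MaxTokens limit and was truncated")
case mistral.FinishReasonError:
	log.Println("the model stopped due to an error")
case mistral.FinishReasonStop:
	// normal completion
}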

type Function

type Function struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	Parameters  any    `json:"parameters"`
}

Function is the definition of a function that the LLM can call, including its parameters.

type FunctionCall

type FunctionCall struct {
	Name      string `json:"name"`
	Arguments string `json:"arguments"`
}

FunctionCall represents a request by the LLM to call an external tool.

type MistralAPIError

type MistralAPIError struct {
	MistralError
	HTTPStatus int
	Headers    map[string][]string
}

MistralAPIError is returned when the API responds with an error message.

func NewMistralAPIError

func NewMistralAPIError(message string, httpStatus int, headers map[string][]string) *MistralAPIError

func (*MistralAPIError) Error

func (e *MistralAPIError) Error() string

type MistralClient

type MistralClient struct {
	// contains filtered or unexported fields
}

func NewCodestralClientDefault

func NewCodestralClientDefault(apiKey string) *MistralClient

NewCodestralClientDefault creates a new Codestral API client with the default endpoint and the given API key. Defaults to using CODESTRAL_API_KEY from the environment.

func NewMistralClient

func NewMistralClient(apiKey string, endpoint string, maxRetries int, timeout time.Duration) *MistralClient
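
For non-default retry and timeout behavior, use this constructor directly. A sketch reusing the exported constants (requires importing "time"; the API key is illustrative):

client := mistral.NewMistralClient(
	"your-api-key",
	mistral.Endpoint, // or mistral.CodestralEndpoint, or a gateway URL
	3,                // maxRetries; DefaultMaxRetries is 5
	30*time.Second,   // timeout; DefaultTimeout is 120s
)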

func NewMistralClientDefault

func NewMistralClientDefault(apiKey string) *MistralClient

NewMistralClientDefault creates a new Mistral API client with the default endpoint and the given API key. Defaults to using MISTRAL_API_KEY from the environment.

func (*MistralClient) Chat

func (c *MistralClient) Chat(model string, messages []ChatMessage, params *ChatRequestParams) (*ChatCompletionResponse, error)

func (*MistralClient) ChatStream

func (c *MistralClient) ChatStream(model string, messages []ChatMessage, params *ChatRequestParams) (<-chan ChatCompletionStreamResponse, error)

ChatStream sends a chat message and returns a channel to receive streaming responses.

func (*MistralClient) Embeddings

func (c *MistralClient) Embeddings(model string, input []string) (*EmbeddingResponse, error)

func (*MistralClient) FIM

func (c *MistralClient) FIM(params *FIMRequestParams) (*FIMCompletionResponse, error)

FIM sends a fill-in-the-middle (FIM) request and returns the completion response.

func (*MistralClient) ListModels

func (c *MistralClient) ListModels() (*ModelList, error)
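
A quick way to check which models your key can access:

models, err := client.ListModels()
if err != nil {
	log.Fatalf("ListModels error: %v", err)
}
for _, m := range models.Data {
	log.Println(m.ID)
}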

func (*MistralClient) OCR

func (c *MistralClient) OCR(model string, document Document, params *OcrParams) (*OcrDocument, error)

func (*MistralClient) Vision

func (c *MistralClient) Vision(model string, messages []VisionMessage, params *ChatRequestParams) (*ChatCompletionResponse, error)

type MistralConnectionError

type MistralConnectionError struct {
	MistralError
}

MistralConnectionError is returned when the SDK cannot reach the API server for any reason.

func NewMistralConnectionError

func NewMistralConnectionError(message string) *MistralConnectionError

type MistralError

type MistralError struct {
	Message string
}

MistralError is the base error type for all Mistral errors.

func (*MistralError) Error

func (e *MistralError) Error() string

type ModelCard

type ModelCard struct {
	ID         string            `json:"id"`
	Object     string            `json:"object"`
	Created    int               `json:"created"`
	OwnedBy    string            `json:"owned_by"`
	Root       string            `json:"root,omitempty"`
	Parent     string            `json:"parent,omitempty"`
	Permission []ModelPermission `json:"permission"`
}

ModelCard represents a model card.

type ModelList

type ModelList struct {
	Object string      `json:"object"`
	Data   []ModelCard `json:"data"`
}

ModelList represents a list of models.

type ModelPermission

type ModelPermission struct {
	ID                 string `json:"id"`
	Object             string `json:"object"`
	Created            int    `json:"created"`
	AllowCreateEngine  bool   `json:"allow_create_engine"`
	AllowSampling      bool   `json:"allow_sampling"`
	AllowLogprobs      bool   `json:"allow_logprobs"`
	AllowSearchIndices bool   `json:"allow_search_indices"`
	AllowView          bool   `json:"allow_view"`
	AllowFineTuning    bool   `json:"allow_fine_tuning"`
	Organization       string `json:"organization"`
	Group              string `json:"group,omitempty"`
	IsBlocking         bool   `json:"is_blocking"`
}

ModelPermission represents the permissions of a model.

type OcrDocument

type OcrDocument struct {
	Pages     []Page       `json:"pages"`
	Model     string       `json:"model"`
	UsageInfo OcrUsageInfo `json:"usage_info"`
}

type OcrParams

type OcrParams struct {
	IncludeImageBase64 bool `json:"include_image_base64"`
}

type OcrUsageInfo

type OcrUsageInfo struct {
	PagesProcessed int `json:"pages_processed"`
	DocSizeBytes   int `json:"doc_size_bytes"`
}

type Page

type Page struct {
	Index      int        `json:"index"`
	Markdown   string     `json:"markdown"`
	Images     []string   `json:"images"`
	Dimensions Dimensions `json:"dimensions"`
}

type ResponseFormat

type ResponseFormat string

ResponseFormat is the format that the response must adhere to.

const (
	ResponseFormatText       ResponseFormat = "text"
	ResponseFormatJsonObject ResponseFormat = "json_object"
)
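
To request strict JSON output, set ResponseFormat on the chat params; note that the Mistral API typically also expects the prompt itself to mention JSON (treat that detail as an assumption here):

params := mistral.DefaultChatRequestParams
params.ResponseFormat = mistral.ResponseFormatJsonObject

chatRes, err := client.Chat(mistral.ModelMistralSmallLatest, []mistral.ChatMessage{
	{Role: mistral.RoleUser, Content: "Reply with a JSON object with keys \"name\" and \"age\"."},
}, &params)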

type TextContent

type TextContent struct {
	Type string `json:"type"`
	Text string `json:"text"`
}

type Tool

type Tool struct {
	Type     ToolType `json:"type"`
	Function Function `json:"function"`
}

Tool is the definition of a tool that the LLM can call.

type ToolCall

type ToolCall struct {
	Id       string       `json:"id"`
	Type     ToolType     `json:"type"`
	Function FunctionCall `json:"function"`
}

ToolCall represents a call to a tool requested by the LLM.

type ToolType

type ToolType string

ToolType is the type of tool defined for the LLM.

const (
	ToolTypeFunction ToolType = "function"
)
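
Putting Tool, Function, FunctionCall, and ToolCall together: declare a schema, let the model request a call, run it, and reply with a RoleTool message. The get_weather tool and its result are hypothetical; everything else uses only types documented here:

weatherTool := mistral.Tool{
	Type: mistral.ToolTypeFunction,
	Function: mistral.Function{
		Name:        "get_weather", // hypothetical tool
		Description: "Get the current weather for a city",
		Parameters: map[string]any{
			"type": "object",
			"properties": map[string]any{
				"city": map[string]any{"type": "string"},
			},
			"required": []string{"city"},
		},
	},
}

params := mistral.DefaultChatRequestParams
params.Tools = []mistral.Tool{weatherTool}
params.ToolChoice = mistral.ToolChoiceAuto

messages := []mistral.ChatMessage{{Role: mistral.RoleUser, Content: "Weather in Paris?"}}
chatRes, err := client.Chat(mistral.ModelMistralLargeLatest, messages, &params)
if err != nil {
	log.Fatalf("Chat error: %v", err)
}

msg := chatRes.Choices[0].Message
if len(msg.ToolCalls) > 0 {
	call := msg.ToolCalls[0]
	log.Printf("model wants %s(%s)", call.Function.Name, call.Function.Arguments)
	// Run the tool, then send its output back for a final answer.
	messages = append(messages, msg, mistral.ChatMessage{
		Role:    mistral.RoleTool,
		Content: `{"temp_c": 18}`, // hypothetical tool output
	})
	chatRes, err = client.Chat(mistral.ModelMistralLargeLatest, messages, &params)
}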

type UsageInfo

type UsageInfo struct {
	PromptTokens     int `json:"prompt_tokens"`
	TotalTokens      int `json:"total_tokens"`
	CompletionTokens int `json:"completion_tokens,omitempty"`
}

UsageInfo represents the usage information of a response.

type VisionContent

type VisionContent struct {
	Type     string `json:"type"`
	ImageUrl string `json:"image_url"`
}

type VisionMessage

type VisionMessage struct {
	Role      string     `json:"role"`
	Content   []Content  `json:"content"`
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}
