chatgpt_commons

package
v0.3.71 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: May 17, 2025 License: BSD-3-Clause Imports: 4 Imported by: 0

Documentation

Index

Constants

View Source
// Identifiers for the OpenAI chat models accepted by the chat completions endpoint.
// Passed as the "model" field of a request.
const (
	ChatModelGPT4           = "gpt-4"
	ChatModelGPT4TurboPrev  = "gpt-4-turbo-preview"
	ChatModelGPT4VisionPrev = "gpt-4-vision-preview"
	ChatModelGPT432k        = "gpt-4-32k"
	ChatModelGPT35Turbo     = "gpt-3.5-turbo"
	ChatModelGPT35Turbo16k  = "gpt-3.5-turbo-16k"
)
View Source
// ApiUrl is the OpenAI chat completions endpoint requests are sent to.
// NOTE(review): Go naming conventions would spell this APIURL, but renaming
// the exported constant would break existing callers, so it is left as-is.
const ApiUrl = "https://api.openai.com/v1/chat/completions"

Variables

This section is empty.

Functions

This section is empty.

Types

type ChatGptOptions

// ChatGptOptions holds the configuration for the ChatGPT LLM driver.
type ChatGptOptions struct {
	DriverOptions *llm_commons.LLMDriverOptions `json:"-"` // shared driver options; excluded from JSON serialization
}

func NewChatGptOptions

func NewChatGptOptions() (instance *ChatGptOptions)

func (*ChatGptOptions) Map

func (instance *ChatGptOptions) Map() (response map[string]interface{})

func (*ChatGptOptions) String

func (instance *ChatGptOptions) String() (response string)

type ChatResponseCompletion

// ChatResponseCompletion models a chat completion response returned by the
// OpenAI chat completions endpoint. On failure the embedded OpenAIError
// carries the error payload instead of the completion fields.
type ChatResponseCompletion struct {
	Id                string                          `json:"id"`                 // A unique identifier for the chat completion.
	Model             string                          `json:"model"`              // The model used for the chat completion.
	Object            string                          `json:"object"`             // The object type, which is always chat.completion.
	Created           int                             `json:"created"`            // The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
	SystemFingerprint string                          `json:"system_fingerprint"` // This fingerprint represents the backend configuration that the model runs with.  Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
	Choices           []*ChatResponseCompletionChoice `json:"choices"`            // A list of chat completion choices. Can be more than one if n is greater than 1.
	Usage             *ChatResponseCompletionUsage    `json:"usage"`              // Usage statistics for the completion request.

	*OpenAIError // only if response has error
}

func NewChatResponseCompletion

func NewChatResponseCompletion() (instance *ChatResponseCompletion)

func ParseChatResponseCompletion

func ParseChatResponseCompletion(m map[string]interface{}) (instance *ChatResponseCompletion, err error)

func (*ChatResponseCompletion) Error

func (instance *ChatResponseCompletion) Error() string

func (*ChatResponseCompletion) GetMessageContent

func (instance *ChatResponseCompletion) GetMessageContent() string

func (*ChatResponseCompletion) GetMessageContentRows

func (instance *ChatResponseCompletion) GetMessageContentRows(removeBullet bool) (response []string)

func (*ChatResponseCompletion) GetMessageRole

func (instance *ChatResponseCompletion) GetMessageRole() string

func (*ChatResponseCompletion) GetUsageCompletionTokens

func (instance *ChatResponseCompletion) GetUsageCompletionTokens() int

func (*ChatResponseCompletion) GetUsagePromptTokens

func (instance *ChatResponseCompletion) GetUsagePromptTokens() int

func (*ChatResponseCompletion) GetUsageTotalTokens

func (instance *ChatResponseCompletion) GetUsageTotalTokens() int

func (*ChatResponseCompletion) HasError

func (instance *ChatResponseCompletion) HasError() bool

func (*ChatResponseCompletion) Map

func (instance *ChatResponseCompletion) Map() map[string]interface{}

func (*ChatResponseCompletion) String

func (instance *ChatResponseCompletion) String() string

type ChatResponseCompletionChoice

// ChatResponseCompletionChoice is a single completion choice within a
// ChatResponseCompletion response.
type ChatResponseCompletionChoice struct {
	Index        int                                  `json:"index"`         // The index of the choice in the list of choices
	Message      *ChatResponseCompletionChoiceMessage `json:"message"`       // A chat completion message generated by the model.
	FinishReason string                               `json:"finish_reason"` // The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
	// NOTE(review): the current OpenAI API documents "logprobs" as an object
	// containing a "content" array rather than a bare array — confirm this
	// slice shape still matches the wire format before relying on it.
	LogProbs []*ChatResponseCompletionChoiceLogProb `json:"logprobs"` // Log probability information for the choice.
}

type ChatResponseCompletionChoiceLogProb

// ChatResponseCompletionChoiceLogProb is the log-probability entry for one
// generated token, together with the most likely alternative tokens at that
// position.
type ChatResponseCompletionChoiceLogProb struct {
	ChatResponseCompletionChoiceLogProbBase
	TopLogProbs []*ChatResponseCompletionChoiceLogProbBase `json:"top_logprobs"` // List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
}

type ChatResponseCompletionChoiceLogProbBase

// ChatResponseCompletionChoiceLogProbBase holds a single token and its log
// probability; it is also embedded in ChatResponseCompletionChoiceLogProb.
type ChatResponseCompletionChoiceLogProbBase struct {
	Token   string  `json:"token"`   // The token.
	Bytes   []byte  `json:"bytes"`   // A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
	LogProb float32 `json:"logprob"` // The log probability of this token.
}

type ChatResponseCompletionChoiceMessage

// ChatResponseCompletionChoiceMessage is a chat message generated by the model
// within a completion choice.
type ChatResponseCompletionChoiceMessage struct {
	Role    string `json:"role"`    // The role of the author of this message (was mis-documented as the choice index — copy-paste error).
	Content string `json:"content"` // The text content of the message.
}

type ChatResponseCompletionUsage

// ChatResponseCompletionUsage reports token usage statistics for a completion
// request.
type ChatResponseCompletionUsage struct {
	CompletionTokens int `json:"completion_tokens"` // Number of tokens in the generated completion.
	PromptTokens     int `json:"prompt_tokens"`     // Number of tokens in the prompt.
	TotalTokens      int `json:"total_tokens"`      // Total number of tokens used in the request (prompt + completion).
}

func (*ChatResponseCompletionUsage) Map

func (instance *ChatResponseCompletionUsage) Map() (m map[string]interface{})

func (*ChatResponseCompletionUsage) String

func (instance *ChatResponseCompletionUsage) String() string

type OpenAIError

// OpenAIError wraps the raw "error" payload returned by the OpenAI API.
// It is embedded in response types; Error is populated only when the
// response carried an error.
type OpenAIError struct {
	Error interface{} `json:"error,omitempty"` // only if response has error
}

func (*OpenAIError) ErrorMessage

func (instance *OpenAIError) ErrorMessage() string

func (*OpenAIError) ErrorObject

func (instance *OpenAIError) ErrorObject() error

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL