entity

package
v0.7.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 27, 2023 License: Apache-2.0 Imports: 3 Imported by: 0

Documentation

Index

Constants

View Source
// Chat message author roles, used as values for Role / ChatMessage.Role.
// One of system, user, or assistant.
// NOTE(review): ALL_CAPS constant names are unidiomatic Go (MixedCaps such as
// RoleSystem is conventional), but renaming would break existing callers.
const (
	SYSTEM = iota
	USER
	ASSISTANT
)

Variables

This section is empty.

Functions

This section is empty.

Types

type AudioFormat

// AudioFormat identifies the transcript output format requested via
// AudioRequest.ResponseFormat: one of json, srt, vtt, text, or verbose_json
// (string mapping implemented by String/MarshalJSON/UnmarshalJSON).
type AudioFormat uint8
const (
	// AudioJSONFormat is the zero value, and therefore the default format.
	AudioJSONFormat AudioFormat = iota
	AudioSRTFormat
	AudioVTTFormat
	AudioTextFormat
	AudioVerboseJSONFormat
)

func (AudioFormat) MarshalJSON

func (a AudioFormat) MarshalJSON() ([]byte, error)

func (AudioFormat) String

func (a AudioFormat) String() string

func (AudioFormat) UnmarshalJSON

func (a AudioFormat) UnmarshalJSON(data []byte) error

type AudioRequest

// AudioRequest is the input to an audio transcription request.
type AudioRequest struct {
	// Model The audio model to use for transcription.
	Model models.Audio `json:"model" validate:"required"`
	// File The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm
	File *os.File `json:"file" validate:"required"`
	// Prompt An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language
	Prompt string `json:"prompt,omitempty"`
	// Temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values
	// like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase
	// the temperature until certain thresholds are hit
	Temperature float32 `json:"temperature,omitempty"`
	// Language The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency, learn more:
	// https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
	Language string `json:"language,omitempty"`
	// ResponseFormat The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt
	// NOTE(review): the tag here is camelCase ("responseFormat") while every other
	// request struct in this package uses snake_case ("response_format") — confirm
	// against the API before relying on it; changing it would alter the wire format.
	ResponseFormat AudioFormat `json:"responseFormat,omitempty"`
}

type AudioResponse

// AudioResponse is the response from an audio transcription request.
type AudioResponse struct {
	// Text The transcript text returned by the API.
	Text string `json:"text"`
}

type ChatChoice

// ChatChoice is a single generated alternative within a ChatResponse
// (one entry per requested completion, see ChatRequest.N).
type ChatChoice struct {
	Index        int         `json:"index"`
	Message      ChatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"`
}

type ChatMessage

// ChatMessage is one message in a chat conversation, used both in
// ChatRequest.Messages and in ChatChoice.Message.
type ChatMessage struct {
	// Role The role of the author of this message. One of system, user, or assistant
	Role Role `json:"role" validate:"required"`
	// Content The contents of the message
	Content string `json:"content" validate:"required"`
	// Name The name of the author of this message. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters
	Name string `json:"name,omitempty"`
}

type ChatRequest

// ChatRequest is the input to a chat completions request.
type ChatRequest struct {
	// Model The chat model to use.
	Model models.Chat `json:"model" validate:"required"`
	// Messages A list of messages describing the conversation so far
	Messages []ChatMessage `json:"messages"`
	// MaxTokens The maximum number of tokens to generate in the completion
	// The token count of your prompt plus max_tokens cannot exceed the model's
	// context length. Most models have a context length of 2048 tokens
	// (except for the newest models, which support 4096).
	MaxTokens int `json:"max_tokens,omitempty"`
	// Temperature What sampling temperature to use, between 0 and 2.
	// Higher values like 0.8 will make the output more random, while
	// lower values like 0.2 will make it more focused and deterministic
	Temperature float32 `json:"temperature,omitempty"`
	// TopP An alternative to sampling with temperature, called nucleus
	// sampling, where the model considers the results of the tokens with
	// top_p probability mass. So 0.1 means only the tokens comprising the
	// top 10% probability mass are considered
	TopP float32 `json:"top_p,omitempty"`
	// N How many completions to generate for each prompt
	N int `json:"n,omitempty"`

	// Stop Up to 4 sequences where the API will stop generating further tokens.
	// The returned text will not contain the stop sequence
	Stop []string `json:"stop,omitempty"`
	// PresencePenalty Number between -2.0 and 2.0. Positive values penalize
	// new tokens based on whether they appear in the text so far,
	// increasing the model's likelihood to talk about new topics
	PresencePenalty float32 `json:"presence_penalty,omitempty"`
	// FrequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens
	// based on their existing frequency in the text so far,
	// decreasing the model's likelihood to repeat the same line verbatim
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
	// LogitBias Modify the likelihood of specified tokens appearing in the completion
	LogitBias map[string]int `json:"logit_bias,omitempty"`
	// User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	User string `json:"user,omitempty"`
}

type ChatResponse

// ChatResponse is the response from a chat completions request.
type ChatResponse struct {
	ID      string       `json:"id"`
	Object  string       `json:"object"`
	Created int64        `json:"created"`
	Model   string       `json:"model"`
	Choices []ChatChoice `json:"choices"`
	Usage   TokenUsage   `json:"usage"`
}

type CompletionChoice

// CompletionChoice is a single generated alternative within a
// CompletionResponse (one entry per requested completion, see CompletionRequest.N).
type CompletionChoice struct {
	Text         string        `json:"text"`
	Index        int           `json:"index"`
	FinishReason string        `json:"finish_reason"`
	LogProbs     LogprobResult `json:"logprobs"`
}

type CompletionRequest

// CompletionRequest is the input to a (legacy) text completions request.
type CompletionRequest struct {
	// Model The completion model to use.
	Model models.Completion `json:"model" validate:"required"`
	// Prompt The prompt(s) to generate completions for, encoded as a string,
	// array of strings, array of tokens, or array of token arrays
	Prompt any `json:"prompt,omitempty" validate:"required"`
	// Suffix The suffix that comes after a completion of inserted text
	Suffix string `json:"suffix,omitempty"`
	// MaxTokens The maximum number of tokens to generate in the completion
	// The token count of your prompt plus max_tokens cannot exceed the model's
	// context length. Most models have a context length of 2048 tokens
	// (except for the newest models, which support 4096).
	MaxTokens int `json:"max_tokens,omitempty"`
	// Temperature What sampling temperature to use, between 0 and 2.
	// Higher values like 0.8 will make the output more random, while
	// lower values like 0.2 will make it more focused and deterministic
	Temperature float32 `json:"temperature,omitempty"`
	// TopP An alternative to sampling with temperature, called nucleus
	// sampling, where the model considers the results of the tokens with
	// top_p probability mass. So 0.1 means only the tokens comprising the
	// top 10% probability mass are considered
	TopP float32 `json:"top_p,omitempty"`
	// N How many completions to generate for each prompt
	N int `json:"n,omitempty"`

	// LogProbs Include the log probabilities on the logprobs most likely
	// tokens, as well the chosen tokens. For example, if logprobs is 5,
	// the API will return a list of the 5 most likely tokens.
	// The API will always return the logprob of the sampled token,
	// so there may be up to logprobs+1 elements in the response
	LogProbs int `json:"logprobs,omitempty"`
	// Echo Echo back the prompt in addition to the completion
	Echo bool `json:"echo,omitempty"`
	// Stop Up to 4 sequences where the API will stop generating further tokens.
	// The returned text will not contain the stop sequence
	Stop []string `json:"stop,omitempty"`
	// PresencePenalty Number between -2.0 and 2.0. Positive values penalize
	// new tokens based on whether they appear in the text so far,
	// increasing the model's likelihood to talk about new topics
	PresencePenalty float32 `json:"presence_penalty,omitempty"`
	// FrequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens
	// based on their existing frequency in the text so far,
	// decreasing the model's likelihood to repeat the same line verbatim
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
	// BestOf Generates best_of completions server-side and returns the "best"
	// (the one with the highest log probability per token). Results
	// cannot be streamed
	BestOf int `json:"best_of,omitempty"`
	// LogitBias Modify the likelihood of specified tokens appearing in the completion
	LogitBias map[string]int `json:"logit_bias,omitempty"`
	// User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	User string `json:"user,omitempty"`
}

type CompletionResponse

// CompletionResponse is the response from a text completions request.
type CompletionResponse struct {
	ID      string             `json:"id"`
	Object  string             `json:"object"`
	Created int64              `json:"created"`
	Model   string             `json:"model"`
	Choices []CompletionChoice `json:"choices"`
	Usage   TokenUsage         `json:"usage"`
}

type EditsChoice

// EditsChoice is a single generated alternative within an EditsResponse
// (one entry per requested edit, see EditsRequest.N).
type EditsChoice struct {
	Text  string `json:"text"`
	Index int    `json:"index"`
}

type EditsRequest

// EditsRequest is the input to an edits request.
type EditsRequest struct {
	// Model The edit model to use.
	Model models.Edit `json:"model" validate:"required"`
	// Input The input text to use as a starting point for the edit
	Input string `json:"input,omitempty"`
	// Instruction The instruction that tells the model how to edit the prompt
	Instruction string `json:"instruction" validate:"required"`
	// N How many edits to generate for the input and instruction
	N int `json:"n,omitempty"`
	// Temperature What sampling temperature to use, between 0 and 2.
	// Higher values like 0.8 will make the output more random, while
	// lower values like 0.2 will make it more focused and deterministic
	Temperature float32 `json:"temperature,omitempty"`
	// TopP An alternative to sampling with temperature, called nucleus
	// sampling, where the model considers the results of the tokens with
	// top_p probability mass. So 0.1 means only the tokens comprising the
	// top 10% probability mass are considered
	TopP float32 `json:"top_p,omitempty"`
}

type EditsResponse

// EditsResponse is the response from an edits request.
type EditsResponse struct {
	Object  string        `json:"object"`
	Created int64         `json:"created"`
	Usage   TokenUsage    `json:"usage"`
	Choices []EditsChoice `json:"choices"`
}

type EmbeddingData

// EmbeddingData is one embedding vector within EmbeddingResponse.Data,
// corresponding (by Index) to one entry of EmbeddingRequest.Input.
type EmbeddingData struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}

type EmbeddingRequest

// EmbeddingRequest is the input to a Create embeddings request.
type EmbeddingRequest struct {
	// Model The embedding model to use.
	Model models.Embedding `json:"model" validate:"required"`
	// Input is a slice of strings for which you want to generate an EmbeddingData vector.
	// Each input must not exceed 2048 tokens in length.
	// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
	// have observed inferior results when newlines are present.
	// E.g.
	//	"The food was delicious and the waiter..."
	Input []string `json:"input" validate:"required"`
	// User A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
	// NOTE(review): unlike the other request structs, this tag has no
	// ",omitempty" — an empty user string is always sent; confirm intentional.
	User string `json:"user"`
}

EmbeddingRequest is the input to a Create embeddings request.

type EmbeddingResponse

// EmbeddingResponse is the response from a Create embeddings request.
type EmbeddingResponse struct {
	Model  models.Embedding `json:"model"`
	Object string           `json:"object"`
	Data   []EmbeddingData  `json:"data"`
	Usage  TokenUsage       `json:"usage"`
}

EmbeddingResponse is the response from a Create embeddings request.

type ErrorPayload

// ErrorPayload is the "error" object embedded in an API error response
// (see ErrorResponse.Err).
type ErrorPayload struct {
	Message string `json:"message"`
	Type    string `json:"type"`
	Param   any    `json:"param"`
	Code    string `json:"code"`
}

type ErrorResponse

// ErrorResponse wraps an API error payload; it implements the error
// interface via its Error method.
type ErrorResponse struct {
	// HttpCode has no json tag — presumably populated by the client from the
	// HTTP status code rather than the response body; confirm against callers.
	// NOTE(review): idiomatic Go would name this HTTPCode, but renaming breaks callers.
	HttpCode int
	Err      *ErrorPayload `json:"error"`
}

func (*ErrorResponse) Error

func (e *ErrorResponse) Error() string

type FileResponse

// FileResponse describes a single uploaded file as returned by the files API
// (also embedded in FilesListResponse and FineTuneResponse file lists).
type FileResponse struct {
	ID        string `json:"id"`
	Bytes     int    `json:"bytes"`
	FileName  string `json:"filename"`
	Object    string `json:"object"`
	Owner     string `json:"owner"`
	Purpose   string `json:"purpose"`
	CreatedAt int64  `json:"created_at"`
}

type FileUploadRequest

// FileUploadRequest is the input to a file upload request.
type FileUploadRequest struct {
	// File Name of the JSON Lines file to be uploaded,
	// If the purpose is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields
	// representing your training examples: https://platform.openai.com/docs/guides/fine-tuning/prepare-training-data
	File *os.File `json:"file" validate:"required"`
	// Purpose The intended purpose of the uploaded documents, Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file
	Purpose string `json:"purpose" validate:"required"`
}

type FilesListResponse added in v0.2.0

// FilesListResponse is the response from a list-files request; the API's
// "data" array is exposed as Files.
type FilesListResponse struct {
	Files []FileResponse `json:"data"`
}

type FineTuneDeleteResponse

// FineTuneDeleteResponse is the response from a delete fine-tuned model request.
type FineTuneDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

type FineTuneEvent

// FineTuneEvent is a single status/log event of a fine-tune job
// (see FineTuneEventList and FineTuneResponse.FineTuneEventList).
type FineTuneEvent struct {
	Object    string `json:"object"`
	CreatedAt int64  `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
}

type FineTuneEventList

// FineTuneEventList is the response from a list fine-tune events request.
type FineTuneEventList struct {
	Object string          `json:"object"`
	Data   []FineTuneEvent `json:"data"`
}

type FineTuneHyperParams

// FineTuneHyperParams reports the hyperparameters the API applied to a
// fine-tune job (see FineTuneResponse.HyperParams).
type FineTuneHyperParams struct {
	BatchSize              int     `json:"batch_size"`
	LearningRateMultiplier float64 `json:"learning_rate_multiplier"`
	Epochs                 int     `json:"n_epochs"`
	PromptLossWeight       float64 `json:"prompt_loss_weight"`
}

type FineTuneList

// FineTuneList is the response from a list fine-tune jobs request.
type FineTuneList struct {
	Object string             `json:"object"`
	Data   []FineTuneResponse `json:"data"`
}

type FineTuneRequest

// FineTuneRequest is the input to a create fine-tune job request.
type FineTuneRequest struct {
	// Model The base model to fine-tune; optional.
	Model models.FineTunes `json:"model,omitempty"`
	// TrainingFile The ID of an uploaded file that contains training data
	TrainingFile string `json:"training_file" validate:"required"`
	// ValidationFile The ID of an uploaded file that contains validation data
	ValidationFile string `json:"validation_file,omitempty"`
	// Epochs The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset
	Epochs int `json:"n_epochs,omitempty"`
	// BatchSize The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass
	BatchSize int `json:"batch_size,omitempty"`
	// LearningRateMultiplier The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value
	// By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final batch_size (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results
	LearningRateMultiplier float32 `json:"learning_rate_multiplier,omitempty"`
	// PromptLossWeight The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short
	PromptLossWeight float32 `json:"prompt_loss_weight,omitempty"`
	// ComputeClassificationMetrics Whether to compute classification-specific metrics
	// (accuracy, F-1 score, etc.) on the validation set at the end of every epoch.
	// NOTE(review): the original comment here duplicated ClassificationClasses's text;
	// replaced per the field name and the API's compute_classification_metrics parameter.
	ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"`
	// ClassificationClasses The number of classes in a classification task
	ClassificationClasses int `json:"classification_n_classes,omitempty"`
	// ClassificationPositiveClass The positive class in binary classification
	ClassificationPositiveClass string `json:"classification_positive_class,omitempty"`
	// ClassificationBetas If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification.
	// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
	ClassificationBetas []float32 `json:"classification_betas,omitempty"`
	// Suffix A string of up to 40 characters that will be added to your fine-tuned model name.
	Suffix string `json:"suffix,omitempty"`
}

type FineTuneResponse

// FineTuneResponse describes a fine-tune job as returned by the fine-tunes API
// (also the element type of FineTuneList.Data).
type FineTuneResponse struct {
	ID                string              `json:"id"`
	Object            string              `json:"object"`
	Model             string              `json:"model"`
	CreatedAt         int64               `json:"created_at"`
	FineTuneEventList []FineTuneEvent     `json:"events,omitempty"`
	FineTunedModel    string              `json:"fine_tuned_model"`
	HyperParams       FineTuneHyperParams `json:"hyperparams"`
	OrganizationID    string              `json:"organization_id"`
	ResultFiles       []FileResponse      `json:"result_files"`
	Status            string              `json:"status"`
	ValidationFiles   []FileResponse      `json:"validation_files"`
	TrainingFiles     []FileResponse      `json:"training_files"`
	UpdatedAt         int64               `json:"updated_at"`
}

type ImageEditRequest

// ImageEditRequest is the input to an image edit request.
type ImageEditRequest struct {
	// Image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
	// image must have transparency, which will be used as the mask
	Image *os.File `json:"image" validate:"required"`
	// Mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image
	// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image
	Mask *os.File `json:"mask,omitempty"`
	// Prompt A text description of the desired image(s). The maximum length is 1000 characters
	Prompt string `json:"prompt" validate:"required"`
	// N The number of images to generate. Must be between 1 and 10
	N int `json:"n,omitempty"`
	// Size The size of the generated images. Must be one of ImageSize256x256, ImageSize512x512, or ImageSize1024x1024
	Size ImageSize `json:"size,omitempty"`
	// ResponseFormat The format in which the generated images are returned. Must be one of ImageResponseFormatURL or ImageResponseFormatB64JSON
	ResponseFormat ImageResponseFormat `json:"response_format,omitempty"`
	// User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse
	User string `json:"user,omitempty"`
}

type ImageRequest

// ImageRequest is the input to an image generation request.
type ImageRequest struct {
	// Prompt A text description of the desired image(s). The maximum length is 1000 characters
	Prompt string `json:"prompt" validate:"required"`
	// N The number of images to generate. Must be between 1 and 10
	N int `json:"n,omitempty"`
	// Size The size of the generated images. Must be one of ImageSize256x256, ImageSize512x512, or ImageSize1024x1024
	Size ImageSize `json:"size,omitempty"`
	// ResponseFormat The format in which the generated images are returned. Must be one of ImageResponseFormatURL or ImageResponseFormatB64JSON
	ResponseFormat ImageResponseFormat `json:"response_format,omitempty"`
	// User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse
	User string `json:"user,omitempty"`
}

type ImageResponse

// ImageResponse is the response from an image generation, edit, or
// variation request.
type ImageResponse struct {
	Created int64                    `json:"created,omitempty"`
	Data    []ImageResponseDataInner `json:"data,omitempty"`
}

type ImageResponseDataInner

// ImageResponseDataInner is one generated image within ImageResponse.Data;
// which field is set depends on the requested ImageResponseFormat.
type ImageResponseDataInner struct {
	URL     string `json:"url,omitempty"`
	B64JSON string `json:"b64_json,omitempty"`
}

type ImageResponseFormat

// ImageResponseFormat identifies how generated images are returned: as a URL
// or as base64-encoded JSON (string mapping implemented by
// String/MarshalJSON/UnmarshalJSON).
type ImageResponseFormat uint8
const (
	// ImageResponseFormatURL is the zero value, and therefore the default.
	ImageResponseFormatURL ImageResponseFormat = iota
	ImageResponseFormatB64JSON
)

func (ImageResponseFormat) MarshalJSON

func (i ImageResponseFormat) MarshalJSON() ([]byte, error)

func (ImageResponseFormat) String

func (i ImageResponseFormat) String() string

func (ImageResponseFormat) UnmarshalJSON

func (i ImageResponseFormat) UnmarshalJSON(data []byte) error

type ImageSize

// ImageSize identifies the dimensions of generated images
// (string mapping implemented by String/MarshalJSON/UnmarshalJSON).
type ImageSize uint8
const (
	// ImageSize256x256 is the zero value, and therefore the default.
	ImageSize256x256 ImageSize = iota
	ImageSize512x512
	ImageSize1024x1024
)

func (ImageSize) MarshalJSON

func (i ImageSize) MarshalJSON() ([]byte, error)

func (ImageSize) String

func (i ImageSize) String() string

func (ImageSize) UnmarshalJSON

func (i ImageSize) UnmarshalJSON(data []byte) error

type ImageVariationRequest

// ImageVariationRequest is the input to an image variation request.
type ImageVariationRequest struct {
	// Image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square
	Image *os.File `json:"image" validate:"required"`
	// N The number of images to generate. Must be between 1 and 10
	N int `json:"n,omitempty"`
	// Size The size of the generated images. Must be one of ImageSize256x256, ImageSize512x512, or ImageSize1024x1024
	Size ImageSize `json:"size,omitempty"`
	// ResponseFormat The format in which the generated images are returned. Must be one of ImageResponseFormatURL or ImageResponseFormatB64JSON
	ResponseFormat ImageResponseFormat `json:"response_format,omitempty"`
	// User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse
	User string `json:"user,omitempty"`
}

type LogprobResult

// LogprobResult carries token-level log-probability details for a
// CompletionChoice (returned when CompletionRequest.LogProbs is set).
type LogprobResult struct {
	Tokens        []string             `json:"tokens"`
	TokenLogprobs []float32            `json:"token_logprobs"`
	TopLogprobs   []map[string]float32 `json:"top_logprobs"`
	TextOffset    []int                `json:"text_offset"`
}

type Models

// Models describes a single model as returned by the models API
// (also the element type of ModelsResponse.Data).
type Models struct {
	ID         string             `json:"id"`
	Object     string             `json:"object"`
	OwnedBy    string             `json:"owned_by"`
	Permission []ModelsPermission `json:"permission"`
	Root       string             `json:"root"`
	Parent     any                `json:"parent"`
}

type ModelsPermission

// ModelsPermission describes one permission entry of a model
// (see Models.Permission).
type ModelsPermission struct {
	ID                 string `json:"id"`
	Object             string `json:"object"`
	Created            int64  `json:"created"`
	AllowCreateEngine  bool   `json:"allow_create_engine"`
	AllowSampling      bool   `json:"allow_sampling"`
	AllowLogprobs      bool   `json:"allow_logprobs"`
	AllowSearchIndices bool   `json:"allow_search_indices"`
	AllowView          bool   `json:"allow_view"`
	AllowFineTuning    bool   `json:"allow_fine_tuning"`
	Organization       string `json:"organization"`
	Group              any    `json:"group"`
	IsBlocking         bool   `json:"is_blocking"`
}

type ModelsResponse

// ModelsResponse is the response from a list models request.
type ModelsResponse struct {
	Object string   `json:"object"`
	Data   []Models `json:"data"`
}

type ModerationRequest

// ModerationRequest is the input to a content moderation request.
type ModerationRequest struct {
	// Model Two content moderations models are available: models.TEXT_MODERATION_STABLE and models.TEXT_MODERATION_LATEST
	Model models.Moderation `json:"model,omitempty"`
	// Input The input text to classify
	Input any `json:"input" validate:"required"`
}

type ModerationResponse

// ModerationResponse is the response from a content moderation request.
type ModerationResponse struct {
	ID      string   `json:"id"`
	Model   string   `json:"model"`
	Results []Result `json:"results"`
}

type Result

// Result is one moderation verdict within ModerationResponse.Results:
// per-category flags, per-category scores, and an overall Flagged boolean.
type Result struct {
	Categories     ResultCategories     `json:"categories"`
	CategoryScores ResultCategoryScores `json:"category_scores"`
	Flagged        bool                 `json:"flagged"`
}

type ResultCategories

// ResultCategories holds the per-category boolean verdicts of a moderation
// Result; field names mirror the API's category keys (e.g. "hate/threatening").
type ResultCategories struct {
	Hate            bool `json:"hate"`
	HateThreatening bool `json:"hate/threatening"`
	SelfHarm        bool `json:"self-harm"`
	Sexual          bool `json:"sexual"`
	SexualMinors    bool `json:"sexual/minors"`
	Violence        bool `json:"violence"`
	ViolenceGraphic bool `json:"violence/graphic"`
}

type ResultCategoryScores

// ResultCategoryScores holds the per-category confidence scores of a
// moderation Result, keyed identically to ResultCategories.
type ResultCategoryScores struct {
	Hate            float32 `json:"hate"`
	HateThreatening float32 `json:"hate/threatening"`
	SelfHarm        float32 `json:"self-harm"`
	Sexual          float32 `json:"sexual"`
	SexualMinors    float32 `json:"sexual/minors"`
	Violence        float32 `json:"violence"`
	ViolenceGraphic float32 `json:"violence/graphic"`
}

type Role

type Role uint8

func (Role) MarshalJSON

func (r Role) MarshalJSON() ([]byte, error)

func (Role) String

func (r Role) String() string

func (Role) UnmarshalJSON

func (r Role) UnmarshalJSON(data []byte) error

type TokenUsage

// TokenUsage reports the token accounting of a request: prompt tokens,
// completion tokens, and their total. Embedded in the Usage field of the
// chat, completion, edits, and embedding responses.
type TokenUsage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL