openaigo

package module
v1.6.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 26, 2023 License: MIT Imports: 10 Imported by: 33

README

openaigo

Go CodeQL App Test over API License FOSSA Status
Maintainability Go Report Card codecov
Reference GoDoc

Yet another API client for api.openai.com.

This library is community-maintained, NOT officially supported by OpenAI.

Usage Example

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/otiai10/openaigo"
)

// main demonstrates the minimal chat-completion flow: build a client
// from the OPENAI_API_KEY environment variable, send one user message,
// and print the result.
func main() {
	client := openaigo.NewClient(os.Getenv("OPENAI_API_KEY"))
	request := openaigo.ChatRequest{
		Model: "gpt-3.5-turbo",
		Messages: []openaigo.Message{
			{Role: "user", Content: "Hello!"},
		},
	}
	ctx := context.Background()
	response, err := client.Chat(ctx, request)
	if err != nil {
		// Handle the error explicitly rather than printing the
		// zero-valued response alongside it.
		fmt.Println("chat completion failed:", err)
		return
	}
	fmt.Println(response)
}

If you just want to try it out, run the commands below.

git clone https://github.com/otiai10/openaigo.git
cd openaigo
OPENAI_API_KEY=YourAPIKey go run ./testapp/main.go

See test app as a working example.

API Keys?

Visit https://beta.openai.com/account/api-keys and you can create your own API key to get started for free.

Endpoint Support

Need function_call?

request := openaigo.ChatRequest{
  Messages: []openaigo.Message{
    {Role: "user", Content: "How's the weather today in Tokyo?"},
  },
  Functions: []openaigo.Function{
    {
      Name: "get_weather",
      // Parameters is the JSON-Schema-style description of the
      // function's arguments.
      Parameters: openaigo.Parameters{
        Type:       "object",
        Properties: map[string]map[string]any{
          "location": {"type": "string"},
          "date":     {"type": "string", "description": "ISO 8601 date string"},
        },
        Required: []string{"location"},
      },
    }, // trailing comma is required before a closing brace on its own line
  },
}

If you want a shorthand, use the functioncall package.

import fc "github.com/otiai10/openaigo/functioncall"

// fc.Funcs maps a function name to its implementation, a description,
// and its parameters. Each fc.Params entry is presumably
// {name, type, description, required} — see the functioncall package.
request.Functions = fc.Funcs{
  "get_weather": {GetWeather, "Get weather of the location", fc.Params{
    {"location", "string", "location of the weather", true},
    {"date", "string", "ISO 8601 date string", true},
  }},
}

See test app as a working example.

Need stream?

client := openaigo.NewClient(OPENAI_API_KEY)
request := openaigo.ChatRequest{
  Stream: true,
  // The response type must be package-qualified here, since this
  // snippet lives outside the openaigo package.
  StreamCallback: func(res openaigo.ChatCompletionResponse, done bool, err error) {
    // Do what you want!
    // You might need chan handling here.
    // See test app how you can do it.
    // https://github.com/otiai10/openaigo/search?q=chat_completion_stream
  },
}

Need Proxy?

client := openaigo.NewClient(OPENAI_API_KEY)
// You can set whatever you want
transport := &http.Transport{ Proxy: http.ProxyFromEnvironment }
// Client.HTTPClient is optional; when nil, http.DefaultClient is used,
// so only set it when you need a custom transport like this.
client.HTTPClient = &http.Client{ Transport: transport }
// Done!

Issues

Report any issues here; any feedback is welcome.

Documentation

Index

Constants

View Source
// Image size values (all square) accepted by the images endpoints,
// e.g. ImageGenerationRequestBody — TODO confirm the full set against
// the API reference.
const (
	Size256  string = "256x256"
	Size512  string = "512x512"
	Size1024 string = "1024x1024"
)
View Source
// Known model identifiers, grouped by model family.
// See https://beta.openai.com/docs/models/overview
const (
	// {{{ https://beta.openai.com/docs/models/gpt-3
	TextDavinci003 = "text-davinci-003"
	TextCurie001   = "text-curie-001"
	TextBabbage001 = "text-babbage-001"
	TextAda001     = "text-ada-001"

	// {{{ https://platform.openai.com/docs/models/gpt-3-5
	GPT3_5Turbo          = "gpt-3.5-turbo"
	GPT3_5Turbo_0301     = "gpt-3.5-turbo-0301"
	GPT3_5Turbo_0613     = "gpt-3.5-turbo-0613"
	GPT3_5Turbo_16K      = "gpt-3.5-turbo-16k"
	GPT3_5Turbo_16K_0613 = "gpt-3.5-turbo-16k-0613"

	// {{{ https://platform.openai.com/docs/models/gpt-4
	GPT4          = "gpt-4"
	GPT4_0314     = "gpt-4-0314"
	GPT4_0613     = "gpt-4-0613"
	GPT4_32K      = "gpt-4-32k"
	GPT4_32K_0314 = "gpt-4-32k-0314"
	GPT4_32K_0613 = "gpt-4-32k-0613"
)

https://beta.openai.com/docs/models/overview

View Source
// DefaultOpenAIAPIURL is the default BaseURL for Client, including the
// API version path segment.
const DefaultOpenAIAPIURL = "https://api.openai.com/v1"

Variables

View Source
// Byte markers used when parsing server-sent events from streaming
// endpoints: lines are prefixed "data: " or "error: ", and the stream
// is terminated by a "data: [DONE]" message (see ChatCompletionRequestBody.Stream).
var (
	StreamPrefixDATA  = []byte("data: ")
	StreamPrefixERROR = []byte("error: ")
	StreamDataDONE    = []byte("[DONE]")
)

Functions

This section is empty.

Types

type APIError added in v1.0.1

// APIError is the error payload returned by api.openai.com, enriched
// with the HTTP status of the failed response.
type APIError struct {
	Message string       `json:"message"`
	Type    APIErrorType `json:"type"`
	Param   interface{}  `json:"param"` // TODO: typing
	Code    interface{}  `json:"code"`  // TODO: typing

	// Status and StatusCode carry no json tags, so they are not decoded
	// from the body — presumably populated by the client from the HTTP
	// response; confirm against the client implementation.
	Status     string
	StatusCode int
}

func (APIError) Error added in v1.0.1

func (err APIError) Error() string

type APIErrorType added in v1.0.1

// APIErrorType discriminates the "type" field of an APIError.
type APIErrorType string
// Known APIErrorType values returned by the API.
const (
	ErrorInsufficientQuota APIErrorType = "insufficient_quota"
	ErrorInvalidRequest    APIErrorType = "invalid_request_error"
)

type ChatCompletionRequestBody added in v1.0.0

// ChatCompletionRequestBody is the request payload for the chat
// completions endpoint (POST /chat/completions).
// https://platform.openai.com/docs/guides/chat/chat-completions-beta
// https://platform.openai.com/docs/api-reference/chat
type ChatCompletionRequestBody struct {

	// Model: ID of the model to use.
	// Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
	Model string `json:"model"`

	// Messages: The messages to generate chat completions for, in the chat format.
	// https://platform.openai.com/docs/guides/chat/introduction
	// Including the conversation history helps when user instructions refer to prior messages.
	// In the example above, the user’s final question of “Where was it played?” only makes sense in the context of the prior messages about the World Series of 2020.
	// Because the models have no memory of past requests, all relevant information must be supplied via the conversation.
	// If a conversation cannot fit within the model’s token limit, it will need to be shortened in some way.
	Messages []Message `json:"messages"`

	// Temperature: What sampling temperature to use, between 0 and 2.
	// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	// Defaults to 1.
	Temperature float32 `json:"temperature,omitempty"`

	// TopP: An alternative to sampling with temperature, called nucleus sampling,
	// where the model considers the results of the tokens with top_p probability mass.
	// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// Defaults to 1.
	TopP float32 `json:"top_p,omitempty"`

	// N: How many chat completion choices to generate for each input message.
	// Defaults to 1.
	N int `json:"n,omitempty"`

	// Stream: If set, partial message deltas will be sent, like in ChatGPT.
	// Tokens will be sent as data-only server-sent events as they become available,
	// with the stream terminated by a data: [DONE] message.
	Stream bool `json:"stream,omitempty"`

	// StreamCallback is a callback function to handle stream response.
	// If provided, this library automatically sets `Stream` to `true`.
	// This field is added by github.com/otiai10/openaigo only to handle Stream.
	// Thus, it is omitted (json:"-") when the client executes the HTTP request.
	StreamCallback func(res ChatCompletionResponse, done bool, err error) `json:"-"`

	// Stop: Up to 4 sequences where the API will stop generating further tokens.
	// Defaults to null.
	Stop []string `json:"stop,omitempty"`

	// MaxTokens: The maximum number of tokens allowed for the generated answer.
	// By default, the number of tokens the model can return will be (4096 - prompt tokens).
	MaxTokens int `json:"max_tokens,omitempty"`

	// PresencePenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on whether they appear in the text so far,
	// increasing the model's likelihood to talk about new topics.
	// See more information about frequency and presence penalties.
	// https://platform.openai.com/docs/api-reference/parameter-details
	PresencePenalty float32 `json:"presence_penalty,omitempty"`

	// FrequencyPenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on their existing frequency in the text so far,
	// decreasing the model's likelihood to repeat the same line verbatim.
	// See more information about frequency and presence penalties.
	// https://platform.openai.com/docs/api-reference/parameter-details
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`

	// LogitBias: Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a json object that maps tokens (specified by their token ID in the tokenizer)
	// to an associated bias value from -100 to 100.
	// Mathematically, the bias is added to the logits generated by the model prior to sampling.
	// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
	// values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	LogitBias map[string]int `json:"logit_bias,omitempty"`

	// User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	// https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
	User string `json:"user,omitempty"`

	// Functions: A list of functions which GPT is allowed to request to call.
	// Typed as json.Marshaler (rather than []Function) so callers can
	// supply alternative encodings such as functioncall.Funcs.
	// Functions []Function `json:"functions,omitempty"`
	Functions json.Marshaler `json:"functions,omitempty"`

	// FunctionCall: Optional; the API defaults to "auto".
	FunctionCall string `json:"function_call,omitempty"`
}

ChatCompletionRequestBody: https://platform.openai.com/docs/guides/chat/chat-completions-beta https://platform.openai.com/docs/api-reference/chat

type ChatCompletionResponse added in v1.0.0

// ChatCompletionResponse is the response body of the chat completions
// endpoint. In streaming mode, each event carries the incremental
// content in Choices[i].Delta rather than Choices[i].Message.
type ChatCompletionResponse struct {
	ID      string   `json:"id"`
	Object  string   `json:"object"`
	Created int64    `json:"created"`
	Choices []Choice `json:"choices"`
	Usage   Usage    `json:"usage"`
}

type ChatRequest added in v1.2.0

// ChatRequest is just an alias of ChatCompletionRequestBody.
type ChatRequest ChatCompletionRequestBody

ChatRequest is just an alias of ChatCompletionRequestBody.

type Choice

// Choice is a single chat-completion candidate within a
// ChatCompletionResponse.
type Choice struct {
	Index        int     `json:"index"`
	Message      Message `json:"message"`
	FinishReason string  `json:"finish_reason"`
	Delta        Message `json:"delta"` // Only appears in stream response
}

type Client

// Client for api.openai.com API endpoints.
type Client struct {

	// APIKey issued by OpenAI console.
	// See https://beta.openai.com/account/api-keys
	APIKey string

	// BaseURL of API including the version.
	// e.g., https://api.openai.com/v1
	// When empty, presumably DefaultOpenAIAPIURL is used — confirm
	// against the client implementation.
	BaseURL string

	// Organization
	Organization string

	// HTTPClient (optional) to proxy HTTP request.
	// If nil, http.DefaultClient will be used.
	HTTPClient *http.Client
}

Client for api.openai.com API endpoints.

func NewClient

func NewClient(apikey string) *Client

func (*Client) CancelFineTune

func (client *Client) CancelFineTune(ctx context.Context, id string) (resp FineTuneCancelResponse, err error)

CancelFineTune: POST https://api.openai.com/v1/fine-tunes/{fine_tune_id}/cancel Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning Immediately cancel a fine-tune job. See https://platform.openai.com/docs/api-reference/fine-tunes/cancel

func (*Client) CancelFineTuning added in v1.6.0

func (client *Client) CancelFineTuning(ctx context.Context, id string) (resp FineTuningJob, err error)

CancelFineTuning: POST https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}/cancel Immediately cancel a fine-tuning job. Learn more about Fine-tuning https://platform.openai.com/docs/guides/fine-tuning See https://platform.openai.com/docs/api-reference/fine-tuning/cancel

func (*Client) Chat added in v1.0.0

func (client *Client) Chat(ctx context.Context, body ChatRequest) (resp ChatCompletionResponse, err error)

Chat, short-hand of ChatCompletion. Creates a completion for the chat message.

func (*Client) ChatCompletion added in v1.0.0

func (client *Client) ChatCompletion(ctx context.Context, body ChatCompletionRequestBody) (resp ChatCompletionResponse, err error)

ChatCompletion: POST https://api.openai.com/v1/chat/completions Creates a completion for the chat message. See https://platform.openai.com/docs/api-reference/chat/create

func (*Client) Completion

func (client *Client) Completion(ctx context.Context, body CompletionRequestBody) (resp CompletionResponse, err error)

Completion: POST https://api.openai.com/v1/completions Creates a completion for the provided prompt and parameters See https://beta.openai.com/docs/api-reference/completions/create

func (*Client) CreateEdit added in v0.2.0

func (client *Client) CreateEdit(ctx context.Context, body EditCreateRequestBody) (resp EditCreateResponse, err error)

Edit: POST https://api.openai.com/v1/edits Creates a new edit for the provided input, instruction, and parameters. See https://beta.openai.com/docs/api-reference/edits/create

func (*Client) CreateEmbedding

func (client *Client) CreateEmbedding(ctx context.Context, body EmbeddingCreateRequestBody) (resp EmbeddingCreateResponse, err error)

CreateEmbedding: POST https://api.openai.com/v1/embeddings Creates an embedding vector representing the input text. See https://beta.openai.com/docs/api-reference/embeddings/create

func (*Client) CreateFineTune

func (client *Client) CreateFineTune(ctx context.Context, body FineTuneCreateRequestBody) (resp FineTuneCreateResponse, err error)

CreateFineTune: POST https://api.openai.com/v1/fine-tunes Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. Learn more about Fine-tuning: https://platform.openai.com/docs/api-reference/fine-tuning See https://platform.openai.com/docs/api-reference/fine-tunes/create

func (*Client) CreateFineTuning added in v1.6.0

func (client *Client) CreateFineTuning(ctx context.Context, body FineTuningCreateRequestBody) (resp FineTuningJob, err error)

CreateFineTuning: POST https://api.openai.com/v1/fine_tuning/jobs Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. Learn more about Fine-tuning: https://platform.openai.com/docs/guides/fine-tuning See https://platform.openai.com/docs/api-reference/fine-tuning/create

func (*Client) CreateImage

func (client *Client) CreateImage(ctx context.Context, body ImageGenerationRequestBody) (resp ImageGenerationResponse, err error)

CreateImage: POST https://api.openai.com/v1/images/generations Creates an image given a prompt. See https://beta.openai.com/docs/api-reference/images/create

func (*Client) CreateImageVariation

func (client *Client) CreateImageVariation(ctx context.Context, body ImageVariationRequestBody) (resp ImageVariationResponse, err error)

CreateImageVariation: POST https://api.openai.com/v1/images/variations Creates a variation of a given image. See https://beta.openai.com/docs/api-reference/images/create-variation

func (*Client) CreateModeration

func (client *Client) CreateModeration(ctx context.Context, body ModerationCreateRequestBody) (resp ModerationCreateResponse, err error)

CreateModeration: POST https://api.openai.com/v1/moderations Classifies if text violates OpenAI's Content Policy. See https://beta.openai.com/docs/api-reference/moderations/create

func (*Client) DeleteFile

func (client *Client) DeleteFile(ctx context.Context, id string) (resp FileDeleteResponse, err error)

DeleteFile: DELETE https://api.openai.com/v1/files/{file_id} Delete a file. See https://beta.openai.com/docs/api-reference/files/delete

func (*Client) DeleteFineTuneModel

func (client *Client) DeleteFineTuneModel(ctx context.Context, id string) (resp FineTuneDeleteModelResponse, err error)

DeleteFineTuneModel: DELETE https://api.openai.com/v1/models/{model} Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning Delete a fine-tuned model. You must have the Owner role in your organization. See https://platform.openai.com/docs/api-reference/fine-tunes/delete-model

func (*Client) EditImage

func (client *Client) EditImage(ctx context.Context, body ImageEditRequestBody) (resp ImageEditResponse, err error)

func (*Client) ListFiles

func (client *Client) ListFiles(ctx context.Context) (resp FileListResponse, err error)

ListFiles: GET https://api.openai.com/v1/files Returns a list of files that belong to the user's organization. See https://beta.openai.com/docs/api-reference/files/list

func (*Client) ListFineTuneEvents

func (client *Client) ListFineTuneEvents(ctx context.Context, id string) (resp FineTuneListEventsResponse, err error)

ListFineTuneEvents: GET https://api.openai.com/v1/fine-tunes/{fine_tune_id}/events Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning Get fine-grained status updates for a fine-tune job. See https://platform.openai.com/docs/api-reference/fine-tunes/events

func (*Client) ListFineTunes

func (client *Client) ListFineTunes(ctx context.Context) (resp FineTuneListResponse, err error)

ListFineTunes: GET https://api.openai.com/v1/fine-tunes Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning List your organization's fine-tuning jobs. See https://platform.openai.com/docs/api-reference/fine-tunes/list

func (*Client) ListFineTuningEvents added in v1.6.0

func (client *Client) ListFineTuningEvents(ctx context.Context, id string) (resp FineTuningListEventsResponse, err error)

ListFineTuningEvents: GET https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}/events Get fine-grained status updates for a fine-tuning job. Learn more about Fine-tuning https://platform.openai.com/docs/guides/fine-tuning See https://platform.openai.com/docs/api-reference/fine-tuning/list-events

func (*Client) ListModels

func (client *Client) ListModels(ctx context.Context) (resp ModelsListResponse, err error)

ListModels: GET /models Lists the currently available models, and provides basic information about each one such as the owner and availability. See https://beta.openai.com/docs/api-reference/models/list

func (*Client) RetrieveFile

func (client *Client) RetrieveFile(ctx context.Context, id string) (resp FileRetrieveResponse, err error)

RetrieveFile: GET https://api.openai.com/v1/files/{file_id} Returns information about a specific file. See https://beta.openai.com/docs/api-reference/files/retrieve

func (*Client) RetrieveFileContent

func (client *Client) RetrieveFileContent(ctx context.Context, id string) (res io.ReadCloser, err error)

RetrieveFileContent: GET https://api.openai.com/v1/files/{file_id}/content Returns the contents of the specified file. User must Close response after used. See https://beta.openai.com/docs/api-reference/files/retrieve-content

func (*Client) RetrieveFineTune

func (client *Client) RetrieveFineTune(ctx context.Context, id string) (resp FineTuneRetrieveResponse, err error)

RetrieveFineTune: GET https://api.openai.com/v1/fine-tunes/{fine_tune_id} Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning Gets info about the fine-tune job. Learn more about Fine-tuning https://platform.openai.com/docs/api-reference/fine-tuning See https://platform.openai.com/docs/api-reference/fine-tunes/retrieve

func (*Client) RetrieveFineTuning added in v1.6.0

func (client *Client) RetrieveFineTuning(ctx context.Context, id string) (resp FineTuningJob, err error)

RetrieveFineTuning: GET https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id} Gets info about the fine-tuning job. Learn more about Fine-tuning https://platform.openai.com/docs/guides/fine-tuning See https://platform.openai.com/docs/api-reference/fine-tuning/retrieve

func (*Client) RetrieveModel

func (client *Client) RetrieveModel(ctx context.Context, model string) (resp ModelRetrieveResponse, err error)

RetrieveModel: GET /models/{model} Retrieves a model instance, providing basic information about the model such as the owner and permissioning. See https://beta.openai.com/docs/api-reference/models/retrieve

func (*Client) UploadFile

func (client *Client) UploadFile(ctx context.Context, body FileUploadRequestBody) (resp FileUploadResponse, err error)

UploadFile: POST https://api.openai.com/v1/files Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. See https://beta.openai.com/docs/api-reference/files/upload

type CompletionChoice added in v1.2.0

// CompletionChoice is a single candidate in a (non-chat) completion or
// edit response.
type CompletionChoice struct {
	Text         string `json:"text"`
	Index        int    `json:"index"`
	LogProbs     int    `json:"logprobs"`
	FinishReason string `json:"finish_reason"`
}

type CompletionRequestBody

// CompletionRequestBody is the request payload for POST /completions.
// See https://beta.openai.com/docs/api-reference/completions/create
type CompletionRequestBody struct {

	// Model: ID of the model to use.
	// You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-model
	Model string `json:"model"`

	// Prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-prompt
	Prompt []string `json:"prompt"`

	// MaxTokens: The maximum number of tokens to generate in the completion.
	// The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-max_tokens
	MaxTokens int `json:"max_tokens,omitempty"`

	// Temperature: What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
	// We generally recommend altering this or top_p but not both.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-temperature
	Temperature float32 `json:"temperature,omitempty"`

	// Suffix: The suffix that comes after a completion of inserted text.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-suffix
	Suffix string `json:"suffix,omitempty"`

	// TopP: An alternative to sampling with temperature, called nucleus sampling,
	// where the model considers the results of the tokens with top_p probability mass.
	// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-top_p
	TopP float32 `json:"top_p,omitempty"`

	// N: How many completions to generate for each prompt.
	// Note: Because this parameter generates many completions, it can quickly consume your token quota.
	// Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-n
	N int `json:"n,omitempty"`

	// Stream: Whether to stream back partial progress.
	// If set, tokens will be sent as data-only server-sent events as they become available,
	// with the stream terminated by a data: [DONE] message.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
	Stream bool `json:"stream,omitempty"`

	// LogProbs: Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens.
	// For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
	// The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-logprobs
	LogProbs int `json:"logprobs,omitempty"`

	// Echo: Echo back the prompt in addition to the completion.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-echo
	Echo bool `json:"echo,omitempty"`

	// Stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-stop
	Stop []string `json:"stop,omitempty"`

	// PresencePenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	// See more information about frequency and presence penalties.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty
	PresencePenalty float32 `json:"presence_penalty,omitempty"`

	// FrequencyPenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	// See more information about frequency and presence penalties.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`

	// BestOf: Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
	// When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.
	// Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-best_of
	BestOf int `json:"best_of,omitempty"`

	// LogitBias: Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	// As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"`

	// User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-user
	User string `json:"user,omitempty"`
}

type CompletionResponse

// CompletionResponse is the response body of POST /completions.
type CompletionResponse struct {
	ID      string             `json:"id"`
	Object  ObjectType         `json:"object"`
	Created int64              `json:"created"`
	Model   string             `json:"model"`
	Choices []CompletionChoice `json:"choices"`
	// NOTE(review): no `json:"usage"` tag here — decoding still matches
	// the API's "usage" key case-insensitively, but this field marshals
	// as "Usage"; confirm whether a tag was intended.
	Usage   Usage
}

type EditCreateRequestBody added in v0.2.0

// EditCreateRequestBody is the request payload for POST /edits.
type EditCreateRequestBody struct {
	Model       string  `json:"model"`
	Instruction string  `json:"instruction"`
	Input       string  `json:"input,omitempty"`
	N           int     `json:"n,omitempty"`
	Temperature float32 `json:"temperature,omitempty"`
	TopP        float32 `json:"top_p,omitempty"`
}

type EditCreateResponse added in v0.2.0

// EditCreateResponse is the response body of POST /edits.
type EditCreateResponse struct {
	Object  ObjectType         `json:"object"`
	Created int64              `json:"created"`
	Choices []CompletionChoice `json:"choices"`
	Usage   Usage              `json:"usage"`
}

type EmbeddingCreateRequestBody

// EmbeddingCreateRequestBody is the request payload for POST /embeddings.
type EmbeddingCreateRequestBody struct {
	Model string   `json:"model"`
	Input []string `json:"input"`
	User  string   `json:"user,omitempty"`
}

type EmbeddingCreateResponse

// EmbeddingCreateResponse is the response body of POST /embeddings;
// Data holds one EmbeddingData per input.
type EmbeddingCreateResponse struct {
	Object string          `json:"object"`
	Data   []EmbeddingData `json:"data"`
	Usage  Usage           `json:"usage"`
}

type EmbeddingData

// EmbeddingData is a single embedding vector and the index of the input
// it corresponds to.
type EmbeddingData struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}

type FileData

// FileData describes a file stored with the /files endpoints.
type FileData struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Bytes     int64  `json:"bytes"`
	CreatedAt int64  `json:"created_at"`
	Filename  string `json:"filename"`
	// NOTE(review): tag "purpuse" looks like a typo for "purpose" —
	// confirm against the API before changing (the API returns
	// "purpose", so this field likely never decodes).
	Purpose   string `json:"purpuse"`
}

type FileDeleteResponse

// FileDeleteResponse is the response body of DELETE /files/{file_id}.
type FileDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

type FileListResponse

// FileListResponse is the response body of GET /files.
type FileListResponse struct {
	Object string     `json:"object"`
	Data   []FileData `json:"data"`
}

type FileRetrieveResponse

// FileRetrieveResponse is the response body of GET /files/{file_id};
// it has the same shape as FileData.
type FileRetrieveResponse FileData

type FileUploadRequestBody

// FileUploadRequestBody is the multipart form payload for POST /files;
// it carries no json tags because it is encoded via ToMultipartFormData.
type FileUploadRequestBody struct {
	File    io.Reader
	Purpose string
}

func (FileUploadRequestBody) ToMultipartFormData

func (body FileUploadRequestBody) ToMultipartFormData() (*bytes.Buffer, string, error)

type FileUploadResponse

// FileUploadResponse is the response body of POST /files; it has the
// same shape as FileData.
type FileUploadResponse FileData

type FineTuneCancelResponse

// FineTuneCancelResponse is the response of the (deprecated) fine-tune
// cancel endpoint.
// NOTE(review): encoding/json ignores the ",inline" option — embedded
// struct fields are promoted by default, so the tag is a no-op.
type FineTuneCancelResponse struct {
	Events       []FineTuneEvent `json:"events"`
	FineTuneData `json:",inline"`
}

type FineTuneCreateRequestBody

// FineTuneCreateRequestBody is the request payload for the (deprecated)
// POST /fine-tunes endpoint.
type FineTuneCreateRequestBody struct {
	TrainingFile                 string    `json:"training_file"`
	ValidationFile               string    `json:"validation_file,omitempty"`
	Model                        string    `json:"model,omitempty"`
	NEpochs                      int       `json:"n_epochs,omitempty"`
	BatchSize                    int       `json:"batch_size,omitempty"`
	LearningRateMultiplier       float32   `json:"learning_rate_multiplier,omitempty"`
	PromptLossWeight             float32   `json:"prompt_loss_weight,omitempty"`
	ComputeClassificationMetrics bool      `json:"compute_classification_metrics,omitempty"`
	ClassificationNClasses       int       `json:"classification_n_classes,omitempty"`
	ClassificationPositiveClass  string    `json:"classification_positive_class,omitempty"`
	ClassificationBetas          []float32 `json:"classification_betas,omitempty"`
	Suffix                       string    `json:"suffix,omitempty"`
}

type FineTuneCreateResponse

// FineTuneCreateResponse is the response of the (deprecated)
// POST /fine-tunes endpoint.
type FineTuneCreateResponse struct {
	Events       []FineTuneEvent `json:"events"`
	FineTuneData `json:",inline"`
}

type FineTuneData

// FineTuneData describes a (deprecated) fine-tune job and its
// associated files and hyperparameters.
type FineTuneData struct {
	ID              string          `json:"id"`
	Object          string          `json:"object"`
	Model           string          `json:"model"`
	CreatedAt       int64           `json:"created_at"`
	Events          []FineTuneEvent `json:"events"`
	FineTunedModel  interface{}     `json:"fine_tuned_model"` // TODO: typing
	Hyperparams     Hyperparams     `json:"hyperparams"`
	OrganizationID  string          `json:"organization_id"`
	ResultFiles     []FileData      `json:"result_files"`
	Status          string          `json:"status"`
	ValidationFiles []FileData      `json:"validation_files"`
	TrainingFiles   []FileData      `json:"training_files"`
	UpdatedAt       int64           `json:"updated_at"`
}

type FineTuneDeleteModelResponse

// FineTuneDeleteModelResponse is the response body of
// DELETE /models/{model}.
type FineTuneDeleteModelResponse struct {
	// NOTE(review): tag "string" is likely meant to be "id" — confirm;
	// as written, the API's "id" key never decodes into this field.
	ID      string `json:"string"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

type FineTuneEvent

// FineTuneEvent is a single event entry in a (legacy) fine-tune job's history.
type FineTuneEvent struct {
	Object    string `json:"object"`
	CreatedAt int64  `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
}

type FineTuneListEventsResponse

// FineTuneListEventsResponse is the response body of the (legacy) fine-tune
// "list events" endpoint.
type FineTuneListEventsResponse struct {
	Object string          `json:"object"`
	Data   []FineTuneEvent `json:"data"`
}

type FineTuneListResponse

// FineTuneListResponse is the response body of the (legacy) fine-tune
// "list jobs" endpoint.
type FineTuneListResponse struct {
	Object string         `json:"object"`
	Data   []FineTuneData `json:"data"`
}

type FineTuneRetrieveResponse

// FineTuneRetrieveResponse is the response body of the (legacy) fine-tune
// "retrieve" endpoint.
// NOTE(review): as in FineTuneCreateResponse, Events shadows the promoted
// FineTuneData.Events, and encoding/json ignores the ",inline" option.
type FineTuneRetrieveResponse struct {
	Events       []FineTuneEvent `json:"events"`
	FineTuneData `json:",inline"`
}

type FineTuningCreateRequestBody added in v1.6.0

// FineTuningCreateRequestBody is the request body for creating a fine-tuning
// job (the newer /fine_tuning API). Only TrainingFile is always serialized.
type FineTuningCreateRequestBody struct {
	TrainingFile    string           `json:"training_file"`
	ValidationFile  string           `json:"validation_file,omitempty"`
	Model           string           `json:"model,omitempty"`
	Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty"` // pointer so an unset value is omitted entirely
	Suffix          string           `json:"suffix,omitempty"`
}

type FineTuningEvent added in v1.6.0

// FineTuningEvent is a single event entry for a fine-tuning job
// (the newer /fine_tuning API).
// NOTE(review): CreatedAt is int here while the sibling FineTuneEvent and
// FineTuningJob use int64 — works on 64-bit targets, but confirm the
// inconsistency is intentional.
type FineTuningEvent struct {
	Object    string `json:"object"`
	ID        string `json:"id"`
	CreatedAt int    `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
	Data      any    `json:"data"`
	Type      string `json:"type"`
}

type FineTuningJob added in v1.6.0

// FineTuningJob describes a fine-tuning job as returned by the
// /fine_tuning API.
type FineTuningJob struct {
	ID              string          `json:"id"`
	Object          string          `json:"object"`
	CreatedAt       int64           `json:"created_at"`
	FinishedAt      int64           `json:"finished_at"`
	Model           string          `json:"model"`
	FineTunedModel  string          `json:"fine_tuned_model,omitempty"`
	OrganizationID  string          `json:"organization_id"`
	Status          string          `json:"status"`
	Hyperparameters Hyperparameters `json:"hyperparameters"`
	TrainingFile    string          `json:"training_file"`
	ValidationFile  string          `json:"validation_file,omitempty"`
	ResultFiles     []string        `json:"result_files"`
	TrainedTokens   int             `json:"trained_tokens"`
}

type FineTuningListEventsResponse added in v1.6.0

// FineTuningListEventsResponse is the response body of the fine-tuning
// "list events" endpoint.
// NOTE(review): Data is typed as the legacy []FineTuneEvent, not
// []FineTuningEvent — decoding drops FineTuningEvent's ID/Data/Type fields.
// Changing the element type would break callers, so this is flagged only;
// confirm which event shape this endpoint actually returns.
type FineTuningListEventsResponse struct {
	Object  string          `json:"object"`
	Data    []FineTuneEvent `json:"data"`
	HasMore bool            `json:"has_more"`
}

type Function added in v1.2.0

// Function declares a function that the chat model may call
// (the "functions" parameter of a chat request).
type Function struct {
	Name        string     `json:"name,omitempty"`
	Description string     `json:"description,omitempty"`
	Parameters  Parameters `json:"parameters,omitempty"`
}

type FunctionCall added in v1.2.0

// FunctionCall is the model's request to invoke a declared function.
// The fields hold the wire values verbatim; the Name() and Args() accessor
// methods (defined elsewhere in this package) expose parsed views.
type FunctionCall struct {
	NameRaw      string `json:"name,omitempty"`
	ArgumentsRaw string `json:"arguments,omitempty"` // presumably a JSON-encoded argument object — parsed by Args()
}

func (*FunctionCall) Args added in v1.5.0

func (fc *FunctionCall) Args() map[string]any

func (*FunctionCall) Name added in v1.2.0

func (fc *FunctionCall) Name() string

type Functions added in v1.4.0

// Functions is a list of Function declarations with a custom MarshalJSON
// (defined elsewhere in this package).
type Functions []Function

func (Functions) MarshalJSON added in v1.4.0

func (funcs Functions) MarshalJSON() ([]byte, error)

type Hyperparameters added in v1.6.0

// Hyperparameters holds the tunable settings of a fine-tuning job
// (the newer /fine_tuning API).
type Hyperparameters struct {
	Epochs int `json:"n_epochs"`
}

type Hyperparams

// Hyperparams holds the tunable settings of a (legacy) fine-tune job.
type Hyperparams struct {
	BatchSize              int     `json:"batch_size"`
	LearningRateMultiplier float32 `json:"learning_rate_multiplier"`
	NEpochs                int     `json:"n_epochs"`
	PromptLossWeight       float32 `json:"prompt_loss_weight"`
}

type ImageData

// ImageData is a single generated image. Exactly which field is populated
// depends on the request's response_format ("url" or "b64_json").
type ImageData struct {
	Base64 string `json:"b64_json"`
	URL    string `json:"url"`
}

type ImageEditRequestBody

// ImageEditRequestBody is the request body of the image edit endpoint.
// It has no json tags because it is encoded as multipart/form-data via
// ToMultipartFormData, not as JSON.
type ImageEditRequestBody struct {
	// image Required
	// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
	// User MUST close it if it's like ReadCloser.
	Image io.Reader

	// n integer Optional Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int

	// size string Optional Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string

	// response_format string Optional Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string

	// user string Optional
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	// Learn more: https://beta.openai.com/docs/guides/safety-best-practices/end-user-ids
	User string

	// mask string Optional
	// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited.
	// Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
	// User MUST close it if it's like ReadCloser.
	Mask io.Reader

	// prompt string Required
	// A text description of the desired image(s). The maximum length is 1000 characters.
	Prompt string
}

func (ImageEditRequestBody) ToMultipartFormData

func (body ImageEditRequestBody) ToMultipartFormData() (buf *bytes.Buffer, contenttype string, err error)

type ImageEditResponse

// ImageEditResponse is the response body of the image edit endpoint.
type ImageEditResponse ImageResponse

type ImageGenerationRequestBody

// ImageGenerationRequestBody is the request body of the image generation
// endpoint. Only Prompt is always serialized.
type ImageGenerationRequestBody struct {
	Prompt         string `json:"prompt"`
	N              int    `json:"n,omitempty"`
	Size           string `json:"size,omitempty"`
	ResponseFormat string `json:"response_format,omitempty"` // "url" or "b64_json"
	User           string `json:"user,omitempty"`
}

type ImageGenerationResponse

// ImageGenerationResponse is the response body of the image generation endpoint.
type ImageGenerationResponse ImageResponse

type ImageResponse

// ImageResponse is the common shape shared by all image endpoint responses.
type ImageResponse struct {
	Created int64       `json:"created"`
	Data    []ImageData `json:"data"`
}

type ImageVariationRequestBody

// ImageVariationRequestBody is the request body of the image variation
// endpoint. It has no json tags because it is encoded as multipart/form-data
// via ToMultipartFormData, not as JSON.
type ImageVariationRequestBody struct {
	// image Required
	// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
	// User MUST close it if it's like ReadCloser.
	Image io.Reader

	// n integer Optional Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int

	// size string Optional Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string

	// response_format string Optional Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string

	// user string Optional
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	// Learn more: https://beta.openai.com/docs/guides/safety-best-practices/end-user-ids
	User string
}

func (ImageVariationRequestBody) ToMultipartFormData

func (body ImageVariationRequestBody) ToMultipartFormData() (buf *bytes.Buffer, contenttype string, err error)

type ImageVariationResponse

// ImageVariationResponse is the response body of the image variation endpoint.
type ImageVariationResponse ImageResponse

type Message added in v1.2.0

// Message is an element of a chat request's messages parameter, and of the
// choices returned in a chat response.
type Message struct {

	// Role: Either of "system", "user", "assistant".
	// Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages.
	// The system message helps set the behavior of the assistant. In the example above, the assistant was instructed with “You are a helpful assistant.”
	// The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
	// The assistant messages help store prior responses. They can also be written by a developer to help give examples of desired behavior.
	Role string `json:"role"`

	// Content: A content of the message.
	Content string `json:"content"`

	// FunctionCall requested by ChatGPT.
	// Only appears in a response from ChatGPT in which ChatGPT wants to call a function.
	FunctionCall *FunctionCall `json:"function_call,omitempty"`

	// Name of the function called, to tell this message is a result of function_call.
	// Only appears in a request from us when the previous message is "function_call" requested by ChatGPT.
	Name string `json:"name,omitempty"`
}

Message: An element of messages parameter. The main input is the messages parameter. Messages must be an array of message objects, where each object has a role (either “system”, “user”, or “assistant”) and content (the content of the message). Conversations can be as short as 1 message or fill many pages. See https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages

type ModelData

// ModelData describes a single model as returned by the models endpoints.
type ModelData struct {
	ID         string            `json:"id"`
	Object     ObjectType        `json:"object"`
	Created    int64             `json:"created"`
	OwnedBy    string            `json:"owned_by"`
	Permission []ModelPermission `json:"permission"`
	Root       string            `json:"root"`
	Parent     string            `json:"parent"`
}

type ModelPermission

// ModelPermission describes one permission entry attached to a model.
type ModelPermission struct {
	ID                 string     `json:"id"`
	Object             ObjectType `json:"object"`
	Created            int64      `json:"created"`
	AllowCreateEngine  bool       `json:"allow_create_engine"`
	AllowSampling      bool       `json:"allow_sampling"`
	AllowLogProbs      bool       `json:"allow_logprobs"`
	AllowSearchIndices bool       `json:"allow_search_indices"`
	AllowView          bool       `json:"allow_view"`
	AllowFineTuning    bool       `json:"allow_fine_tuning"`
	Organization       string     `json:"organization"`
	Group              string     `json:"group"`
	IsBlocking         bool       `json:"is_blocking"`
}

type ModelRetrieveResponse

// ModelRetrieveResponse is the response body of the "retrieve model" endpoint.
type ModelRetrieveResponse ModelData

type ModelsListResponse

// ModelsListResponse is the response body of the "list models" endpoint.
type ModelsListResponse struct {
	Data []ModelData `json:"data"`
	// Tagged explicitly for consistency with every other response type:
	// without the tag, Marshal emitted "Object" and Unmarshal relied on
	// encoding/json's case-insensitive fallback to match "object".
	Object ObjectType `json:"object"`
}

type ModerationCreateRequestBody

// ModerationCreateRequestBody is the request body of the moderation endpoint.
type ModerationCreateRequestBody struct {
	Input string `json:"input"`
	Model string `json:"model,omitempty"`
}

type ModerationCreateResponse

// ModerationCreateResponse is the response body of the moderation endpoint.
type ModerationCreateResponse struct {
	ID      string           `json:"id"`
	Model   string           `json:"model"`
	Results []ModerationData `json:"results"`
}

type ModerationData

// ModerationData is one moderation result: per-category boolean flags and
// the corresponding per-category scores.
type ModerationData struct {
	Categories struct {
		Hate            bool `json:"hate"`
		HateThreatening bool `json:"hate/threatening"`
		SelfHarm        bool `json:"self-harm"`
		Sexual          bool `json:"sexual"`
		SexualMinors    bool `json:"sexual/minors"`
		Violence        bool `json:"violence"`
		ViolenceGraphic bool `json:"violence/graphic"`
	} `json:"categories"`
	CategoryScores struct {
		Hate            float32 `json:"hate"`
		HateThreatening float32 `json:"hate/threatening"`
		SelfHarm        float32 `json:"self-harm"`
		Sexual          float32 `json:"sexual"`
		SexualMinors    float32 `json:"sexual/minors"`
		Violence        float32 `json:"violence"`
		ViolenceGraphic float32 `json:"violence/graphic"`
	} `json:"category_scores"`
}

type MultipartFormDataRequestBody

type MultipartFormDataRequestBody interface {
	ToMultipartFormData() (*bytes.Buffer, string, error)
}

type ObjectType

// ObjectType is the value of the "object" discriminator field in API responses.
type ObjectType string

// Known "object" values.
// NOTE(review): OTEEmbedding has a doubled "E" (likely a typo for OTEmbedding),
// but renaming this exported constant would break downstream callers.
const (
	OTModel           ObjectType = "model"
	OTModelPermission ObjectType = "model_permission"
	OTList            ObjectType = "list"
	OTEdit            ObjectType = "edit"
	OTTextCompletion  ObjectType = "text_completion"
	OTEEmbedding      ObjectType = "embedding"
	OTFile            ObjectType = "file"
	OTFineTune        ObjectType = "fine-tune"
	OTFineTuneEvent   ObjectType = "fine-tune-event"
)

type Parameters added in v1.2.0

// Parameters is the JSON-Schema-style parameter declaration of a Function.
type Parameters struct {
	Type       string                    `json:"type,omitempty"` // Must be "object"
	Properties map[string]map[string]any `json:"properties,omitempty"`
	Required   []string                  `json:"required,omitempty"`
}

type Usage

// Usage reports token consumption for a request.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL