Documentation
¶
Index ¶
- Constants
- Variables
- func BuildChatGPTBody(model string, messages []map[string]interface{}, maxTokens *int, ...) map[string]interface{}
- func BuildOpenAIURL(apiType APIType, azureUrl string, model string, azureVersion string) string
- func CalculateTokens(stringToWorkOn string, model string) (int, error)
- func CalculateTokensForMessages(messages []map[string]interface{}, model string) int
- func CheckLimit(messages []ChatGPTMessage, model string) int
- func EncoderForModel(model string) tokenizer.Model
- func FetchChatGPTForSession(sessionId string, sessionMap *SessionIdToChatGPTResponse, modelUsed string, ...) error
- func FetchChatGPTForSessionWithStream(sessionId string, sessionMap *SessionIdToChatGPTResponse, modelUsed string, ...) error
- func FetchFromOldSession(oldSessionId string, sessionMap *SessionIdToChatGPTResponse) (string, error)
- func FindTrimPosition(messages []ChatGPTMessage) int
- func GenerateSessionId() string
- func GetChannelWithSize(size int) chan []byte
- func GetValidOpenAIPlans() []util.Plan
- func IsOpenAIAllowed() bool
- func IsValidJSON(str string) bool
- func LimitForModel(model string) int
- func MakeChatGPTRequest(model string, messages []map[string]interface{}, apiKey string, maxTokens *int, ...) (*http.Response, []byte, []byte, int64, *http.Request, error)
- func MakeChatGPTRequestWithStream(model string, messages []map[string]interface{}, apiKey string, maxTokens *int, ...) ([][]byte, []byte, int64, int64, int64, *http.Request, error)
- func ParseGPT4Responses(responsesArr [][]byte) (map[string]interface{}, string, *error)
- func PingChatGPTWithDetails(model string, apiKey string, maxTokens *int, temperature *float64, ...) *util.Error
- func ValidateFAQBody(bodyPassed FAQBody) error
- type AISessionDoc
- type APIType
- type CacheSyncScript
- type ChatGPTMessage
- type ChatGPTRequest
- type ConfigDetails
- type EmbeddingModel
- type FAQBody
- type FAQElasticsearch
- type FilterQueryParams
- type FollowUpRequest
- type InternalChatGPTRequest
- type InternalChatGPTResponse
- func (i InternalChatGPTResponse) GetChannel() *chan []byte
- func (i InternalChatGPTResponse) GetIsFailed() bool
- func (i InternalChatGPTResponse) GetIsReady() bool
- func (i InternalChatGPTResponse) GetIsStreaming() bool
- func (i InternalChatGPTResponse) GetRemoveAt() int64
- func (i InternalChatGPTResponse) GetSession() *AISessionDoc
- func (i InternalChatGPTResponse) Request() InternalChatGPTRequest
- func (i InternalChatGPTResponse) Response() []byte
- func (i InternalChatGPTResponse) SetChannel(passedChan chan []byte)
- func (i InternalChatGPTResponse) SetIsReady(newVal bool)
- func (i InternalChatGPTResponse) TrimAt() int
- func (i InternalChatGPTResponse) UpdateSession(sessionId string)
- type MappingsMigration
- type OpenAI
- func (r *OpenAI) ESMiddleware() []middleware.Middleware
- func (r *OpenAI) GetConfig() OpenAIConfig
- func (r *OpenAI) InitFunc() error
- func (r *OpenAI) IsOpenAIEnabled() bool
- func (r *OpenAI) Name() string
- func (a *OpenAI) RSMiddleware() []middleware.Middleware
- func (r *OpenAI) Routes() []plugins.Route
- func (r *OpenAI) SetConfig(configPassed OpenAIConfig)
- func (o *OpenAI) TrimContextAsPerLimits(messagesArrToPassChatGPT []map[string]interface{}, maxTokens int, ...) ([]map[string]interface{}, int, error)
- func (r *OpenAI) UpdatePartialFields(ctx context.Context, faqBody FAQBody, faqId string) (FAQBody, error)
- type OpenAIConfig
- func (o OpenAIConfig) GetAPIType() APIType
- func (o OpenAIConfig) GetAzureURL() string
- func (o OpenAIConfig) GetAzureVersion() string
- func (o OpenAIConfig) GetMaxTokens() int
- func (o OpenAIConfig) GetMinTokens() int
- func (o OpenAIConfig) GetModel() string
- func (o OpenAIConfig) GetSystemPrompt() string
- func (o OpenAIConfig) IsIndexWhitelisted(index string) bool
- func (o OpenAIConfig) IsKeyValid() bool
- func (o OpenAIConfig) Key() string
- func (o OpenAIConfig) ToExternalConfig() ConfigDetails
- type ResponseResolutionDetails
- type SessionContext
- type SessionIdToChatGPTResponse
- func (s *SessionIdToChatGPTResponse) AddResponse(sessionId string, responseInBytes []byte)
- func (s *SessionIdToChatGPTResponse) CloneSession(oldSessionId string, userId string, usageInBytes []byte) (string, error)
- func (s *SessionIdToChatGPTResponse) FetchSession(ctx context.Context, sessionId string) *InternalChatGPTResponse
- func (s *SessionIdToChatGPTResponse) GetChannel(sessionId string) *chan []byte
- func (s *SessionIdToChatGPTResponse) GetResponse(sessionId string) *InternalChatGPTResponse
- func (s *SessionIdToChatGPTResponse) RegisterResponse(sessionId string, userId string, indices []string, documentIds []string)
- func (s *SessionIdToChatGPTResponse) SessionFromBytes(sessionInBytes []byte, userId string) (string, error)
- func (s *SessionIdToChatGPTResponse) SetChannel(sessionId string, passedChan chan []byte)
- func (s *SessionIdToChatGPTResponse) SetIsFailed(sessionId string, value bool) error
- func (s *SessionIdToChatGPTResponse) SetIsStreaming(sessionId string, value bool)
- func (s *SessionIdToChatGPTResponse) SetRequest(sessionId string, requestBody []byte, trimmingAt int)
- func (s *SessionIdToChatGPTResponse) SetResolvedAt(sessionId string, resolvedAt int64, req *http.Request, firstByteReadAt int64, ...)
- func (s *SessionIdToChatGPTResponse) SetSSECallAt(sessionId string, sseCallAt int64, sseCallStreamAt int64, firstWrittenAt int64)
- func (s *SessionIdToChatGPTResponse) SetUseful(sessionId string, useful UsefulAnalytics) error
- type UsefulAnalytics
Constants ¶
const (
FAQMapping = `` /* 150-byte string literal not displayed */
)
const (
OpenAIAPIURL string = "https://api.openai.com/v1"
)
Variables ¶
var MODEL_ENCODER_MAP = map[string]tokenizer.Model{ "gpt-3.5": tokenizer.GPT35Turbo, "gpt-3.5-16k": tokenizer.GPT35Turbo, "gpt-3.5-turbo-16k": tokenizer.GPT35Turbo, "gpt-4": tokenizer.GPT4, "gpt-4-32k": tokenizer.GPT4, "gpt-4-turbo": tokenizer.GPT4, "gpt-4o": tokenizer.GPT4, "gpt-4o-mini": tokenizer.GPT4, "gpt-4.1": tokenizer.GPT4, "gpt-4.1-mini": tokenizer.GPT4, "gpt-5": tokenizer.GPT4, "gpt-5-mini": tokenizer.GPT4, "gpt-5-nano": tokenizer.GPT4, "gpt-5-pro": tokenizer.GPT4, "gpt-5.1": tokenizer.GPT4, "gpt-5.2": tokenizer.GPT4, "gpt-5.2-pro": tokenizer.GPT4, "o1": tokenizer.GPT4, "o3-mini": tokenizer.GPT4, "o3": tokenizer.GPT4, "o3-pro": tokenizer.GPT4, "o4-mini": tokenizer.GPT4, }
var MODEL_LIMIT_MAP = map[string]int{
"gpt-4.1-mini": 128000,
"gpt-4.1": 128000,
"gpt-4o-mini": 128000,
"gpt-4o": 128000,
"gpt-4-turbo": 128000,
"gpt-4-32k": 32768,
"gpt-4": 8192,
"gpt-3.5-16k": 16384,
"gpt-3.5-turbo-16k": 16384,
"gpt-3.5": 4096,
"gpt-5.2-pro": 400000,
"gpt-5.2": 400000,
"gpt-5.1": 400000,
"gpt-5-pro": 400000,
"gpt-5-nano": 400000,
"gpt-5-mini": 400000,
"gpt-5": 400000,
"o3-pro": 200000,
"o3-mini": 200000,
"o3": 200000,
"o4-mini": 200000,
"o1": 200000,
}
These are prefix-matched, so set the longer model name before the shorter one
Functions ¶
func BuildChatGPTBody ¶
func BuildChatGPTBody(model string, messages []map[string]interface{}, maxTokens *int, temperature *float64, isStream bool, apiType APIType) map[string]interface{}
BuildChatGPTBody will build the chatGPT body that will be sent to the API
func BuildOpenAIURL ¶
Build the OpenAI API URL based on the api type.
func CalculateTokens ¶
CalculateTokens will use a naive way to calculate the number of tokens for the passed string by simply calculating the length of the string and dividing it by 4.
func CalculateTokensForMessages ¶
CalculateTokensForMessages will calculate the total token count for the passed messages
func CheckLimit ¶
func CheckLimit(messages []ChatGPTMessage, model string) int
CheckLimit will check the token limit for the passed messages context and accordingly return the results
func EncoderForModel ¶
EncoderForModel will return the encoder to use for the passed model
func FetchChatGPTForSession ¶
func FetchChatGPTForSession( sessionId string, sessionMap *SessionIdToChatGPTResponse, modelUsed string, messages []map[string]interface{}, DocumentIds []string, apiKey string, maxTokens *int, temperature *float64, apiType APIType, azureUrl string, azureVersion string, skipStoringFailedResponse bool, ) error
FetchChatGPTForSession will fetch the ChatGPT response and accordingly update it in the session map
func FetchChatGPTForSessionWithStream ¶
func FetchChatGPTForSessionWithStream( sessionId string, sessionMap *SessionIdToChatGPTResponse, modelUsed string, messages []map[string]interface{}, DocumentIds []string, apiKey string, maxTokens *int, temperature *float64, apiType APIType, azureURL string, azureVersion string, skipStoringFailedResponse bool, ) error
FetchChatGPTForSessionWithStream will fetch the ChatGPT response with streaming enabled.
func FetchFromOldSession ¶
func FetchFromOldSession(oldSessionId string, sessionMap *SessionIdToChatGPTResponse) (string, error)
FetchFromOldSession will use the older session Id to pull the older request body and make a new call with those details.
This function should be used in the case of a partial cache hit where there is a cache hit for RS but the OpenAI part of the cache is not updated (because of the OpenAI call failing or any other reason).
func FindTrimPosition ¶
func FindTrimPosition(messages []ChatGPTMessage) int
FindTrimPosition will iterate the messages array and find the position from where trimming can start. Essentially this will be the first message object that has the role as `user` indicating we can start trimming from the first question that user asked to ChatGPT
func GenerateSessionId ¶
func GenerateSessionId() string
GenerateSessionId will generate a sessionId for the ChatGPT request
func GetChannelWithSize ¶
GetChannelWithSize will return a channel of the specified size. If size is passed as 0, it will return an un-buffered channel
func GetValidOpenAIPlans ¶
func IsOpenAIAllowed ¶
func IsOpenAIAllowed() bool
IsOpenAIAllowed will indicate whether or not the plan has access to use OpenAI
func IsValidJSON ¶
IsValidJSON checks if a string is a valid JSON.
func LimitForModel ¶
LimitForModel will return the token limit provided by OpenAI for their different models that we support using for now
If model is not found then -1 will be returned indicating that we don't know what a valid limit is for the passed model.
func MakeChatGPTRequest ¶
func MakeChatGPTRequest( model string, messages []map[string]interface{}, apiKey string, maxTokens *int, temperature *float64, apiType APIType, azureUrl string, azureVersion string, ) (*http.Response, []byte, []byte, int64, *http.Request, error)
MakeChatGPTRequest will make the ChatGPT request and return the response accordingly
func MakeChatGPTRequestWithStream ¶
func MakeChatGPTRequestWithStream( model string, messages []map[string]interface{}, apiKey string, maxTokens *int, temperature *float64, responseChan *chan []byte, apiType APIType, azureUrl string, azureVersion string, ) ([][]byte, []byte, int64, int64, int64, *http.Request, error)
MakeChatGPTRequestWithStream will make the ChatGPT request but stream it so that each chunk is written to the passed channel until the `[DONE]` text is received.
func ParseGPT4Responses ¶
Parse the streamed response received from GPT 4 or newer API models
func PingChatGPTWithDetails ¶
func PingChatGPTWithDetails( model string, apiKey string, maxTokens *int, temperature *float64, apiType APIType, azureUrl string, azureVersion string, systemPrompt string, ) *util.Error
PingChatGPTWithDetails will make a ping call to chatGPT based on the passed details to check whether the configuration is good to go.
func ValidateFAQBody ¶
ValidateFAQBody will validate the FAQ body passed and make sure that it contains all the required values.
Types ¶
type AISessionDoc ¶
type AISessionDoc struct {
SessionId *string `json:"session_id"`
UserId *string `json:"user_id"`
Useful *bool `json:"useful"`
Reason *string `json:"reason,omitempty"`
InputTokens *int64 `json:"input_tokens"`
OutputTokens *int64 `json:"output_tokens"`
InvocationCount *int `json:"invocation_count"`
Messages *[]ChatGPTMessage `json:"messages"`
Model *string `json:"model"`
Index []string `json:"index"`
DocumentIds []string `json:"document_ids"`
CreatedAt *int64 `json:"created_at,omitempty"`
UpdatedAt *int64 `json:"updated_at,omitempty"`
TimeStamp *int64 `json:"timestamp"`
Meta *map[string]interface{} `json:"meta,omitempty"`
ResponseResolution ResponseResolutionDetails `json:"response_resolution,omitempty"`
InternalResponse *string `json:"internal_response,omitempty"`
}
AISessionDoc will contain the session details that will be stored in ES
type APIType ¶
type APIType int
func (APIType) MarshalJSON ¶
MarshalJSON is the implementation of the Marshaler interface for marshaling APIType
func (APIType) String ¶
String is the implementation of stringer interface that returns the string representation of APIType
func (*APIType) UnmarshalJSON ¶
UnmarshalJSON is the implementation of the Unmarshaler interface for unmarshalling APIType.
type CacheSyncScript ¶
type CacheSyncScript struct {
// contains filtered or unexported fields
}
func (CacheSyncScript) Index ¶
func (s CacheSyncScript) Index() string
func (CacheSyncScript) PluginName ¶
func (s CacheSyncScript) PluginName() string
func (CacheSyncScript) SetCache ¶
func (s CacheSyncScript) SetCache(response *elastic.SearchResult) error
type ChatGPTMessage ¶
func TrimMessagesAsPerModel ¶
func TrimMessagesAsPerModel(messages []ChatGPTMessage, model string, startTrimmingAt int) ([]ChatGPTMessage, error)
TrimMessagesAsPerModel will trim the messages array passed as per the passed model's limit
messages should be an array of ChatGPTMessage object
type ChatGPTRequest ¶
type ChatGPTRequest struct {
Model string `json:"model"`
Messages []ChatGPTMessage `json:"messages"`
MaxTokens *int `json:"maxTokens"`
Temperature *float64 `json:"temperature"`
DocumentIds []string `json:"documentIds"`
}
func BuildFollowUpBody ¶
func BuildFollowUpBody(response *InternalChatGPTResponse, followUp FollowUpRequest) (*ChatGPTRequest, error)
BuildFollowUpBody will build the follow-up body for the response passed
type ConfigDetails ¶
type ConfigDetails struct {
Enable *bool `json:"enable,omitempty"`
ApiKey *string `json:"apiKey,omitempty"`
DefaultModel *string `json:"defaultModel,omitempty"`
DefaultEmbeddingModel *EmbeddingModel `json:"defaultEmbeddingModel,omitempty"`
DefaultSystemPrompt *string `json:"defaultSystemPrompt,omitempty"`
DefaultMaxTokens *int `json:"defaultMaxTokens,omitempty"`
DefaultMinTokens *int `json:"defaultMinTokens,omitempty"`
EnabledIndexes *[]string `json:"enabledIndexes,omitempty"`
APIType *APIType `json:"apiType,omitempty"`
AzureBaseURL *string `json:"azureBaseURL,omitempty"`
AzureVersion *string `json:"apiVersion,omitempty"`
}
ConfigDetails will contain details of the configuration passed by the user
func ValidateConfigPassed ¶
func ValidateConfigPassed(config ConfigDetails) (ConfigDetails, error)
ValidateConfigPassed will validate the passed config body and accordingly throw errors, if any
func (ConfigDetails) ToInternalConfig ¶
func (c ConfigDetails) ToInternalConfig() OpenAIConfig
ToInternalConfig will return the config details in the internal config structure
type EmbeddingModel ¶
type EmbeddingModel int
const ( TextEmbedding3Small EmbeddingModel = iota TextEmbedding3Large TextEmbeddingAda002 )
func (EmbeddingModel) MarshalJSON ¶
func (a EmbeddingModel) MarshalJSON() ([]byte, error)
MarshalJSON is the implementation of the Marshaler interface for marshaling EmbeddingModel
func (EmbeddingModel) String ¶
func (a EmbeddingModel) String() string
String is the implementation of stringer interface that returns the string representation of EmbeddingModel
func (*EmbeddingModel) UnmarshalJSON ¶
func (a *EmbeddingModel) UnmarshalJSON(bytes []byte) error
UnmarshalJSON is the implementation of the Unmarshaler interface for unmarshalling EmbeddingModel.
type FAQBody ¶
type FAQBody struct {
Question *string `json:"question,omitempty"`
Answer *string `json:"answer,omitempty"`
SearchboxId *[]string `json:"searchboxId,omitempty"`
ID *string `json:"faq_id,omitempty"`
Order *int `json:"order,omitempty"`
UpdatedAt *int64 `json:"updated_at"`
}
FAQBody will contain the details about the FAQ
type FAQElasticsearch ¶
type FAQElasticsearch struct {
// contains filtered or unexported fields
}
type FilterQueryParams ¶
type FilterQueryParams struct {
FromTimeStamp *int64
ToTimeStamp *int64
InvocationMin *int
InvocationMax *int
Useful *bool
Size *int
Offset *int
ModelPrefix *[]string
UserId *[]string
}
FilterQueryParams will contain the query params allowed for filtering the sessions
type FollowUpRequest ¶
type FollowUpRequest struct {
Request *ChatGPTRequest `json:"request,omitempty"`
Question *string `json:"question,omitempty"`
}
FollowUpRequest will accept the follow-up request for the AI Answer flow
type InternalChatGPTRequest ¶
type InternalChatGPTRequest struct {
InBytes []byte `json:"in_bytes"`
AsStruct ChatGPTRequest `json:"as_struct"`
UnmarshalErr error `json:"unmarshal_err"`
}
func (InternalChatGPTRequest) Bytes ¶
func (i InternalChatGPTRequest) Bytes() []byte
Bytes will return the ChatGPT request body in bytes
func (InternalChatGPTRequest) Error ¶
func (i InternalChatGPTRequest) Error() error
Error will return the unmarshalErr for the struct.
This method will not check whether the error is nil or not
func (InternalChatGPTRequest) IsStructReady ¶
func (i InternalChatGPTRequest) IsStructReady() bool
IsStructReady will indicate whether or not the struct is ready for use
func (InternalChatGPTRequest) Struct ¶
func (i InternalChatGPTRequest) Struct() ChatGPTRequest
Struct will return the request body in the custom datatype
This method will not check whether or not the struct is valid. That can be checked by using the `IsStructReady()` method or the `Error()` method.
type InternalChatGPTResponse ¶
type InternalChatGPTResponse struct {
RequestBody InternalChatGPTRequest `json:"request_body"`
ResponseInBytes []byte `json:"response_in_bytes"`
AddedAt int64 `json:"added_at"`
RemoveAt int64 `json:"remove_at"`
IsReady bool `json:"is_ready"`
IsFailed bool `json:"is_failed"`
StartTrimmingAtPosition int `json:"start_trimming_at_position"`
StreamChannel *chan []byte `json:"-"`
IsStreaming bool
// contains filtered or unexported fields
}
func (InternalChatGPTResponse) GetChannel ¶
func (i InternalChatGPTResponse) GetChannel() *chan []byte
GetChannel will return the channel attached to the session
func (InternalChatGPTResponse) GetIsFailed ¶
func (i InternalChatGPTResponse) GetIsFailed() bool
GetIsFailed will indicate whether the request failed to execute or complete execution
func (InternalChatGPTResponse) GetIsReady ¶
func (i InternalChatGPTResponse) GetIsReady() bool
GetIsReady will indicate if the response is ready to be read
func (InternalChatGPTResponse) GetIsStreaming ¶
func (i InternalChatGPTResponse) GetIsStreaming() bool
GetIsStreaming will return the value for IsStreaming indicating whether the response is being streamed.
func (InternalChatGPTResponse) GetRemoveAt ¶
func (i InternalChatGPTResponse) GetRemoveAt() int64
GetRemoveAt will return the time when the response should be removed
func (InternalChatGPTResponse) GetSession ¶
func (i InternalChatGPTResponse) GetSession() *AISessionDoc
GetSession will return the session details attached to the response
func (InternalChatGPTResponse) Request ¶
func (i InternalChatGPTResponse) Request() InternalChatGPTRequest
Request will return the ChatGPT request sent
func (InternalChatGPTResponse) Response ¶
func (i InternalChatGPTResponse) Response() []byte
Response will return the ChatGPT response
func (InternalChatGPTResponse) SetChannel ¶
func (i InternalChatGPTResponse) SetChannel(passedChan chan []byte)
SetChannel will set the passed channel in the response
func (InternalChatGPTResponse) SetIsReady ¶
func (i InternalChatGPTResponse) SetIsReady(newVal bool)
SetIsReady will set the value of isReady to the passed value
func (InternalChatGPTResponse) TrimAt ¶
func (i InternalChatGPTResponse) TrimAt() int
TrimAt will return the position at which trimming the messages can start
func (InternalChatGPTResponse) UpdateSession ¶
func (i InternalChatGPTResponse) UpdateSession(sessionId string)
UpdateSession will update the AISessionDoc from the request, response values present in the current ChatGPT response.
type MappingsMigration ¶
type MappingsMigration struct {
NewMapping string
// contains filtered or unexported fields
}
func (MappingsMigration) ConditionCheck ¶
func (m MappingsMigration) ConditionCheck() (bool, *util.Error)
Check whether or not the script should run.
Since we don't have any conditions to check, we will return `true` directly.
func (MappingsMigration) IsAsync ¶
func (m MappingsMigration) IsAsync() bool
func (MappingsMigration) Script ¶
func (m MappingsMigration) Script() *util.Error
Script to run for the migration
type OpenAI ¶
type OpenAI struct {
// contains filtered or unexported fields
}
OpenAI plugin deals with managing query translation.
func Instance ¶
func Instance() *OpenAI
Instance returns the singleton instance of the plugin. Instance should be the only way (both within or outside the package) to fetch the instance of the plugin, in order to avoid stateless duplicates.
func (*OpenAI) ESMiddleware ¶
func (r *OpenAI) ESMiddleware() []middleware.Middleware
func (*OpenAI) GetConfig ¶
func (r *OpenAI) GetConfig() OpenAIConfig
GetConfig will get the OpenAI config value
func (*OpenAI) InitFunc ¶
InitFunc initializes the dao, i.e. elasticsearch client, and should be executed only once in the lifetime of the plugin.
func (*OpenAI) IsOpenAIEnabled ¶
IsOpenAIEnabled will indicate whether OpenAI is enabled or not
func (*OpenAI) RSMiddleware ¶
func (a *OpenAI) RSMiddleware() []middleware.Middleware
Default empty middleware array function
func (*OpenAI) Routes ¶
Routes returns an empty slices since the plugin solely acts as a middleware.
func (*OpenAI) SetConfig ¶
func (r *OpenAI) SetConfig(configPassed OpenAIConfig)
SetConfig will set the OpenAI config value
func (*OpenAI) TrimContextAsPerLimits ¶
func (o *OpenAI) TrimContextAsPerLimits(messagesArrToPassChatGPT []map[string]interface{}, maxTokens int, minTokens int, strictSelection bool) ([]map[string]interface{}, int, error)
TrimContextAsPerLimits will trim the passed context based on the maxTokens, minTokens values passed by the user.
type OpenAIConfig ¶
type OpenAIConfig struct {
Enable *bool `json:"enable,omitempty"`
OpenAIKey *string `json:"open_ai_key,omitempty"`
Model *string `json:"model,omitempty"`
DefaultEmbeddingModel *EmbeddingModel `json:"embeddingModel"`
SystemPrompt *string `json:"systemPrompt,omitempty"`
MaxTokens *int `json:"maxTokens,omitempty"`
MinTokens *int `json:"minTokens,omitempty"`
Indexes *[]string `json:"enabledIndexes,omitempty"`
APIType *APIType `json:"apiType,omitempty"`
AzureBaseURL *string `json:"azureBaseURL"`
AzureVersion *string `json:"azureVersion"`
}
func GetDefaultConfig ¶
func GetDefaultConfig() OpenAIConfig
GetDefaultConfig will return the default config for OpenAI
func (OpenAIConfig) GetAPIType ¶
func (o OpenAIConfig) GetAPIType() APIType
GetAPIType will return the API type specified for the instance
func (OpenAIConfig) GetAzureURL ¶
func (o OpenAIConfig) GetAzureURL() string
GetAzureURL will return the Azure URL for OpenAI calls
func (OpenAIConfig) GetAzureVersion ¶
func (o OpenAIConfig) GetAzureVersion() string
GetAzureVersion will return the azure version for OpenAI calls
func (OpenAIConfig) GetMaxTokens ¶
func (o OpenAIConfig) GetMaxTokens() int
GetMaxTokens will return the max tokens set by the user
This will prioritize the maxTokens set by the user, if present and fall back to `300`
func (OpenAIConfig) GetMinTokens ¶
func (o OpenAIConfig) GetMinTokens() int
GetMinTokens will return the min tokens set by the user
This will prioritize minTokens set by the user, if present and will fall back to `100`
func (OpenAIConfig) GetModel ¶
func (o OpenAIConfig) GetModel() string
GetModel will return the model to be used for the OpenAI call.
This will prioritize the model that user has configured, if any and fallback to `gpt-3.5-turbo`
func (OpenAIConfig) GetSystemPrompt ¶
func (o OpenAIConfig) GetSystemPrompt() string
GetSystemPrompt will return the system prompt set by the user.
This will prioritize the systemPrompt set by the user, if present and fall back to `You're a good helpful assistant`
func (OpenAIConfig) IsIndexWhitelisted ¶
func (o OpenAIConfig) IsIndexWhitelisted(index string) bool
IsIndexWhitelisted will indicate whether the passed index is whitelisted by checking if the index exists in the whitelisted indexes list
func (OpenAIConfig) IsKeyValid ¶
func (o OpenAIConfig) IsKeyValid() bool
IsKeyValid returns a bool indicating whether or not the key is valid.
func (OpenAIConfig) ToExternalConfig ¶
func (o OpenAIConfig) ToExternalConfig() ConfigDetails
ToExternalConfig will return the config details in the external config structure
type ResponseResolutionDetails ¶
type ResponseResolutionDetails struct {
ResolvedAt int64 `json:"resolved_at"`
FirstByteReadAt int64 `json:"first_byte_read_at"`
ClosedAt int64 `json:"closed_at"`
CallReceivedAt int64 `json:"call_received_at"`
CallStreamResolvedAt int64 `json:"call_stream_resolved_at"`
CallFirstWrittenAt int64 `json:"first_written_at"`
AICallCurl string `json:"ai_call_curl"`
AIBody string `json:"ai_body"`
}
type SessionContext ¶
type SessionContext struct {
Question *string `json:"question,omitempty"`
OlderContext *[]ChatGPTMessage `json:"olderContext,omitempty"`
MaxTokens *int `json:"maxTokens,omitempty"`
Temperature *float64 `json:"temperature,omitempty"`
Model *string `json:"model,omitempty"`
}
SessionContext will accept the session context for a new session initiation
type SessionIdToChatGPTResponse ¶
type SessionIdToChatGPTResponse struct {
// contains filtered or unexported fields
}
func SessionInstance ¶
func SessionInstance() *SessionIdToChatGPTResponse
func (*SessionIdToChatGPTResponse) AddResponse ¶
func (s *SessionIdToChatGPTResponse) AddResponse(sessionId string, responseInBytes []byte)
AddResponse will add a new response to the session ID to ChatGPT response storage
func (*SessionIdToChatGPTResponse) CloneSession ¶
func (s *SessionIdToChatGPTResponse) CloneSession(oldSessionId string, userId string, usageInBytes []byte) (string, error)
CloneSession will clone the passed sessionId's session and create a copy session from it. Returns the new sessionId for the session created from the source session.
func (*SessionIdToChatGPTResponse) FetchSession ¶
func (s *SessionIdToChatGPTResponse) FetchSession(ctx context.Context, sessionId string) *InternalChatGPTResponse
FetchSession will try to fetch the session details by using the passed sessionId and accordingly populate the local session details
func (*SessionIdToChatGPTResponse) GetChannel ¶
func (s *SessionIdToChatGPTResponse) GetChannel(sessionId string) *chan []byte
GetChannel will return the channel associated to the passed SessionID
func (*SessionIdToChatGPTResponse) GetResponse ¶
func (s *SessionIdToChatGPTResponse) GetResponse(sessionId string) *InternalChatGPTResponse
GetResponse will return the ChatGPT response for the passed session ID
func (*SessionIdToChatGPTResponse) RegisterResponse ¶
func (s *SessionIdToChatGPTResponse) RegisterResponse(sessionId string, userId string, indices []string, documentIds []string)
RegisterResponse will register a new response for the passed sessionId indicating that the response is in progress
func (*SessionIdToChatGPTResponse) SessionFromBytes ¶
func (s *SessionIdToChatGPTResponse) SessionFromBytes(sessionInBytes []byte, userId string) (string, error)
SessionFromBytes will create a new session from the passed session doc in bytes
func (*SessionIdToChatGPTResponse) SetChannel ¶
func (s *SessionIdToChatGPTResponse) SetChannel(sessionId string, passedChan chan []byte)
SetChannel will set the channel and associate it to the passed sessionId
func (*SessionIdToChatGPTResponse) SetIsFailed ¶
func (s *SessionIdToChatGPTResponse) SetIsFailed(sessionId string, value bool) error
SetIsFailed will set the passed value in the IsFailed field
This function returns an error if the response is not registered or the passed sessionId is invalid
func (*SessionIdToChatGPTResponse) SetIsStreaming ¶
func (s *SessionIdToChatGPTResponse) SetIsStreaming(sessionId string, value bool)
SetIsStreaming will set the value for isStreaming in the response
func (*SessionIdToChatGPTResponse) SetRequest ¶
func (s *SessionIdToChatGPTResponse) SetRequest(sessionId string, requestBody []byte, trimmingAt int)
SetRequest will set the request body for later access for the passed sessionId
func (*SessionIdToChatGPTResponse) SetResolvedAt ¶
func (s *SessionIdToChatGPTResponse) SetResolvedAt(sessionId string, resolvedAt int64, req *http.Request, firstByteReadAt int64, closedAt int64, requestInBytes []byte)
SetResolvedAt will set the time of the ChatGPT call resolution
func (*SessionIdToChatGPTResponse) SetSSECallAt ¶
func (s *SessionIdToChatGPTResponse) SetSSECallAt(sessionId string, sseCallAt int64, sseCallStreamAt int64, firstWrittenAt int64)
SetSSECallAt will set the sse call at and sse call streaming start time
func (*SessionIdToChatGPTResponse) SetUseful ¶
func (s *SessionIdToChatGPTResponse) SetUseful(sessionId string, useful UsefulAnalytics) error
SetUseful will set the useful flag with other details for the passed sessionId