Documentation ¶
Index ¶
- Constants
- Variables
- func AllModelNames() []string
- func Chunk(cfg *Config, project *projectinfo.ProjectInfo, ...) ([]string, error)
- func FileContents(project *projectinfo.ProjectInfo, filename string) string
- func GetConfidencePrompt(opType OperationType) string
- func GetDefaultFilename(opType OperationType) string
- func GetFixPrompt(opType OperationType) string
- func GetInitialPrompt(opType OperationType) string
- func ModelNamesAndDescriptions() map[string]string
- func SetModels(models []Model)
- type Config
- func (cfg *Config) BuildPrompt(promptTemplate string, templateData TemplateData) (string, error)
- func (cfg *Config) CountPromptTokens(prompt string) int
- func (cfg *Config) GatherSources(customInitialPrompt, customFixPrompt, customConfidencePrompt string, ...) (*projectinfo.ProjectInfo, error)
- func (cfg *Config) InitializeOutputFile()
- func (cfg *Config) OutputResponse(response string) error
- func (cfg *Config) PostPrompt(prompt string) (string, error)
- func (cfg *Config) Process(status io.Writer, project *projectinfo.ProjectInfo) (string, string, int, float64, error)
- func (cfg *Config) ProcessChunk(status io.Writer, i, n int, project *projectinfo.ProjectInfo, ...) (string, float64, error)
- func (cfg *Config) ReactToConfigAndReadSources(args []string, ...) (*projectinfo.ProjectInfo, error)
- type Model
- type OperationType
- type TemplateData
Constants ¶
const (
	OpGenDoc     = iota // generate general documentation
	OpGenAPI            // generate API documentation
	OpGenReadme         // generate a README.md style file
	OpGenCatalog        // generate app-catalog.yaml config for Backstage
	OpGenAnyFile        // generate any file
	OpFindBug           // find a bug
	OpFindTypo          // find a typo
)
The different operations that this program can do
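For illustration, the operation type keys both the prompt templates and the default output filename. A minimal sketch, written as if inside this package (its import path is not shown on this page):

	opType := OpGenReadme
	initial := GetInitialPrompt(opType)       // prompt for the first generation pass
	fix := GetFixPrompt(opType)               // prompt for the correction pass
	confidence := GetConfidencePrompt(opType) // prompt asking the AI to rate its confidence
	filename := GetDefaultFilename(opType)    // default output filename for this operation
	fmt.Println(initial, fix, confidence, filename)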
Variables ¶
var AllModels []Model
AllModels holds the list of models configured by the caller.
var PromptMargin = 1.2 // to compensate for inaccurate token counting, this increases the count by 20%
TODO: Use accurate token counting instead
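A sketch of registering models with SetModels; every name, URL, token limit and price below is a placeholder chosen for illustration, not a value shipped with this package:

	SetModels([]Model{
		{
			Name:        "example-model", // placeholder name
			Description: "example large-context model",
			MaxTokens:   128000,
			PostURL:     "https://ai.example.com/v1/generate", // placeholder endpoint
			USDPerMillionTokensForShortPrompts:       1.25,
			USDPerMillionTokensForLongPrompts:        2.50,
			USDPerMillionTokensOutputForShortPrompts: 5.00,
			USDPerMillionTokensOutputForLongPrompts:  10.00,
		},
	})
	fmt.Println(AllModelNames())             // [example-model]
	fmt.Println(ModelNamesAndDescriptions()) // map[example-model:example large-context model]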
Functions ¶
func AllModelNames ¶
func AllModelNames() []string
func Chunk ¶
func Chunk(cfg *Config, project *projectinfo.ProjectInfo, includeSourceFiles, includeConfAndDocFiles bool) ([]string, error)
Chunk breaks down project information into manageable JSON chunks to adhere to token limitations
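A sketch of chunking a project before sending it to the AI; cfg and project are assumed to come from ReactToConfigAndReadSources or GatherSources:

	chunks, err := Chunk(cfg, project, true, true) // include source files and conf/doc files
	if err != nil {
		return err
	}
	fmt.Printf("split the project into %d JSON chunks\n", len(chunks))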
func FileContents ¶
func FileContents(project *projectinfo.ProjectInfo, filename string) string
func GetConfidencePrompt ¶
func GetConfidencePrompt(opType OperationType) string
func GetDefaultFilename ¶
func GetDefaultFilename(opType OperationType) string
func GetFixPrompt ¶
func GetFixPrompt(opType OperationType) string
func GetInitialPrompt ¶
func GetInitialPrompt(opType OperationType) string
Types ¶
type Config ¶
type Config struct {
Model Model
FallbackModel Model
Output *os.File
InitialPrompt string
FixPrompt string
ConfidencePrompt string
OutputFilename string
Force bool
Silent bool
OutputPrompt bool
Version bool
Directory string
OpType OperationType
IncludeConfAndDoc bool
ExcludeSources bool
AlsoOutputFixAndConfidence bool
Timeout time.Duration
}
func (*Config) BuildPrompt ¶
func (cfg *Config) BuildPrompt(promptTemplate string, templateData TemplateData) (string, error)
BuildPrompt constructs the final prompt from the template and data
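A sketch of building a prompt from a template; the fields of TemplateData are not listed on this page, so the zero value is used here:

	var data TemplateData // populate with project-specific fields as needed
	prompt, err := cfg.BuildPrompt(GetInitialPrompt(cfg.OpType), data)
	if err != nil {
		return err
	}
	fmt.Println(prompt)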
func (*Config) CountPromptTokens ¶
func (cfg *Config) CountPromptTokens(prompt string) int
CountPromptTokens sends the given prompt and model name to the configured AI server and counts the tokens. This only works for gemini* models, for now; for the other models, the token count is estimated. If there are errors, a warning is logged and the token count is estimated instead.
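A sketch of checking a prompt against the selected model's token limit, applying PromptMargin as the safety factor:

	tokens := cfg.CountPromptTokens(prompt)       // prompt built earlier, e.g. with BuildPrompt
	padded := int(float64(tokens) * PromptMargin) // add the 20% margin
	if padded > cfg.Model.MaxTokens {
		// too large for the selected model: chunk the input or switch to cfg.FallbackModel
	}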
func (*Config) GatherSources ¶
func (cfg *Config) GatherSources(customInitialPrompt, customFixPrompt, customConfidencePrompt string, opType OperationType) (*projectinfo.ProjectInfo, error)
func (*Config) InitializeOutputFile ¶
func (cfg *Config) InitializeOutputFile()
InitializeOutputFile opens the output file based on configuration
func (*Config) OutputResponse ¶
func (cfg *Config) OutputResponse(response string) error
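Taken together with InitializeOutputFile, a sketch of writing a result to the configured output; result stands for whatever the AI produced, and error handling around the file is left to the caller:

	cfg.InitializeOutputFile() // opens the output according to cfg.OutputFilename and the rest of the configuration
	if err := cfg.OutputResponse(result); err != nil {
		return err
	}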
func (*Config) PostPrompt ¶
func (cfg *Config) PostPrompt(prompt string) (string, error)
PostPrompt sends the given prompt and model name to the configured AI server and returns the answer. The answer may optionally be trimmed of code block markers (i.e. ```yaml ... ```).
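A sketch of posting a single prompt and reading back the answer:

	answer, err := cfg.PostPrompt(prompt) // prompt built earlier, e.g. with BuildPrompt
	if err != nil {
		return err
	}
	fmt.Println(answer) // code block markers such as ```yaml ... ``` may already have been trimmed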
func (*Config) Process ¶
func (cfg *Config) Process(status io.Writer, project *projectinfo.ProjectInfo) (string, string, int, float64, error)
Process processes the entire project with AI and returns the combined initial results, the combined fix results, the confidence (from 1 to 10), the cost in USD, and an error, if applicable.
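A sketch of processing a whole project; status output goes to os.Stderr here, which is just one reasonable choice for the status writer:

	initial, fixed, confidence, costUSD, err := cfg.Process(os.Stderr, project)
	if err != nil {
		return err
	}
	fmt.Printf("confidence %d/10, approximate cost $%.4f\n", confidence, costUSD)
	fmt.Println(initial) // combined initial results
	fmt.Println(fixed)   // combined fix results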
func (*Config) ProcessChunk ¶
func (cfg *Config) ProcessChunk(status io.Writer, i, n int, project *projectinfo.ProjectInfo, jsonChunk, promptTemplate, previousAIAnswer string) (string, float64, error)
ProcessChunk processes a chunk of source code with either the initial or the correction prompt (if not blank)
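A sketch of driving ProcessChunk over the chunks returned by Chunk; whether i and n are 1-based and whether the previous answer should be threaded through are assumptions made for illustration:

	var previous string
	var totalCost float64
	for i, jsonChunk := range chunks {
		answer, cost, err := cfg.ProcessChunk(os.Stderr, i+1, len(chunks), project, jsonChunk, cfg.InitialPrompt, previous)
		if err != nil {
			return err
		}
		previous = answer
		totalCost += cost
	}
	fmt.Printf("approximate cost $%.4f\n", totalCost)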
func (*Config) ReactToConfigAndReadSources ¶
func (cfg *Config) ReactToConfigAndReadSources(args []string, customInitialPrompt, customFixPrompt, customConfidencePrompt string, apidoc, bug, catalog, readme, typo bool) (*projectinfo.ProjectInfo, error)
ReactToConfigAndReadSources sets up the command-line flags for the application, reacts to the resulting configuration and reads the project sources.
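A sketch of wiring the flags and reading the sources for a README generation run; how much of Config must be filled in beforehand is an assumption here:

	cfg := &Config{Timeout: 5 * time.Minute}
	project, err := cfg.ReactToConfigAndReadSources(
		os.Args[1:],
		"", "", "", // use the default initial, fix and confidence prompts
		false, // apidoc
		false, // bug
		false, // catalog
		true,  // readme
		false, // typo
	)
	if err != nil {
		log.Fatalln(err)
	}
	// project can now be handed to Chunk or cfg.Process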
type Model ¶
type Model struct {
Description string
Name string
MaxTokens int
PostURL string
USDPerMillionTokensForShortPrompts float64 // < 128K tokens
USDPerMillionTokensForLongPrompts float64 // > 128K tokens
USDPerMillionTokensOutputForShortPrompts float64 // < 128K tokens
USDPerMillionTokensOutputForLongPrompts float64 // > 128K tokens
}
func (*Model) CalculateCost ¶
CalculateCost returns the approximate cost in USD
type OperationType ¶
type OperationType int
type TemplateData ¶
TemplateData defines the structure used for prompt templates