types

package
v1.7.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Oct 29, 2025 License: MIT Imports: 2 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var CommitPrompt = `` /* 762-byte string literal not displayed */

CommitPrompt is the base instruction template sent to LLM providers before appending repository changes and optional style guidance.

Functions

func BuildCommitPrompt added in v1.3.23

func BuildCommitPrompt(changes string, opts *GenerationOptions) string

BuildCommitPrompt constructs the prompt that will be sent to the LLM, applying any optional tone/style instructions before appending the repository changes.

func GetSupportedProviderStrings added in v1.3.22

func GetSupportedProviderStrings() []string

GetSupportedProviderStrings returns the human-friendly names for providers.

Types

type CacheConfig added in v1.6.0

// CacheConfig holds configuration for the cache system.
type CacheConfig struct {
	// Enabled toggles the cache on or off.
	Enabled bool `json:"enabled"`
	// MaxEntries caps how many entries the cache may hold.
	MaxEntries int `json:"max_entries"`
	// MaxAgeDays is the maximum age of an entry, in days, before it is eligible for cleanup.
	MaxAgeDays int `json:"max_age_days"`
	// CleanupInterval is how often cleanup runs, in hours (note the JSON key carries the unit).
	CleanupInterval int `json:"cleanup_interval_hours"`
	// CacheFilePath is the on-disk location of the cache file.
	CacheFilePath string `json:"cache_file_path"`
}

CacheConfig holds configuration for the cache system.

type CacheEntry added in v1.6.0

// CacheEntry represents a cached commit message with metadata.
type CacheEntry struct {
	// Message is the cached commit message text.
	Message string `json:"message"`
	// Provider identifies which LLM backend produced the message.
	Provider LLMProvider `json:"provider"`
	// DiffHash is the hash of the repository diff this message was generated for;
	// presumably it is the cache lookup key — TODO confirm against the cache implementation.
	DiffHash string `json:"diff_hash"`
	// StyleInstruction records any tone/style guidance used for generation.
	StyleInstruction string `json:"style_instruction,omitempty"`
	// Attempt is the generation attempt number this entry corresponds to.
	Attempt int `json:"attempt"`
	// CreatedAt is when the entry was created (string-encoded timestamp; format not visible here).
	CreatedAt string `json:"created_at"`
	// LastAccessedAt is when the entry was last read from the cache.
	LastAccessedAt string `json:"last_accessed_at"`
	// AccessCount counts how many times this entry has been served.
	AccessCount int `json:"access_count"`
	// Cost is the generation cost attributed to this entry, if known.
	Cost float64 `json:"cost,omitempty"`
	// Tokens holds token usage for the original generation; nil when unknown.
	Tokens *UsageInfo `json:"tokens,omitempty"`
}

CacheEntry represents a cached commit message with metadata.

type CacheStats added in v1.6.0

// CacheStats provides statistics about the cache.
type CacheStats struct {
	// TotalEntries is the current number of entries in the cache.
	TotalEntries int `json:"total_entries"`
	// TotalHits counts cache lookups that found an entry.
	TotalHits int `json:"total_hits"`
	// TotalMisses counts cache lookups that found nothing.
	TotalMisses int `json:"total_misses"`
	// HitRate is the hit ratio; presumably hits/(hits+misses) — TODO confirm.
	HitRate float64 `json:"hit_rate"`
	// TotalCostSaved is the estimated spend avoided by serving cached results.
	TotalCostSaved float64 `json:"total_cost_saved"`
	// OldestEntry is the timestamp of the oldest cached entry (string-encoded).
	OldestEntry string `json:"oldest_entry"`
	// NewestEntry is the timestamp of the newest cached entry (string-encoded).
	NewestEntry string `json:"newest_entry"`
	// CacheSizeBytes is the cache's on-disk size in bytes.
	CacheSizeBytes int64 `json:"cache_size_bytes"`
}

CacheStats provides statistics about the cache.

type Choice

// Choice details a single response option returned by Grok.
type Choice struct {
	// Message is the assistant message for this choice.
	Message Message `json:"message"`
	// Index is the position of this choice within the response's choices array.
	Index int `json:"index"`
	// FinishReason states why generation stopped (e.g. as reported by the API).
	FinishReason string `json:"finish_reason"`
}

Choice details a single response option returned by Grok.

type Config

// Config stores CLI-level configuration including named repositories.
type Config struct {
	// GrokAPI holds the Grok API credential/endpoint value; exact contents
	// (key vs URL) are not visible here — TODO confirm against the loader.
	GrokAPI string `json:"grok_api"`
	// Repos maps a repository's configured name to its metadata.
	Repos map[string]RepoConfig `json:"repos"`
}

Config stores CLI-level configuration including named repositories.

type GenerationEvent added in v1.7.0

// GenerationEvent represents a single commit message generation event for tracking.
type GenerationEvent struct {
	// Provider is the LLM backend used for this generation.
	Provider LLMProvider `json:"provider"`
	// Success reports whether the generation completed successfully.
	Success bool `json:"success"`
	// GenerationTime is the wall-clock duration in milliseconds (per the JSON key).
	GenerationTime float64 `json:"generation_time_ms"`
	// TokensUsed is the total token count consumed by the request.
	TokensUsed int `json:"tokens_used"`
	// Cost is the monetary cost attributed to this generation.
	Cost float64 `json:"cost"`
	// CacheHit reports whether the result was served from cache.
	CacheHit bool `json:"cache_hit"`
	// CacheChecked reports whether the cache was consulted at all.
	CacheChecked bool `json:"cache_checked"`
	// Timestamp records when the event occurred (string-encoded).
	Timestamp string `json:"timestamp"`
	// ErrorMessage carries the failure detail when Success is false; omitted when empty.
	ErrorMessage string `json:"error_message,omitempty"`
}

GenerationEvent represents a single commit message generation event for tracking.

type GenerationOptions added in v1.3.23

// GenerationOptions controls how commit messages should be produced by LLM providers.
type GenerationOptions struct {
	// StyleInstruction contains optional tone/style guidance appended to the base prompt.
	StyleInstruction string
	// Attempt records the 1-indexed attempt number for this generation request.
	// Attempt > 1 signals that the LLM should provide an alternative output.
	Attempt int
}

GenerationOptions controls how commit messages should be produced by LLM providers.

type GrokRequest

// GrokRequest represents a chat completion request sent to X.AI's API.
type GrokRequest struct {
	// Messages is the ordered conversation history sent to the model.
	Messages []Message `json:"messages"`
	// Model names the model to use for the completion.
	Model string `json:"model"`
	// Stream requests a streamed response when true.
	Stream bool `json:"stream"`
	// Temperature controls sampling randomness.
	Temperature float64 `json:"temperature"`
}

GrokRequest represents a chat completion request sent to X.AI's API.

type GrokResponse

// GrokResponse contains the relevant fields parsed from X.AI responses.
type GrokResponse struct {
	// Message is a top-level message, populated by some response shapes.
	Message Message `json:"message,omitempty"`
	// Choices lists the response options; typically the first choice is used.
	Choices []Choice `json:"choices,omitempty"`
	// Id is the response identifier assigned by the API.
	// NOTE(review): Go naming convention would be ID, but renaming is a breaking change.
	Id string `json:"id,omitempty"`
	// Object is the API object type string.
	Object string `json:"object,omitempty"`
	// Created is the creation time as a Unix timestamp.
	Created int64 `json:"created,omitempty"`
	// Model echoes the model that produced the response.
	Model string `json:"model,omitempty"`
	// Usage reports token accounting for the request.
	// NOTE(review): `omitempty` has no effect on a non-pointer struct field —
	// encoding/json only omits empty basic values, nil pointers/interfaces, and
	// empty arrays/slices/maps/strings. Making this *UsageInfo would honor the
	// tag but is a breaking change for callers.
	Usage UsageInfo `json:"usage,omitempty"`
}

GrokResponse contains the relevant fields parsed from X.AI responses.

type LLMProvider added in v1.3.22

// LLMProvider identifies the large language model backend used to author
// commit messages. It is a string-backed enum; see the Provider* constants
// for the supported set.
type LLMProvider string

LLMProvider identifies the large language model backend used to author commit messages.

// Supported LLM provider identifiers. The constant values are the
// human-readable provider names serialized into JSON and shown to users.
const (
	ProviderOpenAI LLMProvider = "OpenAI"
	ProviderClaude LLMProvider = "Claude"
	ProviderGemini LLMProvider = "Gemini"
	ProviderGrok   LLMProvider = "Grok"
	ProviderGroq   LLMProvider = "Groq"
	ProviderOllama LLMProvider = "Ollama"
)

func GetSupportedProviders added in v1.3.22

func GetSupportedProviders() []LLMProvider

GetSupportedProviders returns all available provider enums.

func ParseLLMProvider added in v1.3.22

func ParseLLMProvider(s string) (LLMProvider, bool)

ParseLLMProvider converts a string into an LLMProvider enum when supported.

func (LLMProvider) IsValid added in v1.3.22

func (p LLMProvider) IsValid() bool

IsValid reports whether the provider is part of the supported set.

func (LLMProvider) String added in v1.3.22

func (p LLMProvider) String() string

String returns the provider identifier as a plain string.

type Message

// Message captures the role/content pairs exchanged with Grok.
type Message struct {
	// Role is the speaker role for this message (e.g. "system", "user", "assistant").
	Role string `json:"role"`
	// Content is the message text.
	Content string `json:"content"`
}

Message captures the role/content pairs exchanged with Grok.

type ProviderStats added in v1.7.0

// ProviderStats tracks statistics for a specific LLM provider.
type ProviderStats struct {
	// Name identifies the provider these statistics belong to.
	Name LLMProvider `json:"name"`
	// TotalUses counts every generation attempt routed to this provider.
	TotalUses int `json:"total_uses"`
	// SuccessfulUses counts attempts that completed successfully.
	SuccessfulUses int `json:"successful_uses"`
	// FailedUses counts attempts that failed.
	FailedUses int `json:"failed_uses"`
	// TotalCost is the cumulative monetary cost for this provider.
	TotalCost float64 `json:"total_cost"`
	// TotalTokensUsed is the cumulative token count for this provider.
	TotalTokensUsed int `json:"total_tokens_used"`
	// AverageGenerationTime is the mean generation time in milliseconds (per the JSON key).
	AverageGenerationTime float64 `json:"average_generation_time_ms"`
	// FirstUsed is when this provider was first used (string-encoded timestamp).
	FirstUsed string `json:"first_used"`
	// LastUsed is when this provider was most recently used (string-encoded timestamp).
	LastUsed string `json:"last_used"`
	// SuccessRate is the success ratio; presumably SuccessfulUses/TotalUses — TODO confirm.
	SuccessRate float64 `json:"success_rate"`
}

ProviderStats tracks statistics for a specific LLM provider.

type RepoConfig

// RepoConfig tracks metadata for a configured Git repository.
type RepoConfig struct {
	// Path is the repository's filesystem location.
	Path string `json:"path"`
	// LastRun records when the tool last ran against this repository (string-encoded).
	LastRun string `json:"last_run"`
}

RepoConfig tracks metadata for a configured Git repository.

type UsageInfo

// UsageInfo reports token usage statistics from Grok responses.
type UsageInfo struct {
	// PromptTokens counts tokens consumed by the prompt/input.
	PromptTokens int `json:"prompt_tokens"`
	// CompletionTokens counts tokens produced in the completion/output.
	CompletionTokens int `json:"completion_tokens"`
	// TotalTokens is the combined token count reported by the API.
	TotalTokens int `json:"total_tokens"`
}

UsageInfo reports token usage statistics from Grok responses.

type UsageStats added in v1.7.0

type UsageStats struct {
	TotalGenerations      int                            `json:"total_generations"`
	SuccessfulGenerations int                            `json:"successful_generations"`
	FailedGenerations     int                            `json:"failed_generations"`
	ProviderStats         map[LLMProvider]*ProviderStats `json:"provider_stats"`
	FirstUse              string                         `json:"first_use"`
	LastUse               string                         `json:"last_use"`
	TotalCost             float64                        `json:"total_cost"`
	TotalTokensUsed       int                            `json:"total_tokens_used"`
	CacheHits             int                            `json:"cache_hits"`
	CacheMisses           int                            `json:"cache_misses"`
	AverageGenerationTime float64                        `json:"average_generation_time_ms"`
}

UsageStats tracks comprehensive usage statistics for the application.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL