cli

package
v0.0.0-...-5626d12 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: May 3, 2026 License: MPL-2.0 Imports: 12 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func Exec

func Exec(args []string) int

Exec is an alternative entry point for testing.

func Main

func Main() int

Main is the entry point for the CLI.

Types

type AskCmd

type AskCmd struct {
	Question string `arg:"" help:"Question to ask the wiki"`
	Limit    int    `help:"Maximum number of context pages to use" default:"0"`
}

AskCmd handles queries to the wiki.

func (*AskCmd) Run

func (c *AskCmd) Run(cli *CLI) error

Run executes the ask command.

type BenchmarkCmd

type BenchmarkCmd struct {
	// Dataset selects the benchmark to run.
	Dataset string `help:"Benchmark dataset to run" enum:"locomo,longmemeval," default:""`

	// DataPath is the local JSON file for the selected dataset.
	DataPath string `help:"Path to dataset JSON file" default:""`

	// Limit caps the number of conversations/items evaluated. 0 means all.
	Limit int `help:"Maximum conversations/items to evaluate (0 = all)" default:"0"`

	// Report is the output path; empty means stdout.
	Report string `help:"Output file path (default: stdout)"`

	// Format controls output encoding.
	Format string `help:"Output format" default:"" enum:"markdown,json,"`

	// RealLLM switches from the deterministic mock to the configured OpenAI
	// adapter for embeddings — produces quality numbers closer to production.
	// If not specified, uses value from config file.
	RealLLM bool `help:"Use real OpenAI LLM for embeddings (requires --openai-key)"`

	// OutputDir is the directory where wiki data will be persisted.
	// If empty, uses in-memory storage (faster, no disk I/O).
	OutputDir string `help:"Directory to persist wiki data for inspection (default: in-memory)"`
}

BenchmarkCmd runs external benchmark datasets (LoCoMo, LongMemEval) against the memory system and produces comparison reports vs published baselines.

Examples:

wiki benchmark --dataset locomo --data-path locomo.json
wiki benchmark --dataset longmemeval --data-path lme.json --format json
wiki benchmark --dataset locomo --data-path locomo.json --real-llm --limit 5

func (*BenchmarkCmd) Run

func (c *BenchmarkCmd) Run(cli *CLI) error

Run executes the benchmark command.

type CLI

type CLI struct {
	// Global flags
	Config           string `help:"Path to configuration file" env:"WIKI_CONFIG"`
	WikiDir          string `help:"Wiki storage directory" env:"WIKI_DIR"`
	OpenAIKey        string `help:"OpenAI API key" env:"OPENAI_API_KEY" short:"k"`
	OpenAIBaseURL    string `help:"Custom OpenAI-compatible endpoint" env:"OPENAI_BASE_URL"`
	OpenAIModel      string `help:"Chat model name" env:"OPENAI_MODEL"`
	OpenAIEmbedModel string `help:"Embedding model name" env:"OPENAI_EMBED_MODEL"`

	// Commands
	Cfg       ConfigCmd    `cmd:"" help:"Manage configuration"`
	Ingest    IngestCmd    `cmd:"" help:"Ingest content from stdin or file"`
	Ask       AskCmd       `cmd:"" help:"Ask the wiki a question"`
	Eval      EvalCmd      `cmd:"" help:"Run memory quality evaluation scenarios"`
	Benchmark BenchmarkCmd `cmd:"" help:"Run external benchmark datasets (LoCoMo, LongMemEval)"`
	// contains filtered or unexported fields
}

CLI represents the CLI application.

type ConfigCmd

type ConfigCmd struct {
	Init bool   `help:"Create a new config file" short:"i"`
	Path string `arg:"" optional:"" help:"Path to config file (default: .wiki/config.yml)"`
	Show bool   `help:"Show current configuration" short:"s"`
}

ConfigCmd handles configuration management.

func (*ConfigCmd) Run

func (c *ConfigCmd) Run(cli *CLI) error

Run executes the config command.

type EvalCmd

type EvalCmd struct {
	// Scenarios is the path to a directory of *.yaml files or a single .yaml.
	Scenarios string `help:"Path to scenarios directory or single .yaml file" required:""`

	// Report is the output path. Empty string writes to stdout.
	Report string `help:"Output file path (default: stdout)"`

	// Format is the output format: "markdown" (default) or "json".
	Format string `help:"Output format" default:"markdown" enum:"markdown,json"`

	// RealLLM uses the configured OpenAI adapter for embeddings instead of
	// the deterministic mock. Slower and more expensive, but produces
	// quality numbers comparable to production.
	RealLLM bool `help:"Use real OpenAI LLM for embeddings (requires --openai-key)"`
}

EvalCmd runs the memory-system evaluation scenarios.

Examples:

wiki eval --scenarios eval/scenarios
wiki eval --scenarios eval/scenarios --format json --report report.json
wiki eval --scenarios eval/scenarios/preference-recall.yaml --real-llm

func (*EvalCmd) Run

func (c *EvalCmd) Run(cli *CLI) error

Run executes the eval command.

type IngestCmd

type IngestCmd struct {
	Path  string `arg:"" optional:"" help:"File path to ingest (reads from stdin if empty or '-')"`
	Title string `help:"Optional title for the source"`
}

IngestCmd handles content ingestion.

func (*IngestCmd) Run

func (c *IngestCmd) Run(cli *CLI) error

Run executes the ingest command.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL