db

package
v1.1.3 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 29, 2026 License: Apache-2.0 Imports: 22 Imported by: 0

Documentation

Index

Constants

View Source
const MaxRows = 500

MaxRows caps the number of rows returned by a gated query.

View Source
const SelectCols = selectCols

SelectCols is the canonical column list for SELECT queries against the items table.

Variables

View Source
var ValidLinkTypes = []string{
	"related_to",
	"duplicate_of",
	"informs",
	"supersedes",
	"spike_ref",
}

ValidLinkTypes lists every allowed value for the link_type column in item_links.

Functions

func AddDependencyChecked

func AddDependencyChecked(ctx context.Context, database *sql.DB, itemID, dependsOn, depType string) error

AddDependencyChecked atomically checks for cycles and inserts the dependency in a single serializable transaction, preventing TOCTOU races under concurrent access.

func AddLink(ctx context.Context, database *sql.DB, sourceID, targetID, linkType string) error

AddLink inserts a directed semantic link from sourceID to targetID. Returns an error when link_type is not in ValidLinkTypes. The insert is idempotent: duplicate (sourceID, targetID, linkType) tuples are silently ignored via INSERT OR IGNORE.

func ApplySchemaExtensions

func ApplySchemaExtensions(db *sql.DB, headerDef *config.HeaderDefConfig) error

ApplySchemaExtensions executes the generated ALTER TABLE statements idempotently within an explicit transaction to prevent partial schema migrations on failure.

func BuildManifest added in v1.1.0

func BuildManifest(workspacePath string) (map[string]FileEntry, error)

BuildManifest walks the workspace directory tree and builds an in-memory manifest keyed by workspace-relative path. Hidden directories (name starting with ".") are skipped. For artifact files, it extracts the ItemID from the full frontmatter using models.ParseFrontmatter. Files that cannot be stat'd or read are skipped with a slog.Warn entry.

func ClearStashIndex

func ClearStashIndex(ctx context.Context, database *sql.DB) error

ClearStashIndex deletes stash entries and stash links before rehydration.

func DeleteAllItemLogs

func DeleteAllItemLogs(ctx context.Context, database *sql.DB) error

DeleteAllItemLogs clears indexed item log relationships and entries before rehydration.

func DeleteDependency

func DeleteDependency(ctx context.Context, db *sql.DB, itemID, dependsOn string) error

DeleteDependency removes a specific dependency edge.

func DeleteItem deprecated

func DeleteItem(ctx context.Context, db *sql.DB, id string) error

DeleteItem removes an artifact from the index together with all related rows in a single atomic transaction. It delegates to DeleteItemCascade.

Deprecated: call DeleteItemCascade directly; this shim exists for callsite compatibility only.

func DeleteItemCascade

func DeleteItemCascade(ctx context.Context, database *sql.DB, id string) error

DeleteItemCascade removes an artifact and all its related rows (deps, links, logs, events, stash links, commit links) from the index in a single atomic transaction. It returns ErrNotFound when no items row matched id.

func DeleteItemTx

func DeleteItemTx(ctx context.Context, tx *sql.Tx, id string) error

DeleteItemTx removes an item from the items table within an existing transaction. Returns ErrNotFound when no row matched the ID.

func DetectCycle

func DetectCycle(ctx context.Context, db *sql.DB, itemID, dependsOn string) (bool, error)

DetectCycle checks whether adding a dependency from itemID to dependsOn would create a circular dependency in the graph. It performs a BFS from dependsOn following existing dependency edges. If itemID is reachable, a cycle exists.

func EnsureSchema

func EnsureSchema(db *sql.DB) error

EnsureSchema creates the items table, indexes, FTS5 virtual table, and maintenance triggers idempotently, wrapped in a single transaction.

func EnsureSchemaWithExtensions

func EnsureSchemaWithExtensions(db *sql.DB, headerDef *config.HeaderDefConfig) error

EnsureSchemaWithExtensions creates base schema and applies dynamic columns from header-def.

func EnsureTelemetrySchema

func EnsureTelemetrySchema(db *sql.DB) error

EnsureTelemetrySchema creates the telemetry_sessions and telemetry_tool_usage tables idempotently. Called lazily by the telemetry harvest handler on first use rather than during workspace initialization; tables are created on demand.

telemetry_tool_usage uses a composite primary key (session_id, server_name, tool_name) — no AUTOINCREMENT (Plan Review F7). This enforces uniqueness per harvest run and prevents double-counting on re-harvest.

func ExecuteGatedQuery

func ExecuteGatedQuery(db *sql.DB, query string, params ...any) ([]map[string]interface{}, error)

ExecuteGatedQuery runs a validated read-only query capped at MaxRows.

func GenerateSchemaExtensions

func GenerateSchemaExtensions(db *sql.DB, headerDef *config.HeaderDefConfig) ([]string, error)

GenerateSchemaExtensions reads custom field definitions from HeaderDefConfig and returns ALTER TABLE statements to add columns not yet present in the items table.

func GetItem

func GetItem(ctx context.Context, db *sql.DB, id string) (*models.Artifact, error)

GetItem retrieves a single artifact by ID.

func IndexEvent

func IndexEvent(ctx context.Context, database *sql.DB, logsDir string, event events.Event) error

IndexEvent records the relationship between a work item and its log file and stores the event in the database for search and efficient lookup.

func InsertItemLogEntry

func InsertItemLogEntry(ctx context.Context, database *sql.DB, logPath string, event events.Event) error

InsertItemLogEntry stores a single event log entry.

func LinkStashEntry

func LinkStashEntry(ctx context.Context, database *sql.DB, stashID, itemID string, linkedAt time.Time) error

LinkStashEntry records a harvested stash-to-item relationship.

func MapFieldTypeToSQLite

func MapFieldTypeToSQLite(fieldType string) (string, error)

MapFieldTypeToSQLite converts a YAML field type to the corresponding SQLite column type.

func Open

func Open(dbPath string) (*sql.DB, error)

Open returns a configured *sql.DB backed by the SQLite file at dbPath.

DSN options are injected via query parameters so the driver applies them on every new physical connection opened from the pool:

  • _pragma=journal_mode(WAL) — WAL journal enables concurrent readers alongside writers.
  • _pragma=foreign_keys(1) — Enforces FK constraints (SQLite disables them by default).
  • _pragma=busy_timeout(30000) — Waits up to 30 s before returning SQLITE_BUSY; gives multi-process MCP workloads enough headroom to serialise writes.
  • _txlock=immediate — All transactions use BEGIN IMMEDIATE, acquiring the write lock at transaction start rather than on the first write. This eliminates lock-upgrade conflicts that produce SQLITE_BUSY mid-transaction.

Pool sizing: SetMaxOpenConns(4) / SetMaxIdleConns(4). WAL mode supports multiple simultaneous readers so a pool > 1 is safe and beneficial when the MCP server and CLI share a process. 4 is a conservative ceiling for a single-user tool; raising it further would add contention on the write lock without meaningful throughput gain. (F-19)

func QueryItems

func QueryItems(ctx context.Context, db *sql.DB, filters QueryFilters) ([]*models.Artifact, error)

QueryItems retrieves artifacts matching the provided filters.

func Rehydrate

func Rehydrate(ctx context.Context, workspacePath string, db *sql.DB) (int, error)

Rehydrate walks the workspace directory tree and rebuilds the SQLite index from the Markdown source files. Files that fail to parse are skipped with a debug log entry. Returns the number of artifacts successfully indexed.

The rebuild is split into three phases to reduce write-lock hold time:

  1. Collect: walk the filesystem and parse all Markdown files into memory. No database interaction occurs during this phase.
  2. Clear: a single IMMEDIATE transaction deletes all existing index rows so that removed or renamed Markdown files do not leave ghost entries.
  3. Batch-insert: parsed artifacts are inserted in batches of rehydrateBatchSize per transaction. Each batch acquires and releases the write lock independently, allowing concurrent readers to make progress between batches.

Note: between the clear commit and the final batch commit the index is empty or partially populated. This is acceptable because backlogit.db is an ephemeral cache that can be rebuilt at any time.

func RehydrateStashIndex

func RehydrateStashIndex(ctx context.Context, database *sql.DB, activeEntries []StashRecord, harvested map[string]StashRecord) error

RehydrateStashIndex rebuilds the stash index from the active stash records and artifact provenance. The entire clear-and-rebuild sequence runs inside a single transaction to prevent partial state.

func RehydrateTelemetry

func RehydrateTelemetry(ctx context.Context, workspacePath string, sqlDB *sql.DB) error

RehydrateTelemetry clears and rebuilds the telemetry_sessions and telemetry_tool_usage tables from .backlogit/telemetry-sessions.jsonl.

Single write path: JSONL → rehydrate → SQLite (Plan Review F5). No direct upserts during harvest — RehydrateTelemetry is the only writer. Idempotent: calling twice with the same JSONL produces the same table state.

func RehydrateWithManifest added in v1.1.0

func RehydrateWithManifest(ctx context.Context, workspacePath string, database *sql.DB) (int, map[string]FileEntry, error)

RehydrateWithManifest performs a full rehydration by calling Rehydrate, then builds and returns a file manifest snapshot of the post-rehydration workspace. Rehydrate is the canonical rebuild path; this function wraps it for callers that also need the manifest baseline (e.g. to seed backlogit_merge_sync).

func RemoveLink(ctx context.Context, database *sql.DB, sourceID, targetID, linkType string) error

RemoveLink deletes the directed link matching (sourceID, targetID, linkType). A no-op removal (link not found) is not an error.

func RetryWrite

func RetryWrite(ctx context.Context, fn func() error) error

RetryWrite calls fn, retrying on SQLITE_BUSY and SQLITE_LOCKED errors using the default backoff schedule (1 s / 2 s / 4 s, up to 3 retries). ctx cancellation stops further retries immediately and returns ctx.Err().

func RetryWriteWithDelays

func RetryWriteWithDelays(ctx context.Context, fn func() error, delays []time.Duration) error

RetryWriteWithDelays is the testable inner implementation of RetryWrite. Production code should call RetryWrite; tests pass a zero-duration slice to eliminate timing overhead.

func RewriteAncillaryReferences

func RewriteAncillaryReferences(ctx context.Context, tx *sql.Tx, oldID, newID, newLogPath string) error

RewriteAncillaryReferences updates item_id columns in commit_links, stash_links, item_logs, and item_log_entries from oldID to newID. For item_logs and item_log_entries the log_path column is also set to newLogPath (a .backlogit/-relative path like "logs/<id>.jsonl"). This function operates within an existing transaction.

func RewriteDependencyEdges

func RewriteDependencyEdges(ctx context.Context, tx *sql.Tx, oldID, newID string) error

RewriteDependencyEdges updates all item_deps rows that reference oldID, changing them to reference newID. Both item_id and depends_on columns are rewritten. This function operates within an existing transaction.

func RewriteLinkEdges

func RewriteLinkEdges(ctx context.Context, tx *sql.Tx, oldID, newID string) error

RewriteLinkEdges updates all item_links rows that reference oldID, changing them to reference newID. Both source_id and target_id columns are rewritten. This function operates within an existing transaction.

func ScanArtifactRow

func ScanArtifactRow(row rowScanner) (*models.Artifact, error)

ScanArtifactRow is the exported wrapper for scanning a single row into an Artifact. It is used by packages that build custom queries against the items table.

func SearchItems

func SearchItems(ctx context.Context, db *sql.DB, query string, limit int) ([]*models.Artifact, error)

SearchItems performs FTS5 full-text search across titles, descriptions, and labels. The query is wrapped in FTS5 phrase-quote delimiters so that hyphens and other FTS5 operator characters in the input are treated as literal phrase content rather than query operators.

func ShouldFallback added in v1.1.0

func ShouldFallback(diff DiffResult, manifestSize, maxChangedFiles int) (bool, string)

ShouldFallback reports whether the diff is large enough to warrant a full rehydrate instead of an incremental sync. Returns true and a descriptive reason string when either:

  • the total changed-file count (added+changed+deleted+relocated) meets or exceeds maxChangedFiles, or
  • manifestSize is non-zero, manifestSize exceeds total (the manifest is meaningfully larger than the change set), and the changed-file count meets or exceeds 50% of manifestSize.

func UpsertDependency

func UpsertDependency(ctx context.Context, db *sql.DB, itemID, dependsOn, depType string) error

UpsertDependency creates or updates a dependency edge in the item_deps table. Both referenced items must exist in the items table.

func UpsertItem

func UpsertItem(ctx context.Context, db *sql.DB, artifact *models.Artifact) error

UpsertItem inserts or replaces an artifact in the index.

func UpsertItemLog

func UpsertItemLog(ctx context.Context, database *sql.DB, itemID, logPath string, updatedAt time.Time) error

UpsertItemLog stores the item-to-log-file relationship.

func UpsertItemTx

func UpsertItemTx(ctx context.Context, tx *sql.Tx, artifact *models.Artifact) error

UpsertItemTx inserts or replaces an artifact in the items table within an existing transaction. Mirrors UpsertItem's column set and value formatting (including hierarchy_path and RFC3339Nano timestamps) to ensure scan compatibility via scanArtifactRow.

func UpsertItemsTx

func UpsertItemsTx(ctx context.Context, tx *sql.Tx, artifacts ...*models.Artifact) error

UpsertItemsTx inserts or replaces one or more artifacts within an existing SQL transaction. All writes share the same transaction so a rollback reverts every artifact atomically.

func UpsertStashEntry

func UpsertStashEntry(ctx context.Context, database *sql.DB, stashID, priority, kind, text, deliberationID, state, sourcePath string, updatedAt time.Time) error

UpsertStashEntry writes or updates a stash entry record.

func ValidateColumnName

func ValidateColumnName(name string) error

ValidateColumnName checks that a column name is safe for use in DDL statements.

Types

type DependencyEdge

type DependencyEdge struct {
	ItemID    string `json:"item_id"`
	DependsOn string `json:"depends_on"`
	DepType   string `json:"dep_type"`
}

DependencyEdge represents a single dependency relationship between two artifacts.

func GetDependencies

func GetDependencies(ctx context.Context, db *sql.DB, itemID string) ([]DependencyEdge, error)

GetDependencies returns all items that the given item depends on (upstream edges).

func GetDependents

func GetDependents(ctx context.Context, db *sql.DB, dependsOn string) ([]DependencyEdge, error)

GetDependents returns all items that depend on the given item (downstream/reverse edges).

type DiffResult added in v1.1.0

type DiffResult struct {
	// Added holds files present in current but absent from old.
	Added []FileEntry
	// Changed holds files present in both snapshots but with differing mtime or size.
	Changed []FileEntry
	// Deleted holds files present in old but absent from current, excluding relocated files.
	Deleted []FileEntry
	// Relocated holds files whose ItemID appears in both a delete and an add,
	// indicating a path change rather than removal.
	Relocated []RelocationEntry
}

DiffResult holds the classified difference between two manifest snapshots.

func ComputeDiff added in v1.1.0

func ComputeDiff(old, current map[string]FileEntry) DiffResult

ComputeDiff compares two manifest snapshots and returns a DiffResult that classifies each file change as added, changed, deleted, or relocated. Relocation detection matches ItemID values across delete and add pairs; a file that moved from one directory to another is reported as a single RelocationEntry rather than independent delete and add entries.

type DuplicateGroup

type DuplicateGroup struct {
	NormalizedTitle string   `json:"normalized_title"`
	IDs             []string `json:"ids"`
	Count           int      `json:"count"`
}

DuplicateGroup holds a set of artifact IDs that share the same normalized title.

func FindDuplicates

func FindDuplicates(ctx context.Context, db *sql.DB) ([]DuplicateGroup, error)

FindDuplicates returns groups of artifacts that share identical normalized titles. Normalization lowercases the title and trims surrounding whitespace before comparison. Groups with only one member are excluded from the result.

type FileEntry added in v1.1.0

type FileEntry struct {
	// RelPath is the workspace-relative path (e.g., "queue/037-F.md").
	RelPath string
	// Kind classifies the file's functional role.
	Kind FileKind
	// Size is the file size in bytes at the time the manifest was built.
	Size int64
	// ModTime is the file modification time at the time the manifest was built.
	ModTime time.Time
	// ItemID is the artifact ID extracted from frontmatter. Empty for non-artifact
	// kinds (stash, log, config, other).
	ItemID string
}

FileEntry records the stat metadata for a single file in the workspace manifest, keyed by its workspace-relative path.

type FileKind added in v1.1.0

type FileKind int

FileKind categorises a file in the .backlogit workspace by its functional role.

const (
	// FileKindArtifact covers Markdown artifact files under queue/, done/, active/, blocked/, archive/.
	FileKindArtifact FileKind = iota
	// FileKindStash covers the stash.jsonl intake file.
	FileKindStash
	// FileKindLog covers per-item JSONL log files under logs/.
	FileKindLog
	// FileKindConfig covers workspace configuration files such as config.yaml.
	FileKindConfig
	// FileKindOther covers files that do not match any recognised category.
	FileKindOther
)

func ClassifyFile added in v1.1.0

func ClassifyFile(relPath string) FileKind

ClassifyFile returns the FileKind for a workspace-relative path. Paths under queue/, done/, active/, blocked/, or archive/ with a .md extension are classified as FileKindArtifact. stash.jsonl is FileKindStash. Paths under logs/ with a .jsonl extension are FileKindLog. Known config filenames are FileKindConfig. All other paths return FileKindOther.

type GateResult

type GateResult struct {
	Allowed bool
	Reason  string
}

GateResult reports whether a SQL statement passed the read-only gate.

func ValidateQuery

func ValidateQuery(sqlStr string) GateResult

ValidateQuery checks whether a SQL statement is safe for read-only execution.

type ItemLogEntry

type ItemLogEntry struct {
	ID        int64
	ItemID    string
	LogPath   string
	Timestamp time.Time
	Actor     string
	EventType string
	Content   string
	Delta     map[string]any
}

ItemLogEntry represents an indexed event log entry for a work item.

func ListItemLogEntries

func ListItemLogEntries(ctx context.Context, database *sql.DB, itemID string, limit int) ([]ItemLogEntry, error)

ListItemLogEntries returns indexed log entries for a single work item.

func SearchItemLogEntries

func SearchItemLogEntries(ctx context.Context, database *sql.DB, query string, limit int) ([]ItemLogEntry, error)

SearchItemLogEntries performs FTS5 search across indexed log entry content.

type LinkEdge

type LinkEdge struct {
	SourceID  string `json:"source_id"`
	TargetID  string `json:"target_id"`
	LinkType  string `json:"link_type"`
	CreatedAt string `json:"created_at"`
}

LinkEdge represents a directed semantic link between two backlogit artifacts.

func GetLinks(ctx context.Context, database *sql.DB, sourceID string) ([]LinkEdge, error)

GetLinks returns all outgoing links from sourceID, regardless of type.

func GetLinksByType

func GetLinksByType(ctx context.Context, database *sql.DB, sourceID, linkType string) ([]LinkEdge, error)

GetLinksByType returns all outgoing links from sourceID that match linkType.

type MergeSyncResult added in v1.1.0

type MergeSyncResult struct {
	// Added holds artifacts indexed for the first time by this sync.
	Added []SyncEntry `json:"added"`
	// Changed holds artifacts whose index entry was updated.
	Changed []SyncEntry `json:"changed"`
	// Deleted holds artifacts removed from the index because their file was deleted.
	Deleted []SyncEntry `json:"deleted"`
	// Relocated holds artifacts whose file path changed (same ID, new path).
	Relocated []SyncEntry `json:"relocated"`
	// StashRefreshed reports whether the stash table was fully rebuilt
	// because stash.jsonl appeared in the diff.
	StashRefreshed bool `json:"stash_refreshed"`
	// LogsRefreshed reports whether the item_log_entries table was fully
	// rebuilt because one or more logs/*.jsonl files appeared in the diff.
	LogsRefreshed bool `json:"logs_refreshed"`
	// FallbackUsed reports that the incremental path was skipped and a full
	// Rehydrate was performed instead.
	FallbackUsed bool `json:"fallback_used"`
	// DryRun reports that the call was made with dryRun=true, meaning no DB
	// changes were applied.
	DryRun bool `json:"dry_run"`
	// FallbackReason explains why the fallback was triggered when FallbackUsed is true.
	FallbackReason string `json:"fallback_reason,omitempty"`
}

MergeSyncResult describes the complete outcome of a MergeSync call.

func MergeSync added in v1.1.0

func MergeSync(
	ctx context.Context,
	workspacePath string,
	database *sql.DB,
	manifest map[string]FileEntry,
	dryRun bool,
) (MergeSyncResult, map[string]FileEntry, error)

MergeSync performs an incremental sync of the .backlogit workspace cache.

The call sequence is:

  1. Build a current manifest from the filesystem.
  2. Compute a diff against the provided manifest snapshot.
  3. If ShouldFallback returns true, delegate to Rehydrate and return with FallbackUsed: true.
  4. If dryRun is true, return the diff result without modifying the database.
  5. Apply targeted upserts for added and changed artifacts, deletes for removed artifacts, and upserts for relocated artifacts within a RetryWrite-wrapped transaction.
  6. When stash.jsonl appears in the diff, run rehydrateStash.
  7. When any logs/*.jsonl file appears in the diff, run rehydrateItemLogs.

Returns the sync result, the updated manifest (suitable for storage in the caller's in-memory cache), and any error.

type QueryFilters

type QueryFilters struct {
	Status          string
	Type            string
	ParentID        string
	Sprint          string
	AssignedTo      string
	Owner           string
	Priority        string
	IncludeArchived bool // when false (default), archived items are excluded from results
	Limit           int  // max results to return (0 = no limit)
	Offset          int  // number of results to skip for pagination
}

QueryFilters holds optional filters for item queries.

type RelocationEntry added in v1.1.0

type RelocationEntry struct {
	// ItemID is the artifact ID shared by the old and new paths.
	ItemID string
	// OldPath is the previous workspace-relative path.
	OldPath string
	// NewPath is the current workspace-relative path.
	NewPath string
	// Entry holds the current file metadata at NewPath.
	Entry FileEntry
}

RelocationEntry records a file that moved within the workspace. A relocation is detected when the same ItemID appears in both the deleted and added sets of a diff, indicating a path change (e.g., queue/ → done/) rather than a true delete-and-recreate.

type StashRecord

type StashRecord struct {
	ID             string     `json:"id"`
	Priority       string     `json:"priority"`
	DeliberationID string     `json:"deliberation_id,omitempty"`
	Kind           string     `json:"kind"`
	Text           string     `json:"text"`
	State          string     `json:"state"`
	SourcePath     string     `json:"source_path"`
	UpdatedAt      time.Time  `json:"updated_at"`
	ItemID         string     `json:"item_id,omitempty"`
	LinkedAt       *time.Time `json:"linked_at,omitempty"`
}

StashRecord represents an indexed stash entry with any harvested link.

func ListStashEntries

func ListStashEntries(ctx context.Context, database *sql.DB, includeHarvested bool) ([]StashRecord, error)

ListStashEntries returns indexed stash entries with any harvested item links.

type SyncEntry added in v1.1.0

type SyncEntry struct {
	// ID is the backlogit artifact ID (e.g., "037-F").
	ID string `json:"id"`
	// Path is the workspace-relative file path (e.g., "queue/037-F.md").
	Path string `json:"path"`
}

SyncEntry records a single artifact touched by a MergeSync operation, pairing the artifact ID with its workspace-relative file path.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL