Documentation
Overview
Package team implements the WUPHF team launcher that starts a multi-agent collaborative team using tmux + Claude Code + the WUPHF office broker.
Architecture:
- Each agent is a real Claude Code session in a tmux window
- The office broker provides the shared channel (all agents see all messages)
- Nex is an optional context layer, not a requirement
- CEO has final decision authority; agents participate when relevant
- Go TUI is the channel "observer" — displays the conversation
launcher_nex.go — Nex-specific types and functions used by the team launcher.
FORKING NOTE: Everything in this file is Nex CRM-specific. If you are building a fork that does not use Nex, delete this file and remove the calls to:
- pollNexNotificationsLoop (in launcher.go Launch())
- pollNexInsightsLoop (in launcher.go Launch())
That is the complete surface area; no other files need changes.
Index
- Constants
- Variables
- func ArtifactKind(relPath string) (string, bool)
- func ArtifactSHAFromPath(relPath string) (string, bool)
- func CleanupPersistedTaskWorktrees() error
- func ClearPersistedBrokerState() error
- func CompilePlaybook(repo *Repo, wikiPath string) (string, []byte, error)
- func CompilePlaybookAndCommit(ctx context.Context, repo *Repo, wikiPath string) (string, string, error)
- func CompiledSkillRelPath(slug string) string
- func ComputeFactID(artifactSHA string, sentenceOffset int, subject, predicate, object string) string
- func DMSlugFor(agentSlug string) string
- func DMTargetAgent(slug string) string
- func DisableRealTaskWorktreeForTests()
- func EscapeForPromptBody(s string) string
- func ExecutionLogRelPath(slug string) string
- func FactLogAppendSHA(kind, slug, artifactSHA string) string
- func FactLogPath(kind EntityKind, slug string) string
- func FormatChannelView(messages []channelMessage) string
- func FormatLookupMessage(ans QueryAnswer) string
- func GitCleanEnv() []string — Deprecated
- func HasLiveTmuxSession() bool
- func InferAgentDomain(slug string) string
- func InferTextDomain(text string) string
- func IsArtifactPath(relPath string) bool
- func IsDMSlug(slug string) bool
- func IsDraining() bool
- func IsPlaybookPath(relPath string) bool
- func IsSafeTaskID(id string) bool
- func IsSkillAuthoringTool(toolName string) bool
- func JaroWinkler(a, b string) float64
- func MinimalBrief(ent IndexEntity) string
- func NewDefaultIntakeProvider() *defaultIntakeProvider
- func NewDefaultLLMProvider(systemPromptPath string) *defaultLLMProvider
- func NewDefaultStageBLLMProvider(b *Broker) *defaultStageBLLMProvider
- func NormalizeForFactID(s string) string
- func NormalizeOneOnOneAgent(slug string) string
- func NormalizeSessionMode(mode string) string
- func PlaybookSlugFromPath(relPath string) (string, bool)
- func PromotionDemandSignalLabel(s PromotionDemandSignal) string
- func RegisterTransports(b *Broker) (func(), error)
- func RegistryKeyForActionCapability(cap action.Capability) string
- func RelativeJoinURL(token string) string
- func RenderSkillMarkdown(fm SkillFrontmatter, body string) ([]byte, error)
- func RenderTeamLearningsMarkdown(records []LearningRecord) string
- func ResetBrokerState() error
- func ResolveActionProviderForCapability(cap action.Capability) (action.Provider, error)
- func ReviewLogPath(wikiRoot string) string
- func SendTelegramMessage(token string, chatID int64, text string) error
- func SendTypingAction(ctx context.Context, token string, chatID int64) error
- func SetTargetBrokerURLResolver(_ func(name string) string)
- func SlugifyTelegramTitle(title string) string
- func Staleness(f TypedFact, now time.Time) float64
- func StartOpenclawRouter(ctx context.Context, broker *Broker, bridge *OpenclawBridge) <-chan struct{}
- func ValidateExecutionInput(slug string, outcome PlaybookOutcome, summary, notes, recordedBy string) error
- func ValidateFactInput(kind EntityKind, slug, text, sourcePath, recordedBy string) error
- func ValidateLearningInput(rec LearningRecord) error
- func VerifyBot(token string) (string, error)
- func VerifyChat(token string, chatID int64) (string, error)
- func WikiBackupDir() string
- func WikiRootDir() string
- func WriteSharedMemory(ctx context.Context, note SharedMemoryWrite) (string, error)
- type ACItem
- type AgentLogEntriesResponse
- type AgentLogTasksResponse
- type ArticleMeta
- type AuditEntry
- type AutoAssignCountdown
- type AutoNotebookCounters
- type AutoNotebookEventKind
- type AutoNotebookWriter
- func (w *AutoNotebookWriter) Counters() AutoNotebookCounters
- func (w *AutoNotebookWriter) Handle(evt autoNotebookEvent)
- func (w *AutoNotebookWriter) Start(ctx context.Context)
- func (w *AutoNotebookWriter) Stop(timeout time.Duration)
- func (w *AutoNotebookWriter) WaitForCondition(ctx context.Context, predicate func() bool) error
- type Backlink
- type BleveTextIndex
- type BridgeBackoff
- type BriefSummary
- type Broker
- func (b *Broker) AckTask(req TaskAckRequest) (TaskResponse, error)
- func (b *Broker) Actions() []officeActionLog
- func (b *Broker) Addr() string
- func (b *Broker) AgentIssues() []agentIssueRecord
- func (b *Broker) AgentStream(slug string) *agentStreamBuffer
- func (b *Broker) AllMessages() []channelMessage
- func (b *Broker) AllTasks() []teamTask
- func (b *Broker) AppendDiffSummary(taskID string, files []DiffSummary) error
- func (b *Broker) AppendReviewerGrade(taskID string, grade ReviewerGrade) error
- func (b *Broker) AppendSessionReport(taskID string, report SessionReport) error
- func (b *Broker) AppendTaskDetail(taskID, actor, detail string) (teamTask, error)
- func (b *Broker) AssignReviewers(taskID string, slugs []string) error
- func (b *Broker) AttachOpenclawBridge(bridge *OpenclawBridge)
- func (b *Broker) BlockTask(taskID, actor, reason, blockerID string) (teamTask, bool, error)
- func (b *Broker) ChannelMessages(channel string) []channelMessage
- func (b *Broker) ChannelStore() *channel.Store
- func (b *Broker) ChannelTasks(channel string) []teamTask
- func (b *Broker) CreateRequest(req humanInterview) (humanInterview, error)
- func (b *Broker) CreateWatchdogAlert(kind, channel, targetType, targetID, owner, summary string) (watchdogAlert, bool, error)
- func (b *Broker) DMPartner(channelSlug string) string
- func (b *Broker) Decisions() []officeDecisionRecord
- func (b *Broker) DisabledMembers(channel string) []string
- func (b *Broker) DueSchedulerJobs() []schedulerJob
- func (b *Broker) EnabledMembers(channel string) []string
- func (b *Broker) EnqueueSectionsRefresh()
- func (b *Broker) EnsureBridgedMember(slug, name, createdBy string) error
- func (b *Broker) EnsureDirectChannel(agentSlug string) (string, error)
- func (b *Broker) EnsurePlannedTask(input plannedTaskInput) (teamTask, bool, error)
- func (b *Broker) EnsureTask(channel, title, details, owner, createdBy, threadID string, ...) (teamTask, bool, error)
- func (b *Broker) EntityGraph() *EntityGraph
- func (b *Broker) EntitySynthesizer() *EntitySynthesizer
- func (b *Broker) EvaluateConvergence(taskID string) error
- func (b *Broker) ExternalQueue(provider string) []channelMessage
- func (b *Broker) FactLog() *FactLog
- func (b *Broker) FindRequest(channel, requestID string) (humanInterview, bool)
- func (b *Broker) FindTask(channel, taskID string) (teamTask, bool)
- func (b *Broker) FocusModeEnabled() bool
- func (b *Broker) GetDecisionPacket(taskID string) (DecisionPacket, error)
- func (b *Broker) HasBlockingRequest() bool
- func (b *Broker) HasPendingInterview() bool
- func (b *Broker) HasRecentlyTaggedAgents(within time.Duration) bool
- func (b *Broker) HumanHasPosted() bool
- func (b *Broker) InFlightTasks() []teamTask
- func (b *Broker) Inbox(filter InboxFilter) (InboxPayload, error)
- func (b *Broker) InsightsCursor() string
- func (b *Broker) IntakeSpec(taskID string) (Spec, bool)
- func (b *Broker) IsAgentMemberSlug(slug string) bool
- func (b *Broker) LifecycleIndexSnapshot() map[LifecycleState][]string
- func (b *Broker) ListPolicies() []officePolicy
- func (b *Broker) ListTasks(req TaskListRequest) (TaskListResponse, error)
- func (b *Broker) MarkRoutingTargets(slugs []string)
- func (b *Broker) MemberProviderBinding(slug string) provider.ProviderBinding
- func (b *Broker) MemberProviderKind(slug string) string
- func (b *Broker) Messages() []channelMessage
- func (b *Broker) MigrateLifecycleStatesOnce()
- func (b *Broker) MutateTask(body TaskPostRequest) (TaskResponse, error)
- func (b *Broker) NotebookSearchAll(_ context.Context, query string) ([]WikiSearchHit, []string, error)
- func (b *Broker) NotificationCursor() string
- func (b *Broker) OfficeMembers() []officeMember
- func (b *Broker) OnDecisionRecorded(completedTaskID string)
- func (b *Broker) OnReviewerConvergence(taskID string, reason string) error
- func (b *Broker) PamDispatcher() *PamDispatcher
- func (b *Broker) PlaybookExecutionLog() *ExecutionLog
- func (b *Broker) PlaybookSynthesizer() *PlaybookSynthesizer
- func (b *Broker) PostAutomationMessage(from, channel, title, content, eventID, source, sourceLabel string, ...) (channelMessage, bool, error)
- func (b *Broker) PostInboundSurfaceMessage(from, channel, content, provider string) (channelMessage, error)
- func (b *Broker) PostMessage(from, channel, content string, tagged []string, replyTo string) (channelMessage, error)
- func (b *Broker) PostSystemMessage(channel, content, kind string)
- func (b *Broker) PublishEntityBriefSynthesized(evt EntityBriefSynthesizedEvent)
- func (b *Broker) PublishEntityFactRecorded(evt EntityFactRecordedEvent)
- func (b *Broker) PublishNotebookEvent(evt notebookWriteEvent)
- func (b *Broker) PublishPamActionDone(evt PamActionDoneEvent)
- func (b *Broker) PublishPamActionFailed(evt PamActionFailedEvent)
- func (b *Broker) PublishPamActionStarted(evt PamActionStartedEvent)
- func (b *Broker) PublishPlaybookExecutionRecorded(evt PlaybookExecutionRecordedEvent)
- func (b *Broker) PublishPlaybookSynthesized(evt PlaybookSynthesizedEvent)
- func (b *Broker) PublishWikiEvent(evt wikiWriteEvent)
- func (b *Broker) PublishWikiSectionsUpdated(evt WikiSectionsUpdatedEvent)
- func (b *Broker) Purge()
- func (b *Broker) QueueSnapshot() queueSnapshot
- func (b *Broker) RecentHumanMessages(limit int) []channelMessage
- func (b *Broker) ReconcileMemoryWorkflows(ctx context.Context) (MemoryWorkflowReconcileReport, error)
- func (b *Broker) RecordAction(kind, source, channel, actor, summary, relatedID string, signalIDs []string, ...) error
- func (b *Broker) RecordAgentUsage(slug, model string, usage provider.ClaudeUsage)
- func (b *Broker) RecordDecision(kind, channel, summary, reason, owner string, signalIDs []string, ...) (officeDecisionRecord, error)
- func (b *Broker) RecordDemandSignal(evt PromotionDemandEvent) error
- func (b *Broker) RecordPolicy(source, rule string) (officePolicy, error)
- func (b *Broker) RecordSignals(signals []officeSignal) ([]officeSignalRecord, error)
- func (b *Broker) RecordTaskDecision(taskID, rawAction, actorSlug string) error
- func (b *Broker) RecordTaskMemoryCapture(taskID, actor string, artifact MemoryWorkflowArtifact) (teamTask, bool, bool, error)
- func (b *Broker) RecordTaskMemoryLookup(taskID, actor, query string, citations []ContextCitation) (teamTask, bool, bool, error)
- func (b *Broker) RecordTaskMemoryPromotion(taskID, actor string, artifact MemoryWorkflowArtifact) (teamTask, bool, bool, error)
- func (b *Broker) RecordTelegramGroup(chatID int64, title string)
- func (b *Broker) RegenerateOrMarkUnknown(taskID string) error
- func (b *Broker) ReportAgentIssue(agentSlug, targetChannel, replyTo, detail string) (channelMessage, agentIssueRecord, bool, error)
- func (b *Broker) RequestSelfHealing(agentSlug, taskID string, reason agent.EscalationReason, detail string) (teamTask, bool, error)
- func (b *Broker) Requests(channel string, includeResolved bool) []humanInterview
- func (b *Broker) Reset()
- func (b *Broker) ResolveReviewers(taskID string) ([]string, error)
- func (b *Broker) RestartBrokerListener() (WebBrokerRestartStatus, error)
- func (b *Broker) ResumeTask(taskID, actor, reason string) (teamTask, bool, error)
- func (b *Broker) ReviewLog() *ReviewLog
- func (b *Broker) ReviewerGrades(taskID string) []ReviewerGrade
- func (b *Broker) RevokeHumanInvite(inviteID string) ([]string, error)
- func (b *Broker) ScheduleRecheck(channel, targetType, targetID, label, payload string, when time.Time) error
- func (b *Broker) ScheduleRequestFollowUp(requestID, channel, label, payload string, when time.Time) error
- func (b *Broker) ScheduleTaskFollowUp(taskID, channel, owner, label, payload string, when time.Time) error
- func (b *Broker) Scheduler() []schedulerJob
- func (b *Broker) SchedulerJobControl(slug string, defaultInterval time.Duration) (bool, time.Duration)
- func (b *Broker) SeedDefaultSkills(specs []agent.PackSkillSpec)
- func (b *Broker) SeenTelegramGroups() map[int64]string
- func (b *Broker) ServeWebUI(port int) error
- func (b *Broker) SessionModeState() (string, string)
- func (b *Broker) SetAdminPauseExitFn(fn func(int))
- func (b *Broker) SetAgentLogRoot(root string)
- func (b *Broker) SetDecisionPacketStore(store decisionPacketStore)
- func (b *Broker) SetDependencies(taskID string, deps Dependencies) error
- func (b *Broker) SetEntityGraph(graph *EntityGraph)
- func (b *Broker) SetEntitySynthesizer(factLog *FactLog, synth *EntitySynthesizer)
- func (b *Broker) SetFocusMode(enabled bool) error
- func (b *Broker) SetGenerateChannelFn(fn func(context.Context, string) (generatedChannelTemplate, error))
- func (b *Broker) SetGenerateMemberFn(fn func(string) (generatedMemberTemplate, error))
- func (b *Broker) SetHumanAdmitHook(hook humanAdmitHookFn)
- func (b *Broker) SetInsightsCursor(cursor string) error
- func (b *Broker) SetLauncherDrainer(d launcherDrainer)
- func (b *Broker) SetMemberProvider(slug string, binding provider.ProviderBinding) error
- func (b *Broker) SetNotificationCursor(cursor string) error
- func (b *Broker) SetPlaybookExecutionLog(log *ExecutionLog)
- func (b *Broker) SetPlaybookSynthesizer(synth *PlaybookSynthesizer)
- func (b *Broker) SetReviewerResolver(resolver ReviewerResolver)
- func (b *Broker) SetSchedulerJob(job schedulerJob) error
- func (b *Broker) SetSessionMode(mode, agent string) error
- func (b *Broker) SetShareTransport(t *ShareTransport)
- func (b *Broker) SetSkillCounter(c *SkillCounter)
- func (b *Broker) SetSkillScanner(s *SkillScanner)
- func (b *Broker) SetSkillSynthesizer(s *SkillSynthesizer)
- func (b *Broker) SetSpec(taskID string, spec Spec) error
- func (b *Broker) SetTeamLearningLog(log *LearningLog)
- func (b *Broker) SetWebShareController(start func() (WebShareStatus, error), status func() WebShareStatus, ...)
- func (b *Broker) SetWebTunnelController(start func() (WebTunnelStatus, error), status func() WebTunnelStatus, ...)
- func (b *Broker) SetWikiCompressor(c *WikiCompressor)
- func (b *Broker) SetWorkspaceOrchestrator(o workspaceOrchestrator)
- func (b *Broker) ShareTransport() *ShareTransport
- func (b *Broker) Signals() []officeSignalRecord
- func (b *Broker) Start() error
- func (b *Broker) StartIntake(ctx context.Context, intent string, provider IntakeProvider) (IntakeOutcome, error)
- func (b *Broker) StartOnPort(port int) error
- func (b *Broker) StartReviewConvergenceSweeper(ctx context.Context) func()
- func (b *Broker) Stop()
- func (b *Broker) SubscribeActions(buffer int) (<-chan officeActionLog, func())
- func (b *Broker) SubscribeActivity(buffer int) (<-chan agentActivitySnapshot, func())
- func (b *Broker) SubscribeEntityBriefEvents(buffer int) (<-chan EntityBriefSynthesizedEvent, func())
- func (b *Broker) SubscribeEntityFactEvents(buffer int) (<-chan EntityFactRecordedEvent, func())
- func (b *Broker) SubscribeMessages(buffer int) (<-chan channelMessage, func())
- func (b *Broker) SubscribeNotebookEvents(buffer int) (<-chan notebookWriteEvent, func())
- func (b *Broker) SubscribeOfficeChanges(buffer int) (<-chan officeChangeEvent, func())
- func (b *Broker) SubscribePamActionEvents(buffer int) (<-chan PamActionStartedEvent, <-chan PamActionDoneEvent, ...)
- func (b *Broker) SubscribePlaybookExecutionEvents(buffer int) (<-chan PlaybookExecutionRecordedEvent, func())
- func (b *Broker) SubscribePlaybookSynthesizedEvents(buffer int) (<-chan PlaybookSynthesizedEvent, func())
- func (b *Broker) SubscribeReviewEvents(buffer int) (<-chan ReviewStateChangeEvent, func())
- func (b *Broker) SubscribeWikiEvents(buffer int) (<-chan wikiWriteEvent, func())
- func (b *Broker) SubscribeWikiSectionsUpdated(buffer int) (<-chan WikiSectionsUpdatedEvent, func())
- func (b *Broker) SurfaceChannels(provider string) []teamChannel
- func (b *Broker) SweepReviewConvergence()
- func (b *Broker) TaskByID(id string) *teamTask
- func (b *Broker) TeamLearningLog() *LearningLog
- func (b *Broker) Token() string
- func (b *Broker) TransitionLifecycle(taskID string, newState LifecycleState, reason string) error
- func (b *Broker) UnackedTasks(timeout time.Duration) []teamTask
- func (b *Broker) UpdateAgentActivity(update agentActivitySnapshot)
- func (b *Broker) UpdateSchedulerJobState(slug string, nextRun time.Time, status string) error
- func (b *Broker) UpdateSkillExecutionByWorkflowKey(workflowKey, status string, when time.Time) error
- func (b *Broker) Watchdogs() []watchdogAlert
- func (b *Broker) WikiCompressor() *WikiCompressor
- func (b *Broker) WikiIndex() *WikiIndex
- func (b *Broker) WikiInitErr() error
- func (b *Broker) WikiReadLog() *ReadLog
- func (b *Broker) WikiSectionsCache() *wikiSectionsCache
- func (b *Broker) WikiWorker() *WikiWorker
- type CapabilityCategory
- type CapabilityDescriptor
- type CapabilityLevel
- type CapabilityLifecycle
- type CapabilityProbeOptions
- type CapabilityRegistry
- type CapabilityStatus
- type CatalogEntry
- type ChannelIntentCounters
- type ChannelIntentDispatcher
- func (d *ChannelIntentDispatcher) Counters() ChannelIntentCounters
- func (d *ChannelIntentDispatcher) Handle(msg channelMessage)
- func (d *ChannelIntentDispatcher) Start(ctx context.Context)
- func (d *ChannelIntentDispatcher) Stop(timeout time.Duration)
- func (d *ChannelIntentDispatcher) WaitForCondition(ctx context.Context, predicate func() bool) error
- type CircuitBreaker
- type CoalescedEdge
- type Comment
- type CommitRef
- type CompressJob
- type CompressorConfig
- type ContextCitation
- type CreateRequest
- type DLQ
- func (d *DLQ) CorruptLineCounts() (extractions, permanent uint64)
- func (d *DLQ) Enqueue(ctx context.Context, e DLQEntry) error
- func (d *DLQ) Inspect(ctx context.Context) (Snapshot, error)
- func (d *DLQ) MarkResolved(ctx context.Context, artifactSHA string) error
- func (d *DLQ) ReadyForReplay(ctx context.Context, now time.Time) ([]DLQEntry, error)
- func (d *DLQ) RecordAttempt(ctx context.Context, artifactSHA string, attemptErr error, cat string) error
- type DLQEntry
- type DLQErrorCategory
- type DeadEnd
- type DecisionPacket
- type DemandCandidate
- type Dependencies
- type DiffSummary
- type Direction
- type DiscoveredSection
- type EntityBriefSynthesizedEvent
- type EntityEdge
- type EntityFactRecordedEvent
- type EntityGraph
- type EntityKind
- type EntityRef
- type EntitySlug
- type EntitySynthesizer
- func (s *EntitySynthesizer) EnqueueSynthesis(kind EntityKind, slug, requestBy string) (uint64, error)
- func (s *EntitySynthesizer) IsInflightOrQueued(kind EntityKind, slug string) bool
- func (s *EntitySynthesizer) Mode() SynthesisMode
- func (s *EntitySynthesizer) Start(ctx context.Context)
- func (s *EntitySynthesizer) Stop()
- func (s *EntitySynthesizer) Threshold() int
- type Execution
- type ExecutionLog
- type Extractor
- type ExtractorHook
- type Fact
- type FactCluster
- type FactLog
- type FactLogAppendPayload
- type FactStore
- type FeedbackItem
- type GraphAllNode
- type GuardScanResult
- type GuardTrustLevel
- type GuardVerdict
- type HeadlessEvent
- type HeadlessEventMetrics
- type HeadlessManifestEntry
- type HeadlessPamRunner
- type HealthResponse
- type HumanIdentity
- type HumanIdentityRegistry
- func (r *HumanIdentityRegistry) Dir() string
- func (r *HumanIdentityRegistry) List() []HumanIdentity
- func (r *HumanIdentityRegistry) Local() HumanIdentity
- func (r *HumanIdentityRegistry) Lookup(slug string) (HumanIdentity, bool)
- func (r *HumanIdentityRegistry) Observe(name, email string) (HumanIdentity, error)
- type HumanWikiIntentCounters
- type HumanWikiIntentWriter
- func (w *HumanWikiIntentWriter) Counters() HumanWikiIntentCounters
- func (w *HumanWikiIntentWriter) Handle(msg channelMessage)
- func (w *HumanWikiIntentWriter) Start(ctx context.Context)
- func (w *HumanWikiIntentWriter) Stop(timeout time.Duration)
- func (w *HumanWikiIntentWriter) WaitForCondition(ctx context.Context, predicate func() bool) error
- type InboxCounts
- type InboxFilter
- type InboxPayload
- type InboxRow
- type IndexEdge
- type IndexEntity
- type IndexOption
- type IntakeOutcome
- type IntakeProvider
- type JoinURLBuilder
- type Launcher
- func (l *Launcher) AgentCount() int
- func (l *Launcher) Attach() error
- func (l *Launcher) Broker() *Broker
- func (l *Launcher) BrokerBaseURL() string
- func (l *Launcher) BrokerToken() string
- func (l *Launcher) Drain(ctx context.Context) error
- func (l *Launcher) GenerateChannelTemplateFromPrompt(request string) (generatedChannelTemplate, error)
- func (l *Launcher) GenerateChannelTemplateFromPromptCtx(ctx context.Context, request string) (generatedChannelTemplate, error)
- func (l *Launcher) GenerateMemberTemplateFromPrompt(request string) (generatedMemberTemplate, error)
- func (l *Launcher) Kill() error
- func (l *Launcher) Launch() error
- func (l *Launcher) LaunchWeb(webPort int) error
- func (l *Launcher) OneOnOneAgent() string
- func (l *Launcher) PackName() string
- func (l *Launcher) Preflight() error
- func (l *Launcher) PreflightWeb() error
- func (l *Launcher) ReconfigureSession() error
- func (l *Launcher) ResetSession() error
- func (l *Launcher) SetBrokerConfigurator(fn func(*Broker))
- func (l *Launcher) SetFocusMode(v bool)
- func (l *Launcher) SetNoOpen(v bool)
- func (l *Launcher) SetOneOnOne(slug string)
- func (l *Launcher) SetOpusCEO(v bool)
- func (l *Launcher) SetUnsafe(v bool)
- func (l *Launcher) UsesTmuxRuntime() bool
- type LearningLog
- type LearningRecord
- type LearningSearchFilters
- type LearningSearchResult
- type LearningSource
- type LearningType
- type LifecycleManifestSubKind
- type LifecycleState
- type Lint
- type LintFinding
- type LintProvider
- type LintReport
- type LocalProviderStatus
- type MaintenanceAction
- type MaintenanceAssistant
- type MaintenanceDiff
- type MaintenanceEvidence
- type MaintenanceFactProposal
- type MaintenanceSuggestion
- type MemoryBackendStatus
- type MemoryWorkflow
- type MemoryWorkflowArtifact
- type MemoryWorkflowOverride
- type MemoryWorkflowReconcileReport
- type MemoryWorkflowReconcileTaskResult
- type MemoryWorkflowReconciler
- type MemoryWorkflowStep
- type MemoryWorkflowStepState
- type NotebookDemandIndex
- func (idx *NotebookDemandIndex) AutoEscalateDemandCandidates(ctx context.Context, reviewLog *ReviewLog, reader promotionDemandReader) error
- func (idx *NotebookDemandIndex) Record(evt PromotionDemandEvent) error
- func (idx *NotebookDemandIndex) Score(entryPath string) float64
- func (idx *NotebookDemandIndex) SetClockForTest(clock func() time.Time)
- func (idx *NotebookDemandIndex) Threshold() float64
- func (idx *NotebookDemandIndex) TopCandidates(n int) []DemandCandidate
- func (idx *NotebookDemandIndex) WaitForCondition(ctx context.Context, predicate func() bool) error
- func (idx *NotebookDemandIndex) WindowDays() int
- type NotebookEntry
- type NotebookSignalScanner
- type OnboardingFields
- type OpenclawBridge
- func BuildOpenclawBridgeFromConfig(broker *Broker) (*OpenclawBridge, error)
- func NewOpenclawBridge(broker *Broker, client openclawClient, bindings []config.OpenclawBridgeBinding) *OpenclawBridge
- func NewOpenclawBridgeWithDialer(broker *Broker, initial openclawClient, dialer openclawDialer, ...) *OpenclawBridge
- func StartOpenclawBridgeFromConfig(ctx context.Context, broker *Broker) (*OpenclawBridge, error)
- func (b *OpenclawBridge) AttachSlug(slug, sessionKey string)
- func (b *OpenclawBridge) AttachSlugAndSubscribe(ctx context.Context, slug, sessionKey string) error
- func (b *OpenclawBridge) Binding() transport.Binding
- func (b *OpenclawBridge) CreateSession(ctx context.Context, agentID, label string) (string, error)
- func (b *OpenclawBridge) DetachSession(ctx context.Context, slug, sessionKey string) error
- func (b *OpenclawBridge) DetachSlug(slug string)
- func (b *OpenclawBridge) DetachSlugAndUnsubscribe(ctx context.Context, slug string) error
- func (b *OpenclawBridge) HasSlug(slug string) bool
- func (b *OpenclawBridge) Health() transport.Health
- func (b *OpenclawBridge) Name() string
- func (b *OpenclawBridge) OnOfficeMessage(ctx context.Context, slug, channel, message string) error
- func (b *OpenclawBridge) Run(ctx context.Context, host transport.Host) error
- func (b *OpenclawBridge) Send(ctx context.Context, msg transport.Outbound) error
- func (b *OpenclawBridge) SetRetryDelaysForTest(d []time.Duration)
- func (b *OpenclawBridge) SnapshotBindings() map[string]string
- func (b *OpenclawBridge) Start(ctx context.Context) error
- func (b *OpenclawBridge) Stop()
- type PamAction
- type PamActionDoneEvent
- type PamActionFailedEvent
- type PamActionID
- type PamActionStartedEvent
- type PamDispatcher
- type PamDispatcherConfig
- type PamJob
- type PamRunner
- type PlaybookExecutionRecordedEvent
- type PlaybookOutcome
- type PlaybookSummary
- type PlaybookSynthesisJob
- type PlaybookSynthesizedEvent
- type PlaybookSynthesizer
- func (s *PlaybookSynthesizer) EnqueueSynthesis(slug, requestBy string, triggeredByUser bool) (uint64, error)
- func (s *PlaybookSynthesizer) OnExecutionRecorded(slug string)
- func (s *PlaybookSynthesizer) Start(ctx context.Context)
- func (s *PlaybookSynthesizer) Stop()
- func (s *PlaybookSynthesizer) SynthesizeNow(ctx context.Context, slug, actor string) (uint64, error)
- func (s *PlaybookSynthesizer) Threshold() int
- type PlaybookSynthesizerConfig
- type Promotion
- type PromotionDemandEvent
- type PromotionDemandSignal
- type PromotionState
- type PromotionSweep
- type PromotionSweepConfig
- type PromotionSweepCounters
- type ProposedEntity
- type QueryAnswer
- type QueryClass
- type QueryHandler
- type QueryProvider
- type QueryRequest
- type QuerySource
- type ReadEvent
- type ReadLog
- type ReadStats
- type Redirect
- type Repo
- func (r *Repo) AppendFactLog(ctx context.Context, slug, relPath, additionalContent, message string) (string, int, error)
- func (r *Repo) ApplyPromotion(ctx context.Context, p *Promotion, approverSlug string) (string, error)
- func (r *Repo) AuditLog(ctx context.Context, since time.Time, limit int) ([]AuditEntry, error)
- func (r *Repo) BackupMirror(ctx context.Context) error
- func (r *Repo) BackupRoot() string
- func (r *Repo) BuildArticle(ctx context.Context, relPath, reader string, readLog *ReadLog) (ArticleMeta, error)
- func (r *Repo) BuildCatalog(ctx context.Context, sortBy string, readLog *ReadLog, includeArchived bool) ([]CatalogEntry, error)
- func (r *Repo) Commit(ctx context.Context, slug, relPath, content, mode, message string) (string, int, error)
- func (r *Repo) CommitArchive(ctx context.Context, ...) (string, error)
- func (r *Repo) CommitArtifact(ctx context.Context, slug, relPath, content, message string) (string, int, error)
- func (r *Repo) CommitBootstrap(ctx context.Context, message string) (string, error)
- func (r *Repo) CommitEntityFact(ctx context.Context, slug, relPath, content, message string) (string, int, error)
- func (r *Repo) CommitEntityGraph(ctx context.Context, slug, content, message string) (string, int, error)
- func (r *Repo) CommitFactLog(ctx context.Context, slug, relPath, content, message string) (string, int, error)
- func (r *Repo) CommitGhostBrief(ctx context.Context, kind, slug, content, message string) (string, int, error)
- func (r *Repo) CommitHuman(ctx context.Context, relPath, content, expectedSHA, message string, ...) (string, int, error)
- func (r *Repo) CommitLintReport(ctx context.Context, slug, relPath, content, message string) (string, int, error)
- func (r *Repo) CommitNotebook(ctx context.Context, slug, relPath, content, mode, message string) (string, int, error)
- func (r *Repo) CommitPlaybookExecution(ctx context.Context, slug, relPath, content, message string) (string, int, error)
- func (r *Repo) CommitPlaybookSkill(ctx context.Context, slug, relPath, content, message string) (string, int, error)
- func (r *Repo) CommitScanStaged(ctx context.Context, message string) (string, error)
- func (r *Repo) CommitTeamLearnings(ctx context.Context, ...) (string, int, error)
- func (r *Repo) EnsureNotebookDirs(ctx context.Context, slugs []string) (string, error)
- func (r *Repo) Fsck(ctx context.Context) error
- func (r *Repo) HeadSHA(ctx context.Context) (string, error)
- func (r *Repo) IndexAllPath() string
- func (r *Repo) IndexDir() string
- func (r *Repo) IndexRegen(ctx context.Context) error
- func (r *Repo) Init(ctx context.Context) error
- func (r *Repo) Log(ctx context.Context, relPath string) ([]CommitRef, error)
- func (r *Repo) RecoverDirtyTree(ctx context.Context) error
- func (r *Repo) RestoreFromBackup(ctx context.Context) error
- func (r *Repo) Root() string
- func (r *Repo) TeamDir() string
- type ResolvedEntity
- type ReviewLog
- func (l *ReviewLog) AddComment(promotionID, actorSlug, body string) (*Promotion, Comment, error)
- func (l *ReviewLog) AdvanceToInReview(promotionID, actorSlug string) (*Promotion, StateTransition, error)
- func (l *ReviewLog) Approve(promotionID, actorSlug, rationale, commitSHA string) (*Promotion, StateTransition, error)
- func (l *ReviewLog) CanApprove(promotionID, actorSlug string) error
- func (l *ReviewLog) Get(promotionID string) (*Promotion, error)
- func (l *ReviewLog) List(scope string) []*Promotion
- func (l *ReviewLog) Path() string
- func (l *ReviewLog) Reject(promotionID, actorSlug string) (*Promotion, StateTransition, error)
- func (l *ReviewLog) RequestChanges(promotionID, actorSlug, rationale string) (*Promotion, StateTransition, error)
- func (l *ReviewLog) Resubmit(promotionID, actorSlug string) (*Promotion, StateTransition, error)
- func (l *ReviewLog) SubmitPromotion(req SubmitPromotionRequest) (*Promotion, error)
- func (l *ReviewLog) TickExpiry(now time.Time) []StateTransition
- type ReviewStateChangeEvent
- type ReviewerGrade
- type ReviewerResolver
- type ReviewerRoutingSignals
- type ReviewerSummary
- type RouteContract
- type RuntimeArtifact
- type RuntimeArtifactKind
- type RuntimeCapabilities
- type RuntimeMessage
- type RuntimeRequest
- type RuntimeSnapshot
- type RuntimeSnapshotInput
- type RuntimeTask
- type SQLiteFactStore
- func (s *SQLiteFactStore) CanonicalHashAll(ctx context.Context) (string, error)
- func (s *SQLiteFactStore) CanonicalHashFacts(ctx context.Context) (string, error)
- func (s *SQLiteFactStore) Close() error
- func (s *SQLiteFactStore) CountFacts(ctx context.Context) (int, error)
- func (s *SQLiteFactStore) GetFact(ctx context.Context, id string) (TypedFact, bool, error)
- func (s *SQLiteFactStore) IterateEntities(ctx context.Context, fn func(IndexEntity) error) error
- func (s *SQLiteFactStore) ListAllFacts(ctx context.Context) ([]TypedFact, error)
- func (s *SQLiteFactStore) ListAllFactsPaged(ctx context.Context, afterID string, limit int) ([]TypedFact, error)
- func (s *SQLiteFactStore) ListEdgesForEntity(ctx context.Context, slug string) ([]IndexEdge, error)
- func (s *SQLiteFactStore) ListFactsByPredicateObject(ctx context.Context, predicate, object string) ([]TypedFact, error)
- func (s *SQLiteFactStore) ListFactsByTriplet(ctx context.Context, subject, predicate, objectPrefix string) ([]TypedFact, error)
- func (s *SQLiteFactStore) ListFactsForEntity(ctx context.Context, slug string) ([]TypedFact, error)
- func (s *SQLiteFactStore) ListReinforcedFactsByPredicate(ctx context.Context, predicate string) ([]TypedFact, error)
- func (s *SQLiteFactStore) ResolveRedirect(ctx context.Context, slug string) (string, bool, error)
- func (s *SQLiteFactStore) UpsertEdge(ctx context.Context, e IndexEdge) error
- func (s *SQLiteFactStore) UpsertEntity(ctx context.Context, e IndexEntity) error
- func (s *SQLiteFactStore) UpsertFact(ctx context.Context, f TypedFact) error
- func (s *SQLiteFactStore) UpsertRedirect(ctx context.Context, r Redirect) error
- type ScanError
- type ScanResult
- type ScopedMemoryHit
- type SearchHit
- type SelfHealSignalScanner
- type SessionMemoryActionSummary
- type SessionMemoryMessageSummary
- type SessionMemoryRequestSummary
- type SessionMemorySnapshot
- type SessionMemoryTaskSummary
- type SessionRecovery
- type SessionReport
- type SessionRestoreContext
- type Severity
- type SeveritySummary
- type ShareInviteDetails
- type ShareTransport
- func (s *ShareTransport) Binding() transport.Binding
- func (s *ShareTransport) CreateInvite(_ context.Context, _ string) (string, error)
- func (s *ShareTransport) CreateInviteDetailed(_ context.Context) (ShareInviteDetails, error)
- func (s *ShareTransport) CreateInviteDetailedWithBuilder(_ context.Context, builder JoinURLBuilder) (ShareInviteDetails, error)
- func (s *ShareTransport) Health() transport.Health
- func (s *ShareTransport) Name() string
- func (s *ShareTransport) RevokeInvite(ctx context.Context, inviteID string) error
- func (s *ShareTransport) Run(ctx context.Context, host transport.Host) error
- func (s *ShareTransport) Send(_ context.Context, _ transport.Outbound) error
- func (s *ShareTransport) SetURLBuilder(b JoinURLBuilder)
- type SharedMemoryWrite
- type SignalIndex
- type Signals
- type SkillCandidate
- type SkillCandidateExcerpt
- type SkillCandidateSource
- type SkillCompileMetrics
- type SkillCounter
- func (c *SkillCounter) Cooldown() time.Duration
- func (c *SkillCounter) Increment(agentSlug, toolName, summary string) (shouldNudge bool, iterations int)
- func (c *SkillCounter) RecentToolCalls(agentSlug string, limit int) []recentToolCall
- func (c *SkillCounter) Reset(agentSlug string)
- func (c *SkillCounter) SetClock(now func() time.Time)
- func (c *SkillCounter) Stats() map[string]SkillCounterMetrics
- func (c *SkillCounter) Threshold() int
- func (c *SkillCounter) TotalNudgesFired() int64
- type SkillCounterMetrics
- type SkillFrontmatter
- type SkillMetadata
- type SkillSafetyScan
- type SkillScanner
- type SkillSpec
- type SkillSynthesizer
- type SkillTombstoneEntry
- type SkillWuphfMeta
- type Snapshot
- type Spec
- type StageBSignalAggregator
- type StageBSynthResult
- type StateTransition
- type SubmitPromotionRequest
- type SweepResult
- type SynthError
- type SynthesisJob
- type SynthesisMode
- type SynthesizerConfig
- type TaskAckRequest
- type TaskListRequest
- type TaskListResponse
- type TaskMemoryWorkflowReconcileResponse
- type TaskMemoryWorkflowRequest
- type TaskMemoryWorkflowResponse
- type TaskMutationError
- type TaskMutationErrorKind
- type TaskPlanInput
- type TaskPlanRequest
- type TaskPostRequest
- type TaskResponse
- type TelegramGroup
- type TelegramTransport
- func (t *TelegramTransport) Binding() transport.Binding
- func (t *TelegramTransport) FormatOutbound(msg channelMessage) (transport.Outbound, bool)
- func (t *TelegramTransport) HandleInbound(chatID int64, chatType string, from *telegramUser, text string) error
- func (t *TelegramTransport) Health() transport.Health
- func (t *TelegramTransport) Name() string
- func (t *TelegramTransport) Run(ctx context.Context, host transport.Host) error
- func (t *TelegramTransport) Send(ctx context.Context, msg transport.Outbound) error
- func (t *TelegramTransport) SendToTelegram(ctx context.Context, chatID string, msg channelMessage) error
- func (t *TelegramTransport) Start(ctx context.Context) error
- type TextIndex
- type TmuxCapability
- type TmuxSessionStatus
- type TrashEntry
- type Triplet
- type TypedFact
- type UpgradeChangelogResponse
- type UpgradeCheckErrorResponse
- type UpgradeCheckResponse
- type Watching
- type WebBrokerRestartStatus
- type WebShareStatus
- type WebTunnelStatus
- type WikiArchiver
- type WikiCompressor
- type WikiIndex
- func (w *WikiIndex) CanonicalHashAll(ctx context.Context) (string, error)
- func (w *WikiIndex) CanonicalHashFacts(ctx context.Context) (string, error)
- func (w *WikiIndex) Close() error
- func (w *WikiIndex) GetFact(ctx context.Context, id string) (TypedFact, bool, error)
- func (w *WikiIndex) LastBuild() time.Time
- func (w *WikiIndex) ListEdgesForEntity(ctx context.Context, slug string) ([]IndexEdge, error)
- func (w *WikiIndex) ListFactsByPredicateObject(ctx context.Context, predicate, object string) ([]TypedFact, error)
- func (w *WikiIndex) ListFactsByTriplet(ctx context.Context, subject, predicate, objectPrefix string) ([]TypedFact, error)
- func (w *WikiIndex) ListFactsForEntity(ctx context.Context, slug string) ([]TypedFact, error)
- func (w *WikiIndex) ReconcileFromMarkdown(ctx context.Context) error
- func (w *WikiIndex) ReconcilePath(ctx context.Context, relPath string) error
- func (w *WikiIndex) Search(ctx context.Context, query string, topK int) ([]SearchHit, error)
- type WikiIndexSignalAdapter
- func (a *WikiIndexSignalAdapter) EntityByDomain(ctx context.Context, domain string) ([]resolverEntity, error)
- func (a *WikiIndexSignalAdapter) EntityByEmail(ctx context.Context, email string) (resolverEntity, bool, error)
- func (a *WikiIndexSignalAdapter) EntityByName(ctx context.Context, name string) ([]resolverEntity, error)
- func (a *WikiIndexSignalAdapter) EntityBySlug(ctx context.Context, slug string) (resolverEntity, bool, error)
- type WikiSearchHit
- type WikiSectionsUpdatedEvent
- type WikiWorker
- func (w *WikiWorker) AgentsWithNotebooks() ([]string, error)
- func (w *WikiWorker) Done() <-chan struct{}
- func (w *WikiWorker) Enqueue(ctx context.Context, slug, path, content, mode, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueArchiveSweep(ctx context.Context, readLog *ReadLog, minAge time.Duration) (SweepResult, error)
- func (w *WikiWorker) EnqueueArtifact(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueEntityFact(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueEntityGraph(ctx context.Context, slug, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueFactLog(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueFactLogAppend(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueHuman(ctx context.Context, path, content, commitMsg, expectedSHA string) (string, int, error)
- func (w *WikiWorker) EnqueueHumanAs(ctx context.Context, id HumanIdentity, ...) (string, int, error)
- func (w *WikiWorker) EnqueueLintReport(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueuePlaybookCompile(ctx context.Context, slug, authorSlug string) (string, int, error)
- func (w *WikiWorker) EnqueuePlaybookExecution(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
- func (w *WikiWorker) EnqueueTeamLearning(ctx context.Context, ...) (string, int, error)
- func (w *WikiWorker) EnsureNotebookDirs(ctx context.Context, slugs []string) (string, error)
- func (w *WikiWorker) Index() *WikiIndex
- func (w *WikiWorker) NotebookCommitCount() int
- func (w *WikiWorker) NotebookList(slug string) ([]NotebookEntry, error)
- func (w *WikiWorker) NotebookRead(path string) ([]byte, error)
- func (w *WikiWorker) NotebookSearch(slug, pattern string) ([]WikiSearchHit, error)
- func (w *WikiWorker) NotebookWrite(ctx context.Context, slug, path, content, mode, commitMsg string) (string, int, error)
- func (w *WikiWorker) NotifyArchived(ctx context.Context, paths []string)
- func (w *WikiWorker) QueueLength() int
- func (w *WikiWorker) ReadArticle(relPath string) ([]byte, error)
- func (w *WikiWorker) Repo() *Repo
- func (w *WikiWorker) SetExtractor(e ExtractorHook)
- func (w *WikiWorker) Start(ctx context.Context)
- func (w *WikiWorker) Stop()
- func (w *WikiWorker) SubmitFacts(ctx context.Context, facts []TypedFact, entities []IndexEntity) error
- func (w *WikiWorker) WaitForIdle()
- type Win
- type Workspace
Constants ¶
const ( RecordDecisionMerge recordDecisionAction = "merge" RecordDecisionRequestChanges recordDecisionAction = "request_changes" RecordDecisionBlock recordDecisionAction = "block" RecordDecisionDefer recordDecisionAction = "defer" )
const ( // RouteMethodAny marks legacy routes that do not enforce a method yet. RouteMethodAny = "*" // RouteMethodGetPost marks routes that intentionally support GET and POST. RouteMethodGetPost = "GET|POST" // RouteAuthNone marks routes that are intentionally available without a bearer token. RouteAuthNone = "none" // RouteAuthBearer marks routes protected by the broker bearer token. RouteAuthBearer = "bearer" // RouteAuthLoopback marks routes guarded by loopback RemoteAddr and Host checks. RouteAuthLoopback = "loopback" )
const ( CapabilityKeyTmux = "tmux" CapabilityKeyClaude = "claude" CapabilityKeyCodex = "codex" CapabilityKeyOpencode = "opencode" CapabilityKeyOfficeRuntime = "office_runtime" CapabilityKeyDirectRuntime = "direct_runtime" CapabilityKeyMemory = "memory" CapabilityKeyNex = CapabilityKeyMemory CapabilityKeyConnections = "connections" CapabilityKeyActions = "actions" CapabilityKeyWorkflows = "workflows" CapabilityKeyOfficeActions = "office_actions" CapabilityKeyDirectActions = "direct_actions" )
const ( HeadlessEventKind = "headless_event" HeadlessEventTypeStatus = "status" HeadlessEventTypeText = "text" HeadlessEventTypeToolUse = "tool_use" HeadlessEventTypeToolResult = "tool_result" HeadlessEventTypeIdle = "idle" HeadlessEventTypeError = "error" HeadlessEventTypeManifest = "manifest" HeadlessProviderClaude = "claude" HeadlessProviderCodex = "codex" HeadlessProviderOpencode = "opencode" HeadlessProviderOpenAICompat = "openai-compat" )
Constants for the discriminator and stable Type values. Wire-format strings — keep in lockstep with the frontend's HeadlessEventView.
const ( HumanWikiIntentRemember humanWikiIntentKind = "remember" HumanWikiIntentSaveMem humanWikiIntentKind = "save_memory" HumanWikiIntentWriteKB humanWikiIntentKind = "write_kb" HumanWikiIntentWikiThis humanWikiIntentKind = "wiki_this" HumanWikiIntentCanonical humanWikiIntentKind = "canonical" )
const ( TeamLearningsJSONLPath = "team/learnings/index.jsonl" TeamLearningsPagePath = "team/learnings/index.md" )
const ( MaxLearningInsightLen = 4000 MaxLearningKeyLen = 80 MaxLearningScopeLen = 128 DefaultLearningLimit = 20 MaxLearningLimit = 100 )
const ( MemoryWorkflowStatusNotRequired = "not_required" MemoryWorkflowStatusPending = "pending" MemoryWorkflowStatusSatisfied = "satisfied" MemoryWorkflowStatusOverridden = "overridden" )
const ( MemoryWorkflowStepStatusPending = "pending" MemoryWorkflowStepStatusSatisfied = "satisfied" )
const ( // PromotionIdleExpiry is how long a non-terminal promotion may sit // without activity before auto-expiring. PromotionIdleExpiry = 14 * 24 * time.Hour // PromotionApprovedArchive is how long an approved promotion stays // visible in the review feed before auto-archiving. PromotionApprovedArchive = 7 * 24 * time.Hour )
Idle timeouts — surfaced as constants so tests can reference them and downstream operators can grep for the policy in one place.
const ( SessionModeOffice = "office" SessionModeOneOnOne = "1o1" DefaultOneOnOneAgent = "ceo" )
const ArchivistAuthor = "archivist"
ArchivistAuthor is the synthetic commit author for every brief update. Not a roster member — pure git identity.
const BrokerPort = brokeraddr.DefaultPort
const CatalogSortLastRead = "last_read"
CatalogSortLastRead is the sort key accepted by BuildCatalog to sort articles by access time, oldest-accessed first.
const CatalogSortPruneScore = "prune_score"
CatalogSortPruneScore is the sort key accepted by BuildCatalog to sort articles descending by prune_score (most prunable first). Prune score is (words * daysUnread) / readWeight — high for verbose + stale + under-read articles. See BuildCatalog for the formula.
const ( // ChannelIntentContextAsk fires on question-form context-seeking phrases: // "who has context on …", "does anyone know …", "what do we have on …". ChannelIntentContextAsk channelIntentKind = "context_ask" )
const CompressPromptSystem = `` /* 295-byte string literal not displayed */
CompressPromptSystem is the locked system prompt for compression. Wording is part of the spec — do not edit without updating the ICP doc.
const DLQDefaultMaxRetries = 5
DLQDefaultMaxRetries is the default retry ceiling before an entry is promoted to permanent-failures.jsonl.
const DLQValidationMaxRetries = 1
DLQValidationMaxRetries is the max retry ceiling for programming-error categories (validation): never retry past the first attempt.
const ( // DefaultArchiveCutoffDays is the number of days an article must have been // unread before it is eligible for archival. DefaultArchiveCutoffDays = 90 )
const DefaultClusterMinEntities = 3
DefaultClusterMinEntities is the v2 prompt threshold for surfacing a "pattern across entities". Mirrors WIKI-SLICE2-PLAN.md Thread C bullet 1: ≥3 distinct entities sharing a reinforced (predicate, object) pair.
const DefaultCompressTimeout = 30 * time.Second
DefaultCompressTimeout bounds a single LLM shell-out for compression.
const DefaultPamTimeout = 90 * time.Second
DefaultPamTimeout bounds a single Pam sub-process. Web enrichment can legitimately take longer than an entity-brief synthesis (the LLM fans out to WebSearch + WebFetch), so this is wider than the synthesizer default.
const DefaultPlaybookSynthesisThreshold = 3
DefaultPlaybookSynthesisThreshold is the number of new executions that must accumulate before an automatic synthesis is triggered. Configurable per deployment via WUPHF_PLAYBOOK_SYNTHESIS_THRESHOLD.
const DefaultPlaybookSynthesisTimeout = 45 * time.Second
DefaultPlaybookSynthesisTimeout bounds a single LLM shell-out. Configurable via WUPHF_PLAYBOOK_SYNTHESIS_TIMEOUT (seconds).
const DefaultSynthesisThreshold = 3
DefaultSynthesisThreshold is the number of new facts that must accumulate before an automatic synthesis is triggered. Configurable per deployment via WUPHF_ENTITY_BRIEF_THRESHOLD.
const DefaultSynthesisTimeout = 30 * time.Second
DefaultSynthesisTimeout bounds a single LLM shell-out. Configurable via WUPHF_ENTITY_BRIEF_TIMEOUT (seconds).
const EntityGraphPath = "team/entities/.graph.jsonl"
EntityGraphPath is the wiki-root-relative path to the graph log.
const HumanAuthor = "human"
HumanAuthor is the synthetic commit author slug used when no richer human identity has been registered (no `git config --global user.name` / `user.email` on this machine). Yields `human <human@wuphf.local>` via runGitLocked's identity derivation. Distinct from every agent slug and from the other synthetic identities (archivist, wuphf-bootstrap, wuphf-recovery, system) so audit views can colour human edits distinctly.
v1.5: when the broker has probed a real git identity, the slug on disk becomes the user's derived slug (see deriveSlug in human_identity.go) and commits land with their real name + email. `HumanAuthor` remains the fallback.
const MaxBriefSize = 32 * 1024
MaxBriefSize caps the LLM output bytes we are willing to commit. Any larger response is treated as a malformed synthesis and dropped.
const MaxClustersForPrompt = 10
MaxClustersForPrompt caps how many clusters the v2 prompt carries. Keeps the LLM input bounded when the wiki has hundreds of reinforced patterns. Clusters are sorted strongest-first (count desc), so the head window is the most informative slice.
const MaxCompressQueue = 32
MaxCompressQueue is the buffered channel size for pending compress jobs.
const MaxExecutionNotesLen = 4000
MaxExecutionNotesLen bounds the optional free-form notes field.
const MaxExecutionSummaryLen = 4000
MaxExecutionSummaryLen is the hard cap on the summary field. Picked to leave room for a real debrief paragraph without blowing up prompt budgets when the next agent reads the log.
const MaxExecutionsForPrompt = 20
MaxExecutionsForPrompt is the hard cap on how many recent execution entries we feed into a single synthesis prompt. Keeps the prompt bounded even when a playbook has hundreds of runs.
const MaxFactTextLen = 4000
MaxFactTextLen is the hard cap on a single fact's text. Picked to keep lines comfortable for manual review in any editor and to bound how many prompt tokens a single append can cost on the next synthesis.
const MaxPamOutputSize = 128 * 1024
MaxPamOutputSize caps a single run at 128 KiB to bound blast radius if the LLM produces runaway output and to keep git objects small.
const MaxPamQueue = 16
MaxPamQueue is the buffered channel size for pending Pam jobs.
const MaxPlaybookBodySize = 64 * 1024
MaxPlaybookBodySize caps the LLM output bytes we are willing to commit. Playbooks can be longer than entity briefs (more structured steps), so the ceiling is doubled from MaxBriefSize.
const MaxPlaybookSynthQueue = 32
MaxPlaybookSynthQueue is the buffered channel size for pending jobs.
const MaxRelatedEntries = 10
MaxRelatedEntries bounds the number of "## Related" bullets rendered in a synthesized brief. Ten was the v1 ceiling in the roadmap — enough for a glance, not enough to dominate a narrow article.
const MaxSkillFileBytes = 1024 * 1024
MaxSkillFileBytes caps the size of a sub-resource file an agent may write under team/skills/{name}/. 1 MiB is enough for a long template, generous enough that we don't stop legit content, and small enough to refuse accidental binary uploads.
const MaxSynthesisQueue = 32
MaxSynthesisQueue is the buffered channel size for pending jobs. Overflow surfaces ErrSynthesisQueueSaturated.
const PamSlug = "pam"
PamSlug is the identity used for Pam's sub-process dispatch. Kept distinct from ArchivistAuthor so the git commit author stays "archivist" while the runtime routing can reference "pam" (e.g. tmux window names, log files).
const PlaybookCompiledDirRel = "team/playbooks/.compiled"
PlaybookCompiledDirRel is the directory (under the wiki root) where the compiled SKILL.md files live. Exposed so tests + handlers can resolve skill paths without re-deriving the layout.
const PlaybookSynthesisPromptSystem = `` /* 1329-byte string literal not displayed */
PlaybookSynthesisPromptSystem is the system prompt sent on every call. Locked here so the behavior is reviewable — do not edit casually.
const ReaderHuman = "web"
ReaderHuman is the reader identifier used when a human opens an article in the web UI. Any other non-empty reader value is treated as an agent slug.
const SectionDiscoveryTimeout = 10 * time.Second
SectionDiscoveryTimeout bounds one DiscoverSections call. git log discovery is bounded so a pathological filesystem or large wiki history cannot stall the broker forever.
const SectionsRefreshDebounce = 500 * time.Millisecond
SectionsRefreshDebounce bounds how often DiscoverSections runs under a burst of wiki:write events. Chosen to match the entity-synth debounce cadence so the two background loops don't contend.
const SynthesisPromptSystem = `` /* 473-byte string literal not displayed */
SynthesisPromptSystem is the exact system prompt the worker sends. Wording locked by project_entity_briefs_v1_2.md — do not edit without updating the memo.
The trailing "## Related" section is managed deterministically by the synthesizer from the cross-entity graph log — never invent related-entity bullets. If the LLM output contains a "## Related" section, it is stripped before the authoritative one is appended.
const WhatWeveLearnedHeading = "## What we've learned"
WhatWeveLearnedHeading is the exact heading the synthesizer maintains. Changing this invalidates prior-synthesis section replacement — in-flight playbooks would grow a duplicate heading.
Variables ¶
var ( ErrInvalidLearning = errors.New("team learnings: invalid learning") ErrLearningLogNotRunning = errors.New("team learnings: worker is not attached") )
var ( // ErrIllegalTransition is returned when ApplyTransition is called with // a from/to pair that violates the state matrix. ErrIllegalTransition = errors.New("promotion: illegal state transition") // ErrPromotionNotFound is returned when a lookup hits an unknown ID. ErrPromotionNotFound = errors.New("promotion: not found") // ErrHumanOnlyReviewRequired fires when an agent attempts to approve a // promotion whose resolver returned the human-only sentinel. ErrHumanOnlyReviewRequired = errors.New("promotion: human-only review required; agent approvals are disabled for this path") // ErrWrongReviewer fires when a non-assigned agent tries to act as // reviewer. Humans bypass this check by passing an empty actor slug. ErrWrongReviewer = errors.New("promotion: actor is not the assigned reviewer") // ErrWrongAuthor fires when a non-author tries to resubmit/withdraw. ErrWrongAuthor = errors.New("promotion: actor is not the author") // ErrPromotionAlreadyApproved fires when a second reviewer tries to // approve an already-approved promotion. ErrPromotionAlreadyApproved = errors.New("promotion: already approved") )
Errors surfaced by the state machine.
var ( // ErrCompileCoalesced indicates a compile request collapsed into the // in-flight pass. The pending request was queued; the in-flight pass // will run one extra cycle before exiting. ErrCompileCoalesced = errors.New("compile coalesced into in-flight run") // ErrCompileCooldown indicates the cron tick was suppressed because a // recent compile pass finished within the cooldown window. Manual // triggers are not subject to cooldown. ErrCompileCooldown = errors.New("compile skipped: within cooldown window") )
Sentinel errors callers can branch on.
var AllMaintenanceActions = []MaintenanceAction{ MaintActionSummarize, MaintActionAddCitation, MaintActionExtractFacts, MaintActionLinkRelated, MaintActionSplitLong, MaintActionRefreshStale, MaintActionResolveContradiction, }
AllMaintenanceActions enumerates the supported actions, in display order.
var ErrBackupMissing = errors.New("wiki: backup mirror does not exist")
ErrBackupMissing is returned by RestoreFromBackup when no backup mirror exists to restore from.
var ErrChannelNotFound = errors.New("channel not found")
ErrChannelNotFound is returned by PostInboundSurfaceMessage when the declared channel does not exist in the broker.
var ErrCompressQueueSaturated = errors.New("wiki compress: queue saturated")
ErrCompressQueueSaturated is returned by EnqueueCompress when the buffered channel is full.
var ErrCompressorStopped = errors.New("wiki compress: not running")
ErrCompressorStopped is returned when EnqueueCompress is called after the worker has been stopped.
var ErrDecisionPacketCorrupt = errors.New("decision packet on-disk file is corrupt")
ErrDecisionPacketCorrupt is returned by readDecisionPacketLocked when the on-disk JSON is unparseable. Used internally to drive the regenerate-or-mark-unknown branch.
var ErrDecisionPacketNotFound = errors.New("decision packet not found")
ErrDecisionPacketNotFound is returned by GetDecisionPacket when no packet has been seeded for the task. Callers can distinguish missing from corrupt by inspecting the error.
var ErrEntityGraphNotRunning = errors.New("entity graph: worker is not attached")
ErrEntityGraphNotRunning mirrors ErrFactLogNotRunning — returned when a graph operation runs without a wiki worker attached.
var ErrExecutionLogNotRunning = errors.New("playbook executions: worker is not attached")
ErrExecutionLogNotRunning is returned when Append is called without a wiki worker. The broker wires these together in ensurePlaybookCompiler; tests instantiate ExecutionLog directly with a live worker.
var ErrFactLogNotRunning = errors.New("entity facts: worker is not attached")
ErrFactLogNotRunning is returned when Append is called without a wiki worker. The broker wires these together in ensureWikiWorker; tests using FactLog directly must pass a worker explicitly.
ErrGitUnavailable is returned by Init when the `git` binary cannot be located on $PATH. Callers should surface a banner to the user and fall back to --memory-backend none.
var ErrInboxFilterUnknown = errors.New("inbox: unknown filter")
ErrInboxFilterUnknown is returned by Inbox when the caller passes a filter value not in the InboxFilter* constant set. Surfaces as a 400 from the REST handler.
var ErrIntakeNoProvider = errIntakeNoProvider
ErrIntakeNoProvider is the public alias of errIntakeNoProvider so the Lane F CLI can match against it without importing the internal symbol.
var ErrMaintenanceNoWorker = errors.New("wiki maintenance: no worker")
ErrMaintenanceNoWorker is returned when the assistant is constructed without a wiki worker (markdown backend disabled).
var ErrNotAPlaybook = errors.New("playbook: path must be team/playbooks/{slug}.md (and not under .compiled/)")
ErrNotAPlaybook is returned when CompilePlaybook is called on a path that is not a team/playbooks/*.md article.
var ErrNotebookPathNotAuthorOwned = errors.New("notebook_path_not_author_owned: write path must live under agents/{my_slug}/notebook/")
ErrNotebookPathNotAuthorOwned is returned when an agent tries to write to another agent's notebook directory. Notebooks are author-only on the write side; reads and searches are cross-agent by design (see DESIGN-NOTEBOOK.md).
var ErrPamArticleMissing = errors.New("pam: target article does not exist")
ErrPamArticleMissing is returned when the target article does not exist.
var ErrPamQueueSaturated = errors.New("pam: queue saturated")
ErrPamQueueSaturated is returned by Enqueue when the buffered channel is full. Callers surface as 429.
var ErrPamStopped = errors.New("pam: not running")
ErrPamStopped is returned when Enqueue is called after Stop.
var ErrPlaybookSourceMissing = errors.New("playbook synth: source playbook does not exist")
ErrPlaybookSourceMissing is surfaced when the source article no longer exists. Treat as an idempotent skip — deletion of the authored body makes learnings moot.
var ErrPlaybookSynthNoNewExecutions = errors.New("playbook synth: no new executions since last synthesis")
ErrPlaybookSynthNoNewExecutions is surfaced for observability when a job runs with zero un-synthesized executions. Not a hard failure — skips.
var ErrPlaybookSynthQueueSaturated = errors.New("playbook synth: queue saturated")
ErrPlaybookSynthQueueSaturated is returned when the buffered channel is full.
var ErrPlaybookSynthesizerStopped = errors.New("playbook synth: not running")
ErrPlaybookSynthesizerStopped is returned when Enqueue is called after Stop.
var ErrPolicyRuleEmpty = errors.New("rule cannot be empty")
ErrPolicyRuleEmpty is the sentinel returned by RecordPolicy when the caller passes a blank rule. Callers (HTTP handlers) match on it via errors.Is to map the validation case to 400 — a sentinel keeps that dispatch from drifting if the underlying message ever changes.
var ErrPromotionDemandInvalid = errors.New("promotion_demand: invalid event")
ErrPromotionDemandInvalid is returned by Record when the event is malformed.
var ErrPromotionTargetExists = errors.New("promotion: target wiki path already exists")
ErrPromotionTargetExists is returned when the target wiki path already has content. The state machine maps this to `changes-requested` so the reviewer can work with the author on a different target.
var ErrQueueSaturated = errors.New("wiki: queue saturated, retry on next turn")
ErrQueueSaturated is returned by Enqueue when the buffered request channel is full. Callers (MCP handlers) should surface this to the agent as "wiki queue saturated, retry on next turn" — no hidden retries.
var ErrRepoCorrupt = errors.New("wiki: repo integrity check failed")
ErrRepoCorrupt is returned by Fsck when the underlying git repo has detectable corruption (bad objects, missing refs, etc.).
var ErrSynthCoalesced = errors.New("synth coalesced into in-flight run")
ErrSynthCoalesced indicates a synth request collapsed into an in-flight pass. Callers can branch on this to avoid surfacing a false error.
var ErrSynthesisNoNewFacts = errors.New("entity synth: no new facts since last synthesis")
ErrSynthesisNoNewFacts is surfaced for observability when a job runs with zero un-synthesized facts. Not a hard failure — the job simply skips.
var ErrSynthesisQueueSaturated = errors.New("entity synth: queue saturated")
ErrSynthesisQueueSaturated is returned by EnqueueSynthesis when the buffered channel is full. Callers surface this as a retry-later.
var ErrSynthesizerStopped = errors.New("entity synth: not running")
ErrSynthesizerStopped is returned when EnqueueSynthesis is called after the worker has been stopped.
var ErrUnknownDecisionAction = errors.New("record decision: action is not canonical")
ErrUnknownDecisionAction is returned when rawAction does not map to a canonical recordDecisionAction value. The HTTP layer surfaces it as 400 to distinguish this validation case from internal failures (500).
var ErrUnknownPamAction = errors.New("pam: unknown action")
ErrUnknownPamAction is returned by LookupPamAction when the id is not registered. Callers surface this as a 400.
var ErrWikiSHAMismatch = errors.New("wiki: article changed since it was opened")
ErrWikiSHAMismatch is returned by CommitHuman when the caller's expected_sha does not match the current HEAD SHA for the article. The HTTP handler surfaces this as 409 Conflict + the current article body so the client can show the re-load prompt without a second round trip.
var ErrWorkerStopped = errors.New("wiki: worker is not running")
ErrWorkerStopped is returned when Enqueue is called after the worker has been stopped (context cancelled).
var FallbackHumanIdentity = HumanIdentity{ Name: HumanAuthor, Email: HumanAuthor + "@wuphf.local", Slug: HumanAuthor, }
FallbackHumanIdentity is returned when no git config is set and no cached local identity exists. It matches the v1.4 behaviour so audit history keeps its meaning for single-user installs.
var (
SessionName = nameWithPortSuffix(baseSessionName)
)
SessionName and tmuxSocketName are derived at package init from the broker port resolved via brokeraddr. On the default port they keep their historical values ("wuphf-team", "wuphf"); on any non-default port they gain a "-<port>" suffix. This isolation is what prevents the "spawn first agent: exit status 1" race seen when two WUPHF instances tried to share a single tmux socket + session name.
Functions ¶
func ArtifactKind ¶
ArtifactKind parses the {source} segment of an artifact path. Returns ("", false) when the path does not match the canonical layout.
func ArtifactSHAFromPath ¶
ArtifactSHAFromPath parses the {sha} segment (sans .md extension). Returns ("", false) when the path does not match.
func CleanupPersistedTaskWorktrees ¶
func CleanupPersistedTaskWorktrees() error
func ClearPersistedBrokerState ¶
func ClearPersistedBrokerState() error
func CompilePlaybook ¶
CompilePlaybook reads a team/playbooks/{slug}.md article and writes the corresponding SKILL.md under team/playbooks/.compiled/{slug}/SKILL.md.
The write goes directly to disk (caller is responsible for committing it via the wiki worker — see WikiWorker.EnqueuePlaybookCompile). Returns the wiki-relative path AND the rendered skill bytes. Callers that need to commit the output must use the returned bytes — reading the file back from disk is racy under filesystem pressure in CI: an empty buffer has been observed between WriteFile and a subsequent ReadFile of the same path, which then fails downstream as "content is required". Eliminating the round-trip is strictly cheaper than hardening it.
Idempotency: invoking CompilePlaybook with unchanged source input produces byte-identical output. The downstream git layer collapses byte-identical writes into a no-op, so the audit log stays clean.
func CompilePlaybookAndCommit ¶
func CompilePlaybookAndCommit(ctx context.Context, repo *Repo, wikiPath string) (string, string, error)
CompilePlaybookAndCommit runs CompilePlaybook and, when the output is new (not byte-identical to HEAD), commits it under the archivist identity via the supplied wiki worker. Returns the compiled path + short SHA.
Prefer WikiWorker.EnqueuePlaybookCompile from handler code — that route hits the single-writer queue. This helper is here so the worker's drain goroutine can reuse the same compile-and-commit logic.
func CompiledSkillRelPath ¶
CompiledSkillRelPath returns the wiki-relative path to the compiled skill for the given slug. Does not guarantee the file exists.
func ComputeFactID ¶
func ComputeFactID(artifactSHA string, sentenceOffset int, subject, predicate, object string) string
ComputeFactID is the deterministic fact ID hash from §7.3:
sha256(artifact_sha + "/" + sentence_offset + "/" + norm(subject) +
"/" + norm(predicate) + "/" + norm(object))[:16]
Same artifact + same extraction → same ID. Substrate guarantee.
func DMTargetAgent ¶
DMTargetAgent extracts the agent slug from a DM channel slug. Returns "" if the slug is not a DM.
func DisableRealTaskWorktreeForTests ¶
func DisableRealTaskWorktreeForTests()
DisableRealTaskWorktreeForTests replaces the package-level prepare/cleanup task worktree funcs with no-op stubs and flips the broker-state-load + real-worktree guards so that tests which exercise the local_worktree dispatch path (handleTeamTask etc.) cannot reach `git worktree add` against the developer's wuphf repo, nor load stale state from the user's real ~/.wuphf/.
Intended for TestMain in packages that depend on team and exercise this codepath via integration tests. Currently only internal/teammcp/testmain_test.go. Grep for `ExecutionMode: "local_worktree"` in internal/*/*_test.go to find additional candidates.
Guarded by testing.Testing() so a production caller panics immediately instead of silently corrupting the real task dispatcher for the lifetime of the process. The in-package tests inside the team package get equivalent guards from worktree_guard_test.go's init.
func EscapeForPromptBody ¶
EscapeForPromptBody neutralises the known prompt-injection vectors in s so it is safe to interpolate into an LLM prompt body. The function is idempotent: EscapeForPromptBody(EscapeForPromptBody(s)) == EscapeForPromptBody(s).
The escape is conservative and intentionally visible so extractors and reviewers can tell when a string was altered. Never silently drops content.
Applied at every LLM interpolation site that accepts attacker-influenced text:
- extract_entities_lite.tmpl Body field (wiki_extractor.go:renderPrompt)
- answer_query.tmpl Query field (wiki_query.go:Answer) — hop zero, authenticated user input
- answer_query.tmpl each Source.Excerpt (wiki_query.go:Answer)
- synthesis body + existing brief (entity_synthesizer.go:synthesize)
func ExecutionLogRelPath ¶
ExecutionLogRelPath returns the wiki-relative path to the append-only execution JSONL log for a given slug.
func FactLogAppendSHA ¶
FactLogAppendSHA synthesises the DLQ row key for a fact-log append failure. One artifact extraction can produce append failures for multiple (kind, slug) groups; using the raw artifact SHA would collide across them in readLatestStateLocked. The synthesized form "factlog:{kind}:{slug}:{artifactSHA}" is unique per target file, never collides with an extraction-class entry, and keeps the rest of the DLQ contract unchanged (tombstones, retry bookkeeping, last-write-wins).
func FactLogPath ¶
func FactLogPath(kind EntityKind, slug string) string
FactLogPath returns the path, relative to the wiki root, where the jsonl for a single entity is stored. Exported for tests + handlers.
func FormatChannelView ¶
func FormatChannelView(messages []channelMessage) string
func FormatLookupMessage ¶
func FormatLookupMessage(ans QueryAnswer) string
FormatLookupMessage renders a QueryAnswer as a wiki-shaped chat message per DESIGN-WIKI.md anti-pattern 12:
- Leading hatnote-style italic note ("From the wiki")
- Body: AnswerMarkdown verbatim (contains <sup>[n]</sup> citations)
- Trailing numbered sources list
- PageFooter action-links style: "Last updated: {most-recent valid_from}"
NO card, NO callout, NO alert block (anti-pattern 12). The returned string is plain markdown ready for a chat message content field.
func GitCleanEnv ¶
Deprecated
func GitCleanEnv() []string
GitCleanEnv is a thin backwards-compatibility shim that delegates to gitexec.CleanEnv. The canonical implementation (and the full godoc describing the GIT_DIR / GIT_CONFIG_* strip policy) now lives in internal/gitexec. Kept exported for one release as a safety net for any out-of-tree callers; new code should import gitexec directly.
Deprecated: use gitexec.CleanEnv directly.
func HasLiveTmuxSession ¶
func HasLiveTmuxSession() bool
HasLiveTmuxSession returns true if a wuphf-team tmux session is running. Routes through paneLifecycle (PLAN.md §C5b) so tests can drive it via setTmuxRunnerForTest without a real tmux server.
func InferAgentDomain ¶
InferAgentDomain maps an agent slug to its primary work domain.
func InferTextDomain ¶
InferTextDomain classifies a text snippet into a work domain.
func IsArtifactPath ¶
IsArtifactPath reports whether relPath matches the canonical artifact layout. Exported so the extractor + tests can guard against non-artifact paths slipping into the extraction hook.
func IsDraining ¶ added in v0.92.0
func IsDraining() bool
IsDraining reports whether any Launcher is currently draining. Pane dispatch and notify loops consult this to short-circuit new work after /admin/pause has been accepted.
func IsPlaybookPath ¶
IsPlaybookPath returns true when relPath is a source playbook article.
func IsSafeTaskID ¶ added in v0.193.0
IsSafeTaskID guards task IDs that flow into filesystem paths or process launchers. The allow-list (alphanumeric + `-` + `_`, ≤128 chars) was originally introduced in the CLI to keep `open`/`xdg-open` from re-parsing shell-meta characters; it is now also enforced at the broker HTTP layer + the on-disk Decision Packet store to close the path-traversal vector that an authenticated WUPHF_BROKER_TOKEN caller could otherwise reach by POSTing `/tasks/..%2F..%2Fetc%2Fx/block`.
func IsSkillAuthoringTool ¶ added in v0.87.0
IsSkillAuthoringTool reports whether toolName corresponds to one of the skill-authoring MCP tools whose invocation should reset the counter instead of incrementing it. Lives here (not in teammcp) so the broker hot path stays inside the team package and avoids an import cycle.
func JaroWinkler ¶
JaroWinkler returns the Jaro-Winkler similarity in [0.0, 1.0] between a and b. Higher = more similar; 1.0 = identical, 0.0 = no similarity.
The scaling factor p is fixed at 0.1 (Winkler's standard). The prefix bonus is capped at 4 characters per the original definition.
func MinimalBrief ¶
func MinimalBrief(ent IndexEntity) string
MinimalBrief returns the canonical placeholder brief content for a freshly-minted (ghost) entity that has no synthesized facts yet. The output is deterministic for a given IndexEntity — same input always produces byte-identical output, so substrate-rebuild round-trips (§7.4) hold.
Fields included: frontmatter (slug, canonical_slug, kind, aliases sorted ascending, signals normalized), a single H1, a Signals stub, and an archivist byline. No timestamps in the body — the frontmatter's created_at is the only time field, and it pins to the IndexEntity's CreatedAt (already deterministic at the call site).
func NewDefaultIntakeProvider ¶ added in v0.193.0
func NewDefaultIntakeProvider() *defaultIntakeProvider
NewDefaultIntakeProvider returns the production IntakeProvider with the design-doc default 30s timeout. Callers that need a shorter ceiling (tests, manual smoke tools) construct the struct directly.
func NewDefaultLLMProvider ¶ added in v0.86.0
func NewDefaultLLMProvider(systemPromptPath string) *defaultLLMProvider
NewDefaultLLMProvider returns a provider that classifies articles via the configured LLM CLI. systemPromptPath is the on-disk path of the skill-creator.md system prompt (typically <wikiRoot>/team/skills/.system/skill-creator.md). When empty or missing the embedded default prompt is used.
func NewDefaultStageBLLMProvider ¶ added in v0.86.0
func NewDefaultStageBLLMProvider(b *Broker) *defaultStageBLLMProvider
NewDefaultStageBLLMProvider constructs a provider bound to broker b. The system prompt is loaded lazily on first SynthesizeSkill call so test brokers without a wiki worker pay no startup cost.
func NormalizeForFactID ¶
NormalizeForFactID normalizes a triplet component per §7.3: NFC-normalize (so NFD vs NFC forms of the same glyph produce the same hash), then lowercase, trim, replace non-alphanumeric runs with a single dash.
func NormalizeOneOnOneAgent ¶
func NormalizeSessionMode ¶
func PlaybookSlugFromPath ¶
PlaybookSlugFromPath returns the slug of a team/playbooks/{slug}.md path, or ("", false) when the path is not a source playbook.
func PromotionDemandSignalLabel ¶ added in v0.139.0
func PromotionDemandSignalLabel(s PromotionDemandSignal) string
PromotionDemandSignalLabel is the exported alias of signalLabel. PR 4 (teammcp.team_notebook_review) needs the rendered label string for the CEO-facing JSON, and the teammcp package can't see unexported helpers.
func RegisterTransports ¶ added in v0.114.0
RegisterTransports registers all configured transport adapters against the broker. Called once per launch after broker.Start() succeeds. Returns a cleanup function that cancels all running adapters; always non-nil and safe to call even on the error path. The error return is reserved for future required adapters; all current adapters are optional and log failures rather than returning them.
func RegistryKeyForActionCapability ¶
func RegistryKeyForActionCapability(cap action.Capability) string
func RelativeJoinURL ¶ added in v0.127.0
RelativeJoinURL is the degenerate builder used when no absolute host is known (e.g. the launcher does not yet know the share controller's bind address). It returns "/join/<token>" so callers can prepend their own host and the contract is satisfied with a non-empty result.
func RenderSkillMarkdown ¶ added in v0.86.0
func RenderSkillMarkdown(fm SkillFrontmatter, body string) ([]byte, error)
RenderSkillMarkdown serialises fm and body into a markdown document with YAML frontmatter delimiters. Name and Description must be non-empty. The body is trimmed of leading/trailing whitespace.
func RenderTeamLearningsMarkdown ¶ added in v0.97.0
func RenderTeamLearningsMarkdown(records []LearningRecord) string
func ResetBrokerState ¶
func ResetBrokerState() error
func ResolveActionProviderForCapability ¶
func ResolveActionProviderForCapability(cap action.Capability) (action.Provider, error)
func ReviewLogPath ¶
ReviewLogPath returns the canonical JSONL path for the given wiki root.
func SendTelegramMessage ¶
SendTelegramMessage sends a text message to a Telegram chat using the given bot token.
func SendTypingAction ¶
SendTypingAction sends a "typing" chat action to a Telegram chat.
The 30s deadline is derived from the caller's ctx — transport drain and typing loops pass their parent ctx so a transport shutdown cancels any in-flight chat-action call.
func SetTargetBrokerURLResolver ¶ added in v0.92.0
SetTargetBrokerURLResolver is retained as a no-op public API for callers that wired the previous hand-rolled pause-proxy seam. The orchestrator now owns cross-broker resolution; this function exists solely to avoid breaking compile-time consumers (cmd/wuphf/main.go) until they are migrated. Safe to remove once main.go drops the call.
func SlugifyTelegramTitle ¶ added in v0.99.10
SlugifyTelegramTitle is the canonical slug rule for Telegram-bridged channels. Both the TUI's `/connect telegram` and the web wizard route through this so the two paths can never produce different slugs for the same chat title.
func Staleness ¶
Staleness applies the §8.1 formula:
staleness = (days_old × type_weight) − (confidence × 10) − reinforcement_bonus
type_weight: status=1.0, observation=0.5, relationship=0.2, background=0.1
reinforcement_bonus = 5.0 × max(0, 1 − days_since_reinforced / 30)
Returns a read-time visibility score. Higher = staler. Query-time filter excludes staleness > 20 for status/recency queries (§8.1).
func StartOpenclawRouter ¶
func StartOpenclawRouter(ctx context.Context, broker *Broker, bridge *OpenclawBridge) <-chan struct{}
StartOpenclawRouter starts the mention+DM routing goroutine. Exported so out-of-package callers (e.g. bridge probes) can opt into the same routing behavior production WUPHF runs via launcher.go. The goroutine exits when ctx is cancelled. The returned channel is closed when the goroutine has fully exited — callers should block on it before stopping the bridge to avoid races between in-flight broker writes and broker shutdown.
func ValidateExecutionInput ¶
func ValidateExecutionInput(slug string, outcome PlaybookOutcome, summary, notes, recordedBy string) error
ValidateExecutionInput checks every field of a prospective execution entry. Returns nil when acceptable to persist.
func ValidateFactInput ¶
func ValidateFactInput(kind EntityKind, slug, text, sourcePath, recordedBy string) error
ValidateFactInput checks every field of a prospective fact. Returns nil when the fact is acceptable to persist. Exported so HTTP handlers can validate before they format a response.
func ValidateLearningInput ¶ added in v0.97.0
func ValidateLearningInput(rec LearningRecord) error
func VerifyBot ¶
VerifyBot checks the bot token by calling getMe and returns the bot's display name.
func VerifyChat ¶
VerifyChat checks if a chat ID is valid and returns its title.
func WikiBackupDir ¶
func WikiBackupDir() string
WikiBackupDir returns the path to the lightweight backup mirror.
func WikiRootDir ¶
func WikiRootDir() string
WikiRootDir returns the canonical on-disk path for the team wiki. It honours config.RuntimeHomeDir so dev runs stay isolated from prod.
func WriteSharedMemory ¶
func WriteSharedMemory(ctx context.Context, note SharedMemoryWrite) (string, error)
Types ¶
type ACItem ¶ added in v0.193.0
type ACItem struct {
Statement string `json:"statement,omitempty"`
Done bool `json:"done,omitempty"`
}
ACItem is one acceptance-criterion checklist row. Done is always false when emitted by the intake agent; the owner agent flips it when the session report commits.
type AgentLogEntriesResponse ¶ added in v0.105.2
type AgentLogEntriesResponse struct {
Task string `json:"task"`
Entries []agent.TaskLogEntry `json:"entries"`
}
type AgentLogTasksResponse ¶ added in v0.105.2
type AgentLogTasksResponse struct {
Tasks []agent.TaskLogSummary `json:"tasks"`
}
type ArticleMeta ¶
type ArticleMeta struct {
Path string `json:"path"`
Title string `json:"title"`
Content string `json:"content"`
LastEditedBy string `json:"last_edited_by"`
LastEditedTs string `json:"last_edited_ts"`
// CommitSHA is the short SHA of the most recent commit touching this
// article. The editor sends it back as expected_sha on save so the
// broker can detect conflicting writes that landed after the editor
// opened. Empty when the article has no commit history yet.
CommitSHA string `json:"commit_sha"`
Revisions int `json:"revisions"`
Contributors []string `json:"contributors"`
Backlinks []Backlink `json:"backlinks"`
WordCount int `json:"word_count"`
Categories []string `json:"categories"`
// Read tracking — populated when BuildArticle is called with a non-empty reader.
// LastRead is nil when the article has never been accessed by anyone.
LastRead *time.Time `json:"last_read,omitempty"`
HumanReadCount int `json:"human_read_count"`
AgentReadCount int `json:"agent_read_count"`
// DaysUnread is whole days since LastRead; 0 when accessed today or never.
DaysUnread int `json:"days_unread"`
// Ghost is true when the article's frontmatter contains ghost: true —
// a placeholder stub written by persistGhostBriefs, not yet LLM-synthesized.
Ghost bool `json:"ghost,omitempty"`
// SynthesisQueued is true when a synthesis job is in-flight or pending for
// this entity. Computed at serve time from the EntitySynthesizer coalescing
// set — never persisted. Only true when Ghost is also true.
SynthesisQueued bool `json:"synthesis_queued,omitempty"`
}
ArticleMeta is the rich view sent to the UI for an article. The JSON shape matches web/src/api/wiki.ts WikiArticle.
type AuditEntry ¶
type AuditEntry struct {
SHA string
Author string
Timestamp time.Time
Message string
// Paths are the git pathspecs modified in this commit, forward-slashed.
// Merge commits and commits that only touched index/ will appear with
// whatever git log --name-only reports; callers can filter as needed.
Paths []string
}
AuditEntry is a single cross-article commit surfaced by AuditLog. Unlike CommitRef (which powers per-article history), this carries the list of files touched by the commit so reviewers can reconstruct the full diff surface without running a second `git show`.
type AutoAssignCountdown ¶ added in v0.193.0
type AutoAssignCountdown struct {
// contains filtered or unexported fields
}
AutoAssignCountdown is the cancellable 3-second timer Lane F drives when Spec.AutoAssign is non-empty. The CLI calls Wait() in one goroutine and Cancel() from the keypress goroutine; whichever fires first decides the outcome. Wait returns true when the timer elapsed without cancellation, false when Cancel landed first or the parent context was cancelled.
Method semantics are idempotent and goroutine-safe: callers may invoke Cancel() from any goroutine, multiple times. The internal channel is closed exactly once.
func NewAutoAssignCountdown ¶ added in v0.193.0
func NewAutoAssignCountdown() *AutoAssignCountdown
NewAutoAssignCountdown returns a fresh countdown configured for the design-doc-mandated 3 seconds. Tests inject shorter durations via the test-only newAutoAssignCountdownWithDuration helper.
func (*AutoAssignCountdown) Cancel ¶ added in v0.193.0
func (c *AutoAssignCountdown) Cancel()
Cancel signals that the user pressed a key during the countdown. Safe to call multiple times; only the first call closes the channel. After Cancel, Wait returns false.
func (*AutoAssignCountdown) Duration ¶ added in v0.193.0
func (c *AutoAssignCountdown) Duration() time.Duration
Duration returns the configured countdown duration. Used by tests and surfaced for the CLI's elapsed-time display.
func (*AutoAssignCountdown) Wait ¶ added in v0.193.0
func (c *AutoAssignCountdown) Wait(ctx context.Context) bool
Wait blocks until the countdown elapses, the parent context cancels, or Cancel is called. Returns true when the countdown elapsed cleanly, false otherwise (interrupted or context cancellation).
type AutoNotebookCounters ¶ added in v0.130.0
type AutoNotebookCounters struct {
Enqueued int64
Written int64
Deduped int64
Redacted int64
NonRoster int64
WriteFailed int64
QueueSaturated int64
NoopTransition int64
}
AutoNotebookCounters is a snapshot of the writer's observability counters. Returned by Counters() for tests and (eventually) the TODO #18 metrics surface.
type AutoNotebookEventKind ¶ added in v0.130.0
type AutoNotebookEventKind string
AutoNotebookEventKind identifies which broker hook produced an event. The string values are written into entry filenames and section headers, so they participate in the public format and must stay stable.
const (
	AutoNotebookEventMessagePosted    AutoNotebookEventKind = "message_posted"
	AutoNotebookEventTaskTransitioned AutoNotebookEventKind = "task_transitioned"
)
type AutoNotebookWriter ¶ added in v0.130.0
type AutoNotebookWriter struct {
// contains filtered or unexported fields
}
AutoNotebookWriter ingests broker events and writes notebook entries. Lifecycle mirrors WikiWorker: NewAutoNotebookWriter → Start(ctx) → Stop(timeout). Safe for concurrent Handle() callers.
func NewAutoNotebookWriter ¶ added in v0.130.0
func NewAutoNotebookWriter(wiki autoNotebookWriterClient, roster autoNotebookRoster) *AutoNotebookWriter
NewAutoNotebookWriter constructs an idle writer. Call Start to begin processing. Either argument may be nil for tests; nil wiki disables writes, nil roster disables the membership filter.
func (*AutoNotebookWriter) Counters ¶ added in v0.130.0
func (w *AutoNotebookWriter) Counters() AutoNotebookCounters
Counters returns a thread-safe snapshot of the writer's atomic counters.
func (*AutoNotebookWriter) Handle ¶ added in v0.130.0
func (w *AutoNotebookWriter) Handle(evt autoNotebookEvent)
Handle is the broker-side ingress. Roster-filters and validates, then does a non-blocking enqueue. Drops with a counter increment when the queue is full (decision S3A). Always cheap to call from a hot path.
func (*AutoNotebookWriter) Start ¶ added in v0.130.0
func (w *AutoNotebookWriter) Start(ctx context.Context)
Start launches the drain goroutine. Idempotent: a second call is a no-op.
func (*AutoNotebookWriter) Stop ¶ added in v0.130.0
func (w *AutoNotebookWriter) Stop(timeout time.Duration)
Stop signals the drain goroutine to exit and waits up to timeout for it to finish. Idempotent. Returns even if the deadline elapses with events still in flight — caller may inspect counters to detect drops.
Implementation note: w.queue is intentionally NOT closed. Concurrent Handle() callers may already be past the running.Load() fast-path check when Stop runs, and a send-to-closed-chan would panic. Closing stopCh and letting run() bail on it covers shutdown without that hazard. The queue itself becomes garbage once all references are gone.
func (*AutoNotebookWriter) WaitForCondition ¶ added in v0.130.0
func (w *AutoNotebookWriter) WaitForCondition(ctx context.Context, predicate func() bool) error
WaitForCondition blocks until predicate returns true, ctx is cancelled, or the writer stops. Returns ctx.Err() on timeout/cancel and nil on success. Test-only entry point — production code never waits on the writer.
type Backlink ¶
type Backlink struct {
Path string `json:"path"`
Title string `json:"title"`
AuthorSlug string `json:"author_slug"`
}
Backlink represents another article that wikilinks to this article. The JSON shape matches web/src/api/wiki.ts WikiArticle.backlinks[].
type BleveTextIndex ¶
type BleveTextIndex struct {
// contains filtered or unexported fields
}
BleveTextIndex implements TextIndex via blevesearch/bleve/v2.
func NewBleveTextIndex ¶
func NewBleveTextIndex(dir string) (*BleveTextIndex, error)
NewBleveTextIndex opens (or creates) the bleve index at dir. The caller must call Close() when done.
func (*BleveTextIndex) Close ¶
func (b *BleveTextIndex) Close() error
Close releases the bleve index handle.
func (*BleveTextIndex) Delete ¶
func (b *BleveTextIndex) Delete(_ context.Context, factID string) error
Delete removes a fact from the bleve index by its ID.
func (*BleveTextIndex) Index ¶
func (b *BleveTextIndex) Index(_ context.Context, f TypedFact) error
Index adds or replaces a fact in the bleve index.
func (*BleveTextIndex) Search ¶
Search runs a BM25 query against the `text` field and returns up to topK hits ordered by descending relevance score. topK is clamped at bleveMaxTopK.
Uses a MatchQuery with the English analyser so stemmed terms ("promoted", "promotion" → "promot") match correctly against the indexed tokens.
type BridgeBackoff ¶
type BridgeBackoff struct {
// contains filtered or unexported fields
}
BridgeBackoff produces exponential-with-jitter delays for reconnect loops. Suitable for any bridge (Telegram, OpenClaw, future).
func NewBridgeBackoff ¶
func NewBridgeBackoff(base, cap time.Duration) *BridgeBackoff
func (*BridgeBackoff) Next ¶
func (b *BridgeBackoff) Next() time.Duration
Next returns the next delay; safe for concurrent callers.
type BriefSummary ¶
type BriefSummary struct {
Kind EntityKind `json:"kind"`
Slug string `json:"slug"`
Title string `json:"title"`
FactCount int `json:"fact_count"`
LastSynthesizedTS string `json:"last_synthesized_ts"`
LastSynthesizedSHA string `json:"last_synthesized_sha"`
PendingDelta int `json:"pending_delta"`
}
BriefSummary is one row returned by GET /entity/briefs.
type Broker ¶
type Broker struct {
// contains filtered or unexported fields
}
Broker is a lightweight HTTP message broker for the team channel. All agent MCP instances connect to this shared broker.
func NewBroker ¶
func NewBroker() *Broker
NewBroker constructs a Broker bound to defaultBrokerStatePath() resolved at call time. Production code uses this so the CLI resumes from the default ~/.wuphf/team/broker-state.json (or its WUPHF_BROKER_STATE_PATH / WUPHF_RUNTIME_HOME override). Tests should prefer NewBrokerAt or the newTestBroker(t) helper — both pin a per-test path explicitly.
func NewBrokerAt ¶
NewBrokerAt constructs a Broker whose state is persisted to statePath. The path is bound at construction time and stored on the Broker, so late-arriving goroutines (or sibling brokers built at other paths in the same process) cannot retarget this broker's saves. Use this instead of NewBroker() everywhere that needs path isolation — notably tests that want to pin state under t.TempDir.
Panics on an empty statePath. With "" the broker would silently write `.last-good` and `<empty>.tmp.<rand>` files into the process cwd, which is the kind of foot-gun that only surfaces in production when a CI runner happens to execute from a writable directory.
func (*Broker) AckTask ¶ added in v0.105.2
func (b *Broker) AckTask(req TaskAckRequest) (TaskResponse, error)
func (*Broker) AgentIssues ¶ added in v0.89.0
func (b *Broker) AgentIssues() []agentIssueRecord
func (*Broker) AgentStream ¶
AgentStream returns (or lazily creates) the stream buffer for a given agent slug. It is safe to call concurrently.
func (*Broker) AllMessages ¶
func (b *Broker) AllMessages() []channelMessage
AllMessages returns a copy of all messages across all channels, ordered by creation time. Use this when the caller needs to search across channels rather than in a single known channel.
func (*Broker) AllTasks ¶
func (b *Broker) AllTasks() []teamTask
AllTasks returns a copy of all tasks across all channels. Use this when the caller needs to search across channels rather than in a single known channel.
func (*Broker) AppendDiffSummary ¶ added in v0.193.0
func (b *Broker) AppendDiffSummary(taskID string, files []DiffSummary) error
AppendDiffSummary replaces the ChangedFiles list with the supplied slice. The owner agent recomputes the full diff at session-report time, so a wholesale replace matches the producer's mental model.
func (*Broker) AppendReviewerGrade ¶ added in v0.193.0
func (b *Broker) AppendReviewerGrade(taskID string, grade ReviewerGrade) error
AppendReviewerGrade is the multi-writer hot path. Each reviewer agent calls in once with their grade; the broker serialises the appends via b.mu. Emits review.submitted. Mirrors into Lane D's routing-side store so convergence runs on every grade.
func (*Broker) AppendSessionReport ¶ added in v0.193.0
func (b *Broker) AppendSessionReport(taskID string, report SessionReport) error
AppendSessionReport replaces the SessionReport on the packet for taskID. The owner agent commits one session report per session; resumes (changes_requested → running) replace the prior report. Emits artifact.ready.
func (*Broker) AppendTaskDetail ¶
AppendTaskDetail appends non-duplicate detail text to an existing task without changing ownership or status.
func (*Broker) AssignReviewers ¶ added in v0.193.0
AssignReviewers stamps the resolved (or manually overridden) reviewer slug list onto the task and stamps ReviewStartedAt with the broker's clock. Idempotent: re-calling with the same slugs is a no-op for the reviewer list but always re-stamps the start time so a re-entered review window has a fresh deadline.
The caller is expected to be the lifecycle transition layer's running → review hook. Lane D's broker_reviewer_routing.go does not register that hook itself; Lane A's transition layer is the canonical invocation point. For tests, AssignReviewers can be called directly.
func (*Broker) AttachOpenclawBridge ¶
func (b *Broker) AttachOpenclawBridge(bridge *OpenclawBridge)
AttachOpenclawBridge wires the OpenClaw bridge into the broker so handleOfficeMembers can drive live subscribe/unsubscribe/sessions.create/ sessions.end calls as members are hired and fired. Called by the launcher after StartOpenclawBridgeFromConfig succeeds. Safe to call with nil to detach (tests).
func (*Broker) BlockTask ¶
BlockTask transitions taskID to LifecycleStateBlockedOnPRMerge and records `blockerID` in task.BlockedOn so the unblock cascade fires automatically when the blocker merges.
Pass blockerID="" to block without a typed blocker (legacy callers that just want to pause a task without naming the dependency). Multiple BlockedOn entries are supported; this call appends without duplicating an existing entry.
func (*Broker) ChannelMessages ¶
func (*Broker) ChannelStore ¶
ChannelStore returns the channel store for DM type checks and member lookups.
func (*Broker) ChannelTasks ¶
func (*Broker) CreateRequest ¶
func (*Broker) CreateWatchdogAlert ¶
func (*Broker) DMPartner ¶
DMPartner returns the non-human member slug of a 1:1 DM channel. Returns "" if the channel is not a DM, does not exist, or is a group DM. Used by surface bridges to resolve who the human is talking to when routing DM posts to the right agent without requiring an @mention.
func (*Broker) DisabledMembers ¶
DisabledMembers returns the slugs explicitly disabled for a channel — members who were present in ch.Members at some point but have been muted for this channel. Callers use this to distinguish "never added" (which an explicit @-tag can bypass) from "deliberately muted" (which an @-tag must respect — muting an agent is the user's explicit intent to silence them).
func (*Broker) DueSchedulerJobs ¶
func (b *Broker) DueSchedulerJobs() []schedulerJob
func (*Broker) EnabledMembers ¶
func (*Broker) EnqueueSectionsRefresh ¶
func (b *Broker) EnqueueSectionsRefresh()
EnqueueSectionsRefresh is the broker-level adapter the wiki worker calls after a successful team wiki write. Implements wikiSectionsNotifier. No-op when the cache is not attached (tests, non-markdown backend).
func (*Broker) EnsureBridgedMember ¶
EnsureBridgedMember registers a bridged external agent as an office member so it appears in the sidebar and can be @mentioned. Idempotent — calling with an existing slug is a no-op. CreatedBy tags the source (e.g. "openclaw") so the UI can distinguish bridged agents from built-ins or user-generated ones.
func (*Broker) EnsureDirectChannel ¶
EnsureDirectChannel opens (or returns) the 1:1 DM channel between the default human member and agentSlug. Returns the canonical channel slug (pair-sorted via channel.DirectSlug). Safe to call repeatedly; the DM row is upserted in both the channel store and the in-memory broker table so it shows up in the sidebar and findChannelLocked resolves it.
func (*Broker) EnsurePlannedTask ¶
func (*Broker) EnsureTask ¶
func (*Broker) EntityGraph ¶
func (b *Broker) EntityGraph() *EntityGraph
EntityGraph returns the active cross-entity graph or nil.
func (*Broker) EntitySynthesizer ¶
func (b *Broker) EntitySynthesizer() *EntitySynthesizer
EntitySynthesizer returns the active synthesizer or nil.
func (*Broker) EvaluateConvergence ¶ added in v0.193.0
EvaluateConvergence is the public wrapper around evaluateConvergenceLocked. Acquires b.mu. Used by tests and by the background sweeper goroutine.
func (*Broker) ExternalQueue ¶
ExternalQueue returns messages that need to be sent to external surfaces for the given provider. Each message is returned at most once.
func (*Broker) FindRequest ¶
func (*Broker) FocusModeEnabled ¶
func (*Broker) GetDecisionPacket ¶ added in v0.193.0
func (b *Broker) GetDecisionPacket(taskID string) (DecisionPacket, error)
GetDecisionPacket returns a copy of the in-memory packet for taskID. On a cache miss, attempts to read from disk. If the on-disk file is corrupt, the broker logs a warning and returns ErrDecisionPacketNotFound — callers fall back to regenerate-or-unknown via OnDecisionPacketCorrupt.
func (*Broker) HasBlockingRequest ¶
func (*Broker) HasPendingInterview ¶
func (*Broker) HasRecentlyTaggedAgents ¶
HasRecentlyTaggedAgents returns true if any agent was @mentioned within the given duration and has not yet replied (i.e. is presumably "typing").
func (*Broker) HumanHasPosted ¶ added in v0.131.0
HumanHasPosted reports whether any human-authored message has reached the broker since process start (with bootstrap from the persisted log). Used by /office-members to publish the meta.humanHasPosted flag the frontend reads.
func (*Broker) InFlightTasks ¶
func (b *Broker) InFlightTasks() []teamTask
InFlightTasks returns tasks that have an assigned owner and a non-terminal status (anything except "done", "completed", "canceled", or "cancelled").
func (*Broker) Inbox ¶ added in v0.193.0
func (b *Broker) Inbox(filter InboxFilter) (InboxPayload, error)
Inbox returns the indexed inbox payload for the given filter without any auth filtering. Callers are expected to have already authorized; the REST handler in broker_inbox_handler.go composes auth on top via inboxForActor.
O(1) for counts (reads b.lifecycleIndex bucket lengths). O(N) only over the rows being returned — never iterates b.tasks as a whole.
func (*Broker) InsightsCursor ¶
func (*Broker) IntakeSpec ¶ added in v0.193.0
IntakeSpec returns the persisted Spec for a task created via StartIntake. The (Spec, ok) shape lets callers distinguish "no spec recorded" from "empty spec recorded"; v1 only writes specs that pass validation, so a missing entry is the absence signal.
func (*Broker) IsAgentMemberSlug ¶ added in v0.130.0
IsAgentMemberSlug returns true when `slug` matches a registered office member and is not a human/system slug. Acquires b.mu — DO NOT call from a path that already holds it; use isAgentMemberSlugLocked instead.
func (*Broker) LifecycleIndexSnapshot ¶ added in v0.185.1
func (b *Broker) LifecycleIndexSnapshot() map[LifecycleState][]string
LifecycleIndexSnapshot returns a copy of the indexed lookup map, useful for test assertions. Acquires b.mu.
func (*Broker) ListPolicies ¶
func (b *Broker) ListPolicies() []officePolicy
ListPolicies returns all active policies.
func (*Broker) ListTasks ¶ added in v0.105.2
func (b *Broker) ListTasks(req TaskListRequest) (TaskListResponse, error)
func (*Broker) MarkRoutingTargets ¶
MarkRoutingTargets records implicit routing recipients as active so the UI can show typing/thinking state without persisting a routing banner message.
func (*Broker) MemberProviderBinding ¶
func (b *Broker) MemberProviderBinding(slug string) provider.ProviderBinding
MemberProviderBinding returns the per-agent provider binding for slug, or the zero value if the member does not exist. Safe to call from outside the broker; takes the mutex internally.
func (*Broker) MemberProviderKind ¶
MemberProviderKind returns the per-member runtime kind for the given slug, or "" if the member does not exist or has no explicit binding. Callers should fall back to the global runtime when the return value is empty. Used by the launcher's dispatch switch so each agent can run on its own provider (e.g., one Codex agent + one Claude Code agent in the same team).
func (*Broker) Messages ¶
func (b *Broker) Messages() []channelMessage
Messages returns all channel messages (for the Go TUI channel view).
func (*Broker) MigrateLifecycleStatesOnce ¶ added in v0.185.1
func (b *Broker) MigrateLifecycleStatesOnce()
MigrateLifecycleStatesOnce is the broker startup entry point. Safe to call from any number of init hooks; the underlying migration runs exactly once per Broker pointer. Acquires b.mu internally.
func (*Broker) MutateTask ¶ added in v0.108.5
func (b *Broker) MutateTask(body TaskPostRequest) (TaskResponse, error)
func (*Broker) NotebookSearchAll ¶ added in v0.140.0
func (b *Broker) NotebookSearchAll(_ context.Context, query string) ([]WikiSearchHit, []string, error)
NotebookSearchAll runs a substring search across every notebook shelf (slug=all) and returns the merged hits with a parallel slice of owner slugs. This is the broker-side adapter for ChannelIntentDispatcher and any future caller that needs the same cross-shelf rollup outside the HTTP path.
Lock invariant: this method does NOT acquire b.mu. It calls notebookSearchSlugs (which itself calls b.OfficeMembers and so does briefly take b.mu), then iterates worker.NotebookSearch — both are safe to invoke from a goroutine.
func (*Broker) NotificationCursor ¶
func (*Broker) OfficeMembers ¶
func (b *Broker) OfficeMembers() []officeMember
func (*Broker) OnDecisionRecorded ¶ added in v0.185.1
OnDecisionRecorded is the registered handler for the future decision.recorded manifest event (emitted by Lane C). The handler extends unblockDependentsLocked over the union of DependsOn and BlockedOn so tasks waiting on a PR merge transition into review the instant the blocking decision lands. Acquires b.mu and persists; the auto-notebook publish runs after persistence to mirror the existing cascade pattern.
func (*Broker) OnReviewerConvergence ¶ added in v0.193.0
OnReviewerConvergence is the hook Lane D's convergence rule calls when all assigned reviewers have graded (or the timeout fired). The hook emits decision.required and transitions the task into the decision lifecycle state. Lane D owns the rule; Lane C owns the transition + event.
func (*Broker) PamDispatcher ¶
func (b *Broker) PamDispatcher() *PamDispatcher
PamDispatcher returns the live dispatcher or nil. Kept for the SSE subscribe path and other callers that want to check liveness without triggering lazy construction.
func (*Broker) PlaybookExecutionLog ¶
func (b *Broker) PlaybookExecutionLog() *ExecutionLog
PlaybookExecutionLog returns the active ExecutionLog, or nil before ensurePlaybookExecutionLog has run. Exposed so handler code can share the one instance the worker initialized.
func (*Broker) PlaybookSynthesizer ¶
func (b *Broker) PlaybookSynthesizer() *PlaybookSynthesizer
PlaybookSynthesizer returns the active synthesizer or nil before Start has wired it.
func (*Broker) PostAutomationMessage ¶
func (*Broker) PostInboundSurfaceMessage ¶
func (b *Broker) PostInboundSurfaceMessage(from, channel, content, provider string) (channelMessage, error)
PostInboundSurfaceMessage posts a message from an external surface into the broker channel.
func (*Broker) PostMessage ¶
func (*Broker) PostSystemMessage ¶
PostSystemMessage posts a lightweight system message that shows progress without blocking.
func (*Broker) PublishEntityBriefSynthesized ¶
func (b *Broker) PublishEntityBriefSynthesized(evt EntityBriefSynthesizedEvent)
PublishEntityBriefSynthesized fans out a synthesis event. Implements the entityEventPublisher interface consumed by EntitySynthesizer.
func (*Broker) PublishEntityFactRecorded ¶
func (b *Broker) PublishEntityFactRecorded(evt EntityFactRecordedEvent)
PublishEntityFactRecorded fans out a fact-recorded event.
func (*Broker) PublishNotebookEvent ¶
func (b *Broker) PublishNotebookEvent(evt notebookWriteEvent)
PublishNotebookEvent fans out a commit notification to all SSE subscribers. Implements the notebookEventPublisher interface consumed by WikiWorker.
func (*Broker) PublishPamActionDone ¶
func (b *Broker) PublishPamActionDone(evt PamActionDoneEvent)
PublishPamActionDone implements pamEventPublisher.
func (*Broker) PublishPamActionFailed ¶
func (b *Broker) PublishPamActionFailed(evt PamActionFailedEvent)
PublishPamActionFailed implements pamEventPublisher.
func (*Broker) PublishPamActionStarted ¶
func (b *Broker) PublishPamActionStarted(evt PamActionStartedEvent)
PublishPamActionStarted implements pamEventPublisher.
func (*Broker) PublishPlaybookExecutionRecorded ¶
func (b *Broker) PublishPlaybookExecutionRecorded(evt PlaybookExecutionRecordedEvent)
PublishPlaybookExecutionRecorded fans out the SSE event. Implements the playbookEventPublisher interface consumed by WikiWorker.
func (*Broker) PublishPlaybookSynthesized ¶
func (b *Broker) PublishPlaybookSynthesized(evt PlaybookSynthesizedEvent)
PublishPlaybookSynthesized fans out a synthesis event. Implements the playbookSynthEventPublisher interface consumed by PlaybookSynthesizer.
func (*Broker) PublishWikiEvent ¶
func (b *Broker) PublishWikiEvent(evt wikiWriteEvent)
PublishWikiEvent fans out a commit notification to all SSE subscribers. Implements the wikiEventPublisher interface consumed by WikiWorker.
func (*Broker) PublishWikiSectionsUpdated ¶
func (b *Broker) PublishWikiSectionsUpdated(evt WikiSectionsUpdatedEvent)
PublishWikiSectionsUpdated fans out a section-updated event to every current SSE subscriber. Non-blocking per subscriber: a slow consumer loses events rather than stalling the publisher.
func (*Broker) Purge ¶ added in v0.176.0
func (b *Broker) Purge()
Purge clears all tasks from the broker's in-memory state and flushes the empty list to disk. Tests that inject task fixtures with StartOnPort should call this in t.Cleanup to prevent in-progress fixtures from leaking via background saves or shared notification paths after the test exits.
func (*Broker) QueueSnapshot ¶
func (b *Broker) QueueSnapshot() queueSnapshot
func (*Broker) RecentHumanMessages ¶
RecentHumanMessages returns up to limit messages sent by a human or human-facing external sender ("you", "human", or "nex"). The returned slice contains the most recent messages in chronological order (earliest first).
func (*Broker) ReconcileMemoryWorkflows ¶ added in v0.95.0
func (b *Broker) ReconcileMemoryWorkflows(ctx context.Context) (MemoryWorkflowReconcileReport, error)
func (*Broker) RecordAction ¶
func (*Broker) RecordAgentUsage ¶
func (b *Broker) RecordAgentUsage(slug, model string, usage provider.ClaudeUsage)
RecordAgentUsage records token usage from a provider stream result for a given agent.
model is currently unused; it's kept on the signature so callers can pass the model name without a future per-model attribution change rippling through every headless launcher's call site.
func (*Broker) RecordDecision ¶
func (*Broker) RecordDemandSignal ¶ added in v0.140.0
func (b *Broker) RecordDemandSignal(evt PromotionDemandEvent) error
RecordDemandSignal funnels an event into the demand index. nil-safe: when the index is not wired the call is a silent no-op so the dispatcher can run on a partially-initialised broker without crashing.
func (*Broker) RecordPolicy ¶
RecordPolicy adds a new active policy or reactivates an existing one. Deduplicates by case-insensitive rule text — re-recording the same rule (any casing) returns the original record with Active flipped back on rather than minting a duplicate.
func (*Broker) RecordSignals ¶
func (*Broker) RecordTaskDecision ¶ added in v0.193.0
RecordTaskDecision records a decision attributed to actorSlug. When actorSlug is empty the broker stamps "system" so the audit trail is always populated — the HTTP handler passes the authenticated requestActor.Slug; internal callers (timeout sweeper, tests) can pass "" to opt into the system fallback.
func (*Broker) RecordTaskMemoryCapture ¶ added in v0.95.0
func (*Broker) RecordTaskMemoryLookup ¶ added in v0.95.0
func (*Broker) RecordTaskMemoryPromotion ¶ added in v0.95.0
func (*Broker) RecordTelegramGroup ¶
RecordTelegramGroup saves a group chat ID and title seen by the transport.
TODO(broker-split): RecordTelegramGroup, SeenTelegramGroups, and MarkRoutingTargets below are transport-bridging adjacent rather than pure messaging. They co-locate here for now because the telegram transport flows them through PostInboundSurfaceMessage; a future pass should consider a broker_transport.go.
func (*Broker) RegenerateOrMarkUnknown ¶ added in v0.193.0
RegenerateOrMarkUnknown handles the corrupt-on-disk recovery path. If broker memory still has the packet, it is rewritten to disk (regeneration). If memory is also empty (cold restart after crash), the task is transitioned to LifecycleStateUnknown so the operator surfaces the missing state explicitly. Build-time gate #6 (path 4).
func (*Broker) ReportAgentIssue ¶ added in v0.89.0
func (*Broker) RequestSelfHealing ¶
func (*Broker) ResolveReviewers ¶ added in v0.193.0
ResolveReviewers returns the set of agent slugs whose Watching set intersects with the task's current signals. Order is stable (lexicographic) so callers can assert on the slice in tests.
Tunnel-invited humans are not auto-assigned by this function — they are appended manually via the CLI (`wuphf task review --invite <slug>`) and stored on teamTask.Reviewers alongside the agent slugs.
func (*Broker) RestartBrokerListener ¶ added in v0.114.1
func (b *Broker) RestartBrokerListener() (WebBrokerRestartStatus, error)
func (*Broker) ResumeTask ¶
func (*Broker) ReviewLog ¶
ReviewLog returns the active ReviewLog or nil when the markdown backend is not wired. Exposed for tests and for future admin tooling.
func (*Broker) ReviewerGrades ¶ added in v0.193.0
func (b *Broker) ReviewerGrades(taskID string) []ReviewerGrade
ReviewerGrades returns a copy of the grades recorded against a task. Used by tests; production consumers go through Lane C's Decision Packet read path.
func (*Broker) RevokeHumanInvite ¶ added in v0.127.0
RevokeHumanInvite marks the invite RevokedAt (so no further accepts can admit a new session against it) and returns the IDs of sessions that were still active under this invite at revoke time. It does NOT revoke the sessions themselves — that belongs to the caller (typically transport.Host.RevokeParticipant via the OfficeBoundTransport adapter), so the same per-session teardown path runs whether triggered through the adapter or directly via revokeHumanSession.
Returns an error when the invite is unknown or when persisting the mutation fails. On a save failure the in-memory mutation is rolled back so a restart will not see a half-revoked state — without this rollback an attacker who still has the invite token could re-join after the next restart because the persisted state would still show the invite live. An already-revoked invite is a no-op success that returns any sessions still active under it.
func (*Broker) ScheduleRecheck ¶
func (*Broker) ScheduleRequestFollowUp ¶
func (*Broker) ScheduleTaskFollowUp ¶
func (*Broker) SchedulerJobControl ¶ added in v0.90.0
func (b *Broker) SchedulerJobControl(slug string, defaultInterval time.Duration) (bool, time.Duration)
SchedulerJobControl returns (enabled, effective interval) for the named cron slug. effectiveInterval is IntervalOverride when non-zero, else the caller's defaultInterval. Run-loops call this once per tick (PR 8 Lane G):
enabled, interval := l.broker.SchedulerJobControl("nex-insights", config-default)
if !enabled { time.Sleep(interval); continue }
... do work ...
time.Sleep(interval)
When the slug isn't registered (e.g. broker not yet seeded), returns (true, defaultInterval) so callers fall back to legacy behavior. ok=false is reserved for "slug found but caller passed an invalid default".
func (*Broker) SeedDefaultSkills ¶
func (b *Broker) SeedDefaultSkills(specs []agent.PackSkillSpec)
SeedDefaultSkills pre-populates the broker with the given skill specs. It is idempotent: skills whose name already exists (by slug) are skipped. No production callers remain; tests use it to set up broker skill state.
func (*Broker) SeenTelegramGroups ¶
SeenTelegramGroups returns all group chats the transport has seen.
func (*Broker) ServeWebUI ¶
func (*Broker) SessionModeState ¶
func (*Broker) SetAdminPauseExitFn ¶ added in v0.92.0
SetAdminPauseExitFn overrides the function called after /admin/pause completes its drain. Production wires this to os.Exit(0); tests wire it to a recorder so the test binary doesn't terminate.
func (*Broker) SetAgentLogRoot ¶
SetAgentLogRoot overrides where /agent-logs reads task JSONL from. Used by tests; production uses agent.DefaultTaskLogRoot().
func (*Broker) SetDecisionPacketStore ¶ added in v0.193.0
func (b *Broker) SetDecisionPacketStore(store decisionPacketStore)
SetDecisionPacketStore swaps the persistence backend on a broker. Used by tests to inject a mock store that simulates disk-full / corrupt JSON. Production code never calls this.
func (*Broker) SetDependencies ¶ added in v0.193.0
func (b *Broker) SetDependencies(taskID string, deps Dependencies) error
SetDependencies replaces the Dependencies block on the packet. Sub-issue parents and BlockedOn entries are managed at the task level by existing broker code; this mutator just mirrors them onto the human-facing artifact.
func (*Broker) SetEntityGraph ¶
func (b *Broker) SetEntityGraph(graph *EntityGraph)
SetEntityGraph wires a graph from tests. Must be called after the wiki worker is attached (graph writes ride the worker queue).
func (*Broker) SetEntitySynthesizer ¶
func (b *Broker) SetEntitySynthesizer(factLog *FactLog, synth *EntitySynthesizer)
SetEntitySynthesizer wires a synthesizer from tests. Must be called after ensureEntitySynthesizer would have run (i.e., wikiWorker already attached).
func (*Broker) SetFocusMode ¶
func (*Broker) SetGenerateChannelFn ¶
func (*Broker) SetGenerateMemberFn ¶
func (*Broker) SetHumanAdmitHook ¶ added in v0.128.0
func (b *Broker) SetHumanAdmitHook(hook humanAdmitHookFn)
SetHumanAdmitHook installs (or clears, when hook is nil) the per-broker callback fired after handleHumanInviteAccept admits a session. Called from ShareTransport.Run on adapter startup and shutdown. The pointer is read atomically on the HTTP hot path; passing nil is the documented way for the adapter to detach on shutdown.
func (*Broker) SetInsightsCursor ¶
func (*Broker) SetLauncherDrainer ¶ added in v0.92.0
func (b *Broker) SetLauncherDrainer(d launcherDrainer)
SetLauncherDrainer wires the Launcher's Drain hook so /admin/pause can shut down dispatch subsystems before exiting. nil drains nothing — the process still exits, but in-flight work is cancelled at the OS level rather than gracefully.
func (*Broker) SetMemberProvider ¶
func (b *Broker) SetMemberProvider(slug string, binding provider.ProviderBinding) error
SetMemberProvider attaches or replaces the ProviderBinding on the given office member and persists broker state. Used by the OpenClaw bootstrap migration (moving legacy config.OpenclawBridges onto members) and by the handleOfficeMembers update path. Returns an error if the member doesn't exist; callers should ensure the member exists first.
func (*Broker) SetNotificationCursor ¶
func (*Broker) SetPlaybookExecutionLog ¶
func (b *Broker) SetPlaybookExecutionLog(log *ExecutionLog)
SetPlaybookExecutionLog wires a log from tests.
func (*Broker) SetPlaybookSynthesizer ¶
func (b *Broker) SetPlaybookSynthesizer(synth *PlaybookSynthesizer)
SetPlaybookSynthesizer wires a synthesizer from tests. Must be called after ensurePlaybookExecutionLog would have run.
func (*Broker) SetReviewerResolver ¶
func (b *Broker) SetReviewerResolver(resolver ReviewerResolver)
SetReviewerResolver lets the launcher inject the active blueprint's ResolveReviewer when the markdown backend comes online. Safe to call before ensureReviewLog — the resolver is captured at ReviewLog construction time.
func (*Broker) SetSchedulerJob ¶
func (*Broker) SetSessionMode ¶
func (*Broker) SetShareTransport ¶ added in v0.134.0
func (b *Broker) SetShareTransport(t *ShareTransport)
SetShareTransport registers (or clears, when t is nil) the office-bound share adapter so the in-process share controller can route invite creation through it. Called by RegisterTransports during launcher boot and cleared on shutdown. Stored atomically because the controller reads it on its start path while RegisterTransports may still be installing other adapters.
func (*Broker) SetSkillCounter ¶ added in v0.87.0
func (b *Broker) SetSkillCounter(c *SkillCounter)
SetSkillCounter replaces the broker's counter — used by tests to inject a counter with a specific threshold / cooldown / clock without going through env vars.
func (*Broker) SetSkillScanner ¶ added in v0.86.0
func (b *Broker) SetSkillScanner(s *SkillScanner)
SetSkillScanner replaces the broker's scanner — used by tests to inject a fake provider.
func (*Broker) SetSkillSynthesizer ¶ added in v0.86.0
func (b *Broker) SetSkillSynthesizer(s *SkillSynthesizer)
SetSkillSynthesizer replaces the broker's Stage B synthesizer — used by tests to inject a fake aggregator + provider.
func (*Broker) SetSpec ¶ added in v0.193.0
SetSpec replaces the Spec on the packet for taskID. Lane B's intake driver calls this once per task; subsequent intake retries replace the prior Spec wholesale. Emits the spec.created lifecycle manifest.
func (*Broker) SetTeamLearningLog ¶ added in v0.97.0
func (b *Broker) SetTeamLearningLog(log *LearningLog)
func (*Broker) SetWebShareController ¶ added in v0.112.0
func (b *Broker) SetWebShareController(start func() (WebShareStatus, error), status func() WebShareStatus, stop func() error)
SetWebShareController registers host-only share controls for ServeWebUI. It is intentionally optional so tests and non-web launches do not need to construct the share controller.
func (*Broker) SetWebTunnelController ¶ added in v0.157.0
func (b *Broker) SetWebTunnelController(start func() (WebTunnelStatus, error), status func() WebTunnelStatus, stop func() error)
SetWebTunnelController registers host-only public-tunnel controls. Like SetWebShareController, it is optional so non-web launches can skip it.
func (*Broker) SetWikiCompressor ¶ added in v0.101.0
func (b *Broker) SetWikiCompressor(c *WikiCompressor)
SetWikiCompressor wires a compressor from tests. Must be called after the wiki worker is attached (the compressor depends on the worker's queue).
func (*Broker) SetWorkspaceOrchestrator ¶ added in v0.92.0
func (b *Broker) SetWorkspaceOrchestrator(o workspaceOrchestrator)
SetWorkspaceOrchestrator wires a concrete orchestrator after broker construction. The default (nil) yields 503s on /workspaces/* — which is the right behavior on a broker started without multi-workspace support (e.g., tests, headless one-shots).
Goroutine-safe: writes happen at startup before any HTTP traffic. Reads from handlers go through orchestrator() which takes b.mu.
func (*Broker) ShareTransport ¶ added in v0.134.0
func (b *Broker) ShareTransport() *ShareTransport
ShareTransport returns the registered office-bound share adapter, or nil when no adapter has been registered yet. Used by the in-process share controller to obtain a handle for invite creation; callers must tolerate a nil return and fall back to their legacy path.
func (*Broker) StartIntake ¶ added in v0.193.0
func (b *Broker) StartIntake(ctx context.Context, intent string, provider IntakeProvider) (IntakeOutcome, error)
StartIntake is the public entry point Lane F's CLI calls. It:
- Creates a placeholder task in the LifecycleStateIntake state so the inbox + telemetry can observe the in-flight intake without depending on a synchronous response.
- Calls provider.CallSpecLLM with the hardcoded system prompt and a user-turn-wrapped intent.
- Parses + validates the JSON-fenced Spec block.
- On success, persists Spec into broker memory and transitions the task intake → ready via b.TransitionLifecycle.
- On failure, transitions the placeholder out (cleanup) and surfaces the raw error.
On Spec.AutoAssign != "" the returned outcome includes an AutoAssignCountdown the CLI must drive to either auto-confirm or fall back to manual y/n. The transition to running is OWNED BY THE CLI (Lane F); Lane B only validates the spec and the intake → ready hop.
func (*Broker) StartOnPort ¶
StartOnPort launches the broker on the given port. Use 0 for an OS-assigned port.
func (*Broker) StartReviewConvergenceSweeper ¶ added in v0.193.0
StartReviewConvergenceSweeper launches the 30-second background goroutine that re-evaluates convergence for every review-state task. Returns a stop function the caller invokes on broker shutdown. Tests avoid this entry point and drive convergence by calling EvaluateConvergence / SweepReviewConvergence directly with their own fake clock.
func (*Broker) SubscribeActions ¶
func (*Broker) SubscribeActivity ¶
func (*Broker) SubscribeEntityBriefEvents ¶
func (b *Broker) SubscribeEntityBriefEvents(buffer int) (<-chan EntityBriefSynthesizedEvent, func())
SubscribeEntityBriefEvents returns a channel of brief-synthesized events plus an unsubscribe func.
func (*Broker) SubscribeEntityFactEvents ¶
func (b *Broker) SubscribeEntityFactEvents(buffer int) (<-chan EntityFactRecordedEvent, func())
SubscribeEntityFactEvents returns a channel of fact-recorded events.
func (*Broker) SubscribeMessages ¶
func (*Broker) SubscribeNotebookEvents ¶
SubscribeNotebookEvents returns a channel of notebook commit notifications plus an unsubscribe func. The /events SSE loop uses this to emit "notebook:write" events distinct from "wiki:write".
func (*Broker) SubscribeOfficeChanges ¶
func (*Broker) SubscribePamActionEvents ¶
func (b *Broker) SubscribePamActionEvents(buffer int) (<-chan PamActionStartedEvent, <-chan PamActionDoneEvent, <-chan PamActionFailedEvent, func())
SubscribePamActionEvents returns three channels (started/done/failed) plus a single unsubscribe func. The /events SSE handler uses this.
func (*Broker) SubscribePlaybookExecutionEvents ¶
func (b *Broker) SubscribePlaybookExecutionEvents(buffer int) (<-chan PlaybookExecutionRecordedEvent, func())
SubscribePlaybookExecutionEvents returns a channel of execution-recorded events plus an unsubscribe func. Mirrors SubscribeEntityFactEvents.
func (*Broker) SubscribePlaybookSynthesizedEvents ¶
func (b *Broker) SubscribePlaybookSynthesizedEvents(buffer int) (<-chan PlaybookSynthesizedEvent, func())
SubscribePlaybookSynthesizedEvents returns a channel of playbook-synthesis events plus an unsubscribe func. Mirrors SubscribePlaybookExecutionEvents.
func (*Broker) SubscribeReviewEvents ¶
func (b *Broker) SubscribeReviewEvents(buffer int) (<-chan ReviewStateChangeEvent, func())
SubscribeReviewEvents returns a channel of review state-change events plus an unsubscribe func. Used by the web UI SSE stream.
func (*Broker) SubscribeWikiEvents ¶
SubscribeWikiEvents returns a channel of wiki commit notifications plus an unsubscribe func. The web UI's SSE loop uses this to push "wiki:write" events to the browser.
func (*Broker) SubscribeWikiSectionsUpdated ¶
func (b *Broker) SubscribeWikiSectionsUpdated(buffer int) (<-chan WikiSectionsUpdatedEvent, func())
SubscribeWikiSectionsUpdated returns a channel of section-updated events plus an unsubscribe func. Mirror of SubscribeWikiEvents.
func (*Broker) SurfaceChannels ¶
SurfaceChannels returns all channels that have a surface configured for the given provider.
func (*Broker) SweepReviewConvergence ¶ added in v0.193.0
func (b *Broker) SweepReviewConvergence()
SweepReviewConvergence iterates every task currently in the review bucket of the lifecycle index and re-evaluates convergence. Cheap because the lifecycle index is O(1) and only review-state tasks are scanned (not the full task list). Called by the 30-second background goroutine and exposed for direct test invocation.
func (*Broker) TaskByID ¶ added in v0.193.0
TaskByID returns a copy of the task with the given ID, or nil when no such task exists. Used by the notification-context filter to resolve a message's SourceTaskID into the task's current lifecycle state + reviewer roster.
func (*Broker) TeamLearningLog ¶ added in v0.97.0
func (b *Broker) TeamLearningLog() *LearningLog
func (*Broker) TransitionLifecycle ¶ added in v0.185.1
func (b *Broker) TransitionLifecycle(taskID string, newState LifecycleState, reason string) error
TransitionLifecycle is the public entry point that acquires b.mu before delegating to transitionLifecycleLocked. Lane B / C / D event handlers call this once they have a verified taskID and target state.
func (*Broker) UnackedTasks ¶
UnackedTasks returns in_progress tasks with an owner that have not been acked and were created more than the given duration ago.
func (*Broker) UpdateAgentActivity ¶
func (b *Broker) UpdateAgentActivity(update agentActivitySnapshot)
func (*Broker) UpdateSchedulerJobState ¶
func (*Broker) UpdateSkillExecutionByWorkflowKey ¶
func (*Broker) WikiCompressor ¶ added in v0.101.0
func (b *Broker) WikiCompressor() *WikiCompressor
WikiCompressor returns the active compressor or nil.
func (*Broker) WikiIndex ¶
WikiIndex returns the broker's derived wiki index, or nil when the active memory backend is not markdown. HTTP handlers use this to run search queries against the structured fact store without going through the write worker.
func (*Broker) WikiInitErr ¶ added in v0.135.5
WikiInitErr returns the most recent ensureWikiWorker error, or nil if the worker is up or has not yet been attempted. Used by /health and by 503 responses so the underlying init failure is visible to operators instead of buried in broker stdout.
func (*Broker) WikiReadLog ¶ added in v0.98.0
WikiReadLog returns the broker's ReadLog under b.mu, matching the pattern used by ReviewLog(). Handlers must use this accessor — not b.readLog directly — to avoid a data race with ensureWikiWorker's write under b.mu.
func (*Broker) WikiSectionsCache ¶
func (b *Broker) WikiSectionsCache() *wikiSectionsCache
WikiSectionsCache returns the attached cache, or nil when the markdown backend is not active. Primarily used by tests; production code reads via the HTTP handler.
func (*Broker) WikiWorker ¶
func (b *Broker) WikiWorker() *WikiWorker
WikiWorker returns the broker's attached wiki worker, or nil when the active memory backend is not markdown.
type CapabilityCategory ¶
type CapabilityCategory string
const ( CapabilityCategoryRuntime CapabilityCategory = "runtime" CapabilityCategoryMemory CapabilityCategory = "memory" CapabilityCategoryAction CapabilityCategory = "action" CapabilityCategoryWorkflow CapabilityCategory = "workflow" CapabilityCategoryOffice CapabilityCategory = "office" CapabilityCategoryDirect CapabilityCategory = "direct" )
type CapabilityDescriptor ¶
type CapabilityDescriptor struct {
Key string
Label string
Category CapabilityCategory
Level CapabilityLevel
Lifecycle CapabilityLifecycle
Detail string
NextStep string
}
type CapabilityLevel ¶
type CapabilityLevel string
const ( CapabilityReady CapabilityLevel = "ready" CapabilityWarn CapabilityLevel = "warn" CapabilityInfo CapabilityLevel = "info" )
type CapabilityLifecycle ¶
type CapabilityLifecycle string
const ( CapabilityLifecycleReady CapabilityLifecycle = "ready" CapabilityLifecycleNeedsSetup CapabilityLifecycle = "needs_setup" CapabilityLifecycleDisabled CapabilityLifecycle = "disabled" CapabilityLifecycleDeferred CapabilityLifecycle = "deferred" CapabilityLifecyclePartial CapabilityLifecycle = "partial" CapabilityLifecycleProvisioning CapabilityLifecycle = "provisioning" )
type CapabilityProbeOptions ¶
type CapabilityRegistry ¶
type CapabilityRegistry struct {
Entries []CapabilityDescriptor
}
func BuildCapabilityRegistry ¶
func BuildCapabilityRegistry(runtime RuntimeCapabilities) CapabilityRegistry
func (CapabilityRegistry) Entry ¶
func (r CapabilityRegistry) Entry(key string) (CapabilityDescriptor, bool)
func (CapabilityRegistry) SummaryStatuses ¶
func (r CapabilityRegistry) SummaryStatuses(keys ...string) []CapabilityStatus
type CapabilityStatus ¶
type CapabilityStatus struct {
Name string
Level CapabilityLevel
Lifecycle CapabilityLifecycle
Detail string
NextStep string
}
type CatalogEntry ¶
type CatalogEntry struct {
Path string `json:"path"`
Title string `json:"title"`
AuthorSlug string `json:"author_slug"`
LastEditedTs string `json:"last_edited_ts"`
Group string `json:"group"`
// Read tracking — always present; zero when no reads have been recorded.
LastRead *time.Time `json:"last_read,omitempty"`
HumanReadCount int `json:"human_read_count"`
AgentReadCount int `json:"agent_read_count"`
DaysUnread int `json:"days_unread"`
// Archived is true when the entry is a tombstone (frontmatter archived: true).
// Only present in responses when ?include_archived=true is passed.
Archived bool `json:"archived,omitempty"`
// WordCount is the whitespace-delimited word count of the full file
// content (frontmatter included). Acceptable at v1 scale and matches
// the existing countWords helper used by BuildArticle.
WordCount int `json:"word_count"`
// PruneScore is a derived signal — (words * daysUnread) / readWeight —
// meant to surface verbose AND stale AND under-read articles. Higher
// score = more prunable. Zero when the article has no body.
PruneScore float64 `json:"prune_score,omitempty"`
}
CatalogEntry is a single article in the /wiki/catalog response. The JSON shape matches web/src/api/wiki.ts WikiCatalogEntry.
type ChannelIntentCounters ¶ added in v0.140.0
type ChannelIntentCounters struct {
Enqueued int64
Classified int64
Skipped int64
Searched int64
HitsFound int64
DemandFired int64
SearchFailed int64
RecordFailed int64
QueueSat int64
RepliesSent int64
}
ChannelIntentCounters is a thread-safe snapshot of the dispatcher's observability counters.
type ChannelIntentDispatcher ¶ added in v0.140.0
type ChannelIntentDispatcher struct {
// contains filtered or unexported fields
}
ChannelIntentDispatcher ingests channel messages, classifies them, and fires the demand-signal recording for cross-agent notebook hits. Lifecycle mirrors AutoNotebookWriter: New → Start(ctx) → Stop(timeout). Safe for concurrent Handle() callers.
func NewChannelIntentDispatcher ¶ added in v0.140.0
func NewChannelIntentDispatcher(client channelIntentBrokerClient) *ChannelIntentDispatcher
NewChannelIntentDispatcher constructs an idle dispatcher. nil client is safe but disables the search + record path (counters still advance up to classification).
func (*ChannelIntentDispatcher) Counters ¶ added in v0.140.0
func (d *ChannelIntentDispatcher) Counters() ChannelIntentCounters
Counters returns a thread-safe snapshot.
func (*ChannelIntentDispatcher) Handle ¶ added in v0.140.0
func (d *ChannelIntentDispatcher) Handle(msg channelMessage)
Handle is the broker-side ingress. Non-blocking enqueue. Drops with a counter increment on saturation. The classifier is intentionally NOT called here — it runs inside the dispatcher goroutine so the hot path stays a single channel send.
The hook fires for ALL senders (human or agent). The classifier's question-form filter is what restricts demand recording to genuine context-asks; sender role is not a useful pre-filter because agents can also ask each other for context.
func (*ChannelIntentDispatcher) Start ¶ added in v0.140.0
func (d *ChannelIntentDispatcher) Start(ctx context.Context)
Start launches the drain goroutine. Idempotent.
func (*ChannelIntentDispatcher) Stop ¶ added in v0.140.0
func (d *ChannelIntentDispatcher) Stop(timeout time.Duration)
Stop signals the drain goroutine and waits up to timeout. Idempotent. Mirrors AutoNotebookWriter.Stop — closes stopCh, never the queue, and wakes any test goroutine parked on progressCond.
func (*ChannelIntentDispatcher) WaitForCondition ¶ added in v0.140.0
func (d *ChannelIntentDispatcher) WaitForCondition(ctx context.Context, predicate func() bool) error
WaitForCondition blocks until predicate returns true, ctx cancels, or the dispatcher stops. Test-only.
type CircuitBreaker ¶
type CircuitBreaker struct {
// contains filtered or unexported fields
}
CircuitBreaker trips open after N consecutive failures; stays open for pauseDur, then half-open (one allowed trial). RecordSuccess closes it and zeroes the counter.
func NewCircuitBreaker ¶
func NewCircuitBreaker(threshold int, pauseDur time.Duration) *CircuitBreaker
func (*CircuitBreaker) Open ¶
func (c *CircuitBreaker) Open() bool
Open reports whether the breaker is currently blocking attempts.
func (*CircuitBreaker) RecordFailure ¶
func (c *CircuitBreaker) RecordFailure()
func (*CircuitBreaker) RecordSuccess ¶
func (c *CircuitBreaker) RecordSuccess()
RecordSuccess resets the breaker fully. Per pre-impl decision 5: only call this after BOTH a successful Dial AND a successful hello-ok response.
type CoalescedEdge ¶
type CoalescedEdge struct {
FromKind EntityKind `json:"from_kind"`
FromSlug string `json:"from_slug"`
ToKind EntityKind `json:"to_kind"`
ToSlug string `json:"to_slug"`
FirstSeenFactID string `json:"first_seen_fact_id"`
LastSeenTS time.Time `json:"last_seen_ts"`
OccurrenceCount int `json:"occurrence_count"`
}
CoalescedEdge is the reader's post-dedup view of an edge. Preserves the first-seen fact id and the most recent timestamp so UI panels can sort by recency without re-reading the full log.
type Comment ¶
type Comment struct {
ID string `json:"id"`
PromotionID string `json:"promotion_id"`
AuthorSlug string `json:"author_slug"`
Body string `json:"body"`
CreatedAt time.Time `json:"created_at"`
}
Comment is a single reviewer/author note on a promotion thread.
type CompressJob ¶ added in v0.101.0
CompressJob is one pending compression request for a specific article.
type CompressorConfig ¶ added in v0.101.0
type CompressorConfig struct {
Timeout time.Duration
// LLMCall is the pluggable shell-out used by tests. Production code
// leaves this nil and the worker falls back to defaultLLMCall.
LLMCall func(ctx context.Context, systemPrompt, userPrompt string) (string, error)
}
CompressorConfig is the tunable knobs. Defaults match the constants above.
type ContextCitation ¶ added in v0.95.0
type ContextCitation struct {
Backend string `json:"backend,omitempty"`
Source string `json:"source,omitempty"`
SourceID string `json:"source_id,omitempty"`
Path string `json:"path,omitempty"`
PageID string `json:"page_id,omitempty"`
ChunkID string `json:"chunk_id,omitempty"`
SourceURL string `json:"source_url,omitempty"`
LineStart int `json:"line_start,omitempty"`
LineEnd int `json:"line_end,omitempty"`
Title string `json:"title,omitempty"`
Snippet string `json:"snippet,omitempty"`
Score *float64 `json:"score,omitempty"`
Stale *bool `json:"stale,omitempty"`
RetrievedAt string `json:"retrieved_at,omitempty"`
}
ContextCitation is the backend-neutral citation shape used by the context harness. It is intentionally broad enough for markdown, Nex, and GBrain sources without making any one backend canonical.
func (*ContextCitation) UnmarshalJSON ¶ added in v0.95.0
func (c *ContextCitation) UnmarshalJSON(data []byte) error
type CreateRequest ¶ added in v0.92.0
type CreateRequest struct {
Name string `json:"name"`
Blueprint string `json:"blueprint,omitempty"`
InheritFrom string `json:"inherit_from,omitempty"`
CompanyName string `json:"company_name,omitempty"`
FromScratch bool `json:"from_scratch,omitempty"`
}
CreateRequest is the POST body for /workspaces/create. Fields beyond Name are forwarded verbatim to the orchestrator, which applies the inheritance table (see design's Lighter Onboarding section).
type DLQ ¶
type DLQ struct {
// contains filtered or unexported fields
}
DLQ owns all read/write access to wiki/.dlq/. It is safe for concurrent use.
mu is an RWMutex: writers (Enqueue, RecordAttempt, MarkResolved) take the write lock; read-only callers (Inspect, ReadyForReplay) take the read lock. ReadyForReplay is semantically read-only — it does not modify any file — so it can safely hold RLock alongside concurrent Inspect calls from an operator dashboard.
func NewDLQ ¶
NewDLQ constructs a DLQ rooted at the given wiki root. The .dlq/ subdirectory is created lazily on first write.
func (*DLQ) CorruptLineCounts ¶
CorruptLineCounts returns the running tally of JSONL rows the DLQ has skipped because they failed to decode or were missing required fields. Operators poll this to distinguish "queue is empty" from "queue file is corrupted and silently losing entries". Counts are process-local and reset on restart — persistent counters are a Slice 3 concern.
func (*DLQ) Enqueue ¶
Enqueue appends a new DLQEntry to extractions.jsonl. The entry's max_retries is coerced to DLQValidationMaxRetries when ErrorCategory is "validation". Callers should set FirstFailedAt and NextRetryNotBefore; if zero they are defaulted to now and now+base_backoff respectively.
func (*DLQ) Inspect ¶
Inspect returns a Snapshot of the current DLQ state. Read-only: does not append, tombstone, or mutate any file. Safe to call from an HTTP handler while the worker continues to enqueue and retry.
Uses the read lock so multiple operator dashboards polling GET /wiki/dlq do not serialise on each other.
func (*DLQ) MarkResolved ¶
MarkResolved appends a resolved_at tombstone. ReadyForReplay will skip this artifact_sha from now on.
func (*DLQ) ReadyForReplay ¶
ReadyForReplay scans extractions.jsonl and returns every entry whose next_retry_not_before is ≤ now, has retry_count < max_retries, and has no tombstone (resolved_at or promoted_at).
Only the latest state per artifact_sha is consulted (last-write-wins in append order), so successive RecordAttempt calls correctly reflect the updated backoff window rather than an old eligible row.
Read-only: holds the read lock so concurrent Inspect calls do not block.
func (*DLQ) RecordAttempt ¶
func (d *DLQ) RecordAttempt(ctx context.Context, artifactSHA string, attemptErr error, cat string) error
RecordAttempt bumps retry_count, updates last_attempted_at and next_retry_not_before, and appends the updated state. If the bump crosses max_retries, the entry is promoted to permanent-failures. cat is the error category of the new attempt.
type DLQEntry ¶
type DLQEntry struct {
ArtifactSHA string `json:"artifact_sha"`
ArtifactPath string `json:"artifact_path"`
Kind string `json:"kind"`
LastError string `json:"last_error"`
ErrorCategory DLQErrorCategory `json:"error_category"`
RetryCount int `json:"retry_count"`
MaxRetries int `json:"max_retries"`
FirstFailedAt time.Time `json:"first_failed_at"`
LastAttemptedAt time.Time `json:"last_attempted_at"`
NextRetryNotBefore time.Time `json:"next_retry_not_before"`
// FactLogAppend is populated only when ErrorCategory is
// DLQCategoryFactLogPersist. Carries the state needed for the
// append-specific replay path; unused/nil for extraction-class entries.
FactLogAppend *FactLogAppendPayload `json:"fact_log_append,omitempty"`
}
DLQEntry is one row in wiki/.dlq/extractions.jsonl. All time fields are RFC3339 UTC strings on the wire.
type DLQErrorCategory ¶
type DLQErrorCategory string
DLQErrorCategory describes the nature of the extraction failure. "validation" errors are never retried beyond the first attempt.
const ( DLQCategoryParse DLQErrorCategory = "parse" DLQCategoryProviderTimeout DLQErrorCategory = "provider_timeout" DLQCategoryValidation DLQErrorCategory = "validation" // DLQCategoryFactLogPersist is the category for a fact-log JSONL append // that failed AFTER the extraction LLM call succeeded and SubmitFacts // applied the in-memory index mutation. These are local I/O / git / // queue-saturation failures — NOT provider timeouts — so they carry // their own category (different metrics bucket, same backoff curve) // and a dedicated replay path that re-tries the append without // re-running the LLM (which would skip reinforced rows and leave the // JSONL permanently missing). See §7.4 substrate guarantee. DLQCategoryFactLogPersist DLQErrorCategory = "fact_log_persist" )
type DeadEnd ¶ added in v0.193.0
DeadEnd is one tried-and-discarded approach. The session report surfaces these explicitly so future runs (and the human reading the packet) can see what was attempted, not just what shipped — Karpathy autoresearch pattern. Reason is the agent's recorded rationale for abandoning the path.
type DecisionPacket ¶ added in v0.193.0
type DecisionPacket struct {
TaskID string `json:"taskId"`
LifecycleState LifecycleState `json:"lifecycleState"`
Spec Spec `json:"spec"`
SessionReport SessionReport `json:"sessionReport"`
ChangedFiles []DiffSummary `json:"changedFiles"`
ReviewerGrades []ReviewerGrade `json:"reviewerGrades"`
Dependencies Dependencies `json:"dependencies"`
UpdatedAt time.Time `json:"updatedAt"`
}
DecisionPacket is the full task-level artifact. One per task. Stored in broker memory keyed by task ID, persisted as JSON at ~/.wuphf/tasks/<id>/decision_packet.json (atomic-rename pattern), and surfaced over HTTP at /tasks/:id (Lane E + Lane G).
The on-disk JSON shape is 1:1 with the wire shape — both produced by json.Marshal of the same struct value. The camelCase keys match Lane E's already-shipped read-side stub and the design doc on-disk specification.
LifecycleState is `LifecycleState` (the typed Lane A constant) rather than `string` (Lane E's stub) so direct callers cannot stamp a non-canonical state into the packet. The stub typed it as `string` only because Lane E shipped before Lane A's typed constant was visible on the integration branch; integration converts the field type without changing the wire format (the underlying JSON value is the same string).
type DemandCandidate ¶ added in v0.138.0
type DemandCandidate struct {
EntryPath string `json:"entry_path"`
OwnerSlug string `json:"owner_slug"`
Score float64 `json:"score"`
TopSignal PromotionDemandSignal `json:"top_signal"`
}
DemandCandidate is the aggregated view of one entry's current rolling score returned by TopCandidates.
type Dependencies ¶ added in v0.193.0
type Dependencies struct {
// ParentTaskID is empty for root tasks. Sub-issues set this to
// their parent's task ID.
ParentTaskID string `json:"parentTaskId"`
// BlockedOn is a flat list of task IDs or PR identifiers blocking
// this task's progress. Mirrors teamTask.BlockedOn (added in
// Lane A).
BlockedOn []string `json:"blockedOn"`
}
Dependencies captures the v1 parent/child + flat-blocked-on shape. v1 has no DAG-wide cycle detection (parent/child is acyclic by tree definition; flat blockers cannot self-reference). v1.1 may extend.
type DiffSummary ¶ added in v0.193.0
type DiffSummary struct {
Path string `json:"path"`
Status string `json:"status,omitempty"`
Additions int `json:"additions"`
Deletions int `json:"deletions"`
RenamedFrom string `json:"renamedFrom,omitempty"`
}
DiffSummary is one row in ChangedFiles. Lane B/C and the owner agent hand the broker the populated structure; the broker does not run git itself in v1 to compute the diff.
Lane E's stub omits Status / RenamedFrom (Lane E only needs path + per-file delta counts for the inbox row hint). Lane C's canonical shape includes Status + RenamedFrom because they ship in the design doc's structure and because the Decision Packet view (Lane G) renders renames distinctly. The extra fields are JSON-omitempty so existing Lane E payloads continue to decode cleanly.
type Direction ¶
type Direction string
Direction describes which edge endpoints to return from Query.
type DiscoveredSection ¶
type DiscoveredSection struct {
Slug string `json:"slug"`
Title string `json:"title"`
ArticlePaths []string `json:"article_paths"`
ArticleCount int `json:"article_count"`
FirstSeenTs time.Time `json:"first_seen_ts"`
LastUpdateTs time.Time `json:"last_update_ts"`
FromSchema bool `json:"from_schema"`
}
DiscoveredSection is one top-level dir surfaced in the sidebar IA. A section is either declared by the active blueprint (FromSchema=true) or emerged organically from article writes (FromSchema=false). Both shapes ship to the UI in the same list so the sidebar can style them differently.
func DiscoverSections ¶
func DiscoverSections(ctx context.Context, repo *Repo, blueprint *operations.Blueprint) ([]DiscoveredSection, error)
DiscoverSections walks the repo's team/ tree and merges the observed first-segment groups with the blueprint's declared wiki_schema dirs. Blueprint sections are preserved even when empty; discovered-only sections are appended after them.
The returned slice is stable-sorted inside each partition: blueprint sections follow blueprint order (as declared in wiki_schema.dirs), discovered sections are alphabetical by slug. Consumers that want a different ordering can re-sort — this is the canonical order.
type EntityBriefSynthesizedEvent ¶
type EntityBriefSynthesizedEvent struct {
Kind EntityKind `json:"kind"`
Slug EntitySlug `json:"slug"`
CommitSHA string `json:"commit_sha"`
FactCount int `json:"fact_count"`
SynthesizedTS string `json:"synthesized_ts"`
}
EntityBriefSynthesizedEvent is the SSE payload broadcast after every successful synthesis commit.
type EntityEdge ¶
type EntityEdge struct {
FromKind EntityKind `json:"from_kind"`
FromSlug string `json:"from_slug"`
ToKind EntityKind `json:"to_kind"`
ToSlug string `json:"to_slug"`
FirstSeenFactID string `json:"first_seen_fact_id"`
LastSeenTS time.Time `json:"last_seen_ts"`
}
EntityEdge is one row of the append-only adjacency log.
type EntityFactRecordedEvent ¶
type EntityFactRecordedEvent struct {
Kind EntityKind `json:"kind"`
Slug string `json:"slug"`
FactID string `json:"fact_id"`
RecordedBy string `json:"recorded_by"`
FactCount int `json:"fact_count"`
ThresholdCrossed bool `json:"threshold_crossed"`
Timestamp string `json:"timestamp"`
}
EntityFactRecordedEvent is the SSE payload broadcast when a fact lands.
type EntityGraph ¶
type EntityGraph struct {
// contains filtered or unexported fields
}
EntityGraph owns all read/write access to the graph log.
func NewEntityGraph ¶
func NewEntityGraph(worker *WikiWorker) *EntityGraph
NewEntityGraph constructs a graph backed by the supplied worker.
func (*EntityGraph) Coalesce ¶
func (g *EntityGraph) Coalesce() ([]CoalescedEdge, error)
Coalesce reads the full log, deduplicates by (from, to), and returns one row per distinct edge. Newest-first by LastSeenTS.
func (*EntityGraph) Query ¶
func (g *EntityGraph) Query(kind EntityKind, slug string, direction Direction) ([]CoalescedEdge, error)
Query filters the coalesced graph to edges touching (kind, slug) in the requested direction. Passing direction="" treats it as DirectionOut.
func (*EntityGraph) RecordFactRefs ¶
RecordFactRefs extracts entity references from the given fact and, when any exist, appends new edges to the graph log via the wiki worker. Edges that already exist in the log are still appended (the reader coalesces — keeps the writer path cheap and idempotent on retries).
type EntityKind ¶
type EntityKind string
EntityKind is the narrow set of wiki subtrees we treat as "entities" for brief synthesis. Matches the existing IA — no new directories.
const ( EntityKindPeople EntityKind = "people" EntityKindCompanies EntityKind = "companies" EntityKindCustomers EntityKind = "customers" )
func ValidEntityKinds ¶
func ValidEntityKinds() []EntityKind
ValidEntityKinds lists every kind the fact log accepts. Any other value is rejected at the API boundary — there is no fallback to a generic "entity" bucket.
type EntityRef ¶
type EntityRef struct {
Kind EntityKind `json:"kind"`
Slug string `json:"slug"`
}
EntityRef is one parsed reference to another entity inside a fact.
func ExtractRefs ¶
func ExtractRefs(sourceKind EntityKind, sourceSlug, statement string, known func(slug string) (EntityKind, bool)) []EntityRef
ExtractRefs parses `statement` for references to entities *other than* the source entity (self-references are elided). `known` supplies a closure the caller can plug in to resolve bare `[[slug]]` wikilinks to a single kind; return ("", false) to skip (ambiguous or unknown). Passing nil disables bare-slug lookup entirely — useful in tests and when the graph is being rebuilt without disk access.
type EntitySlug ¶
type EntitySlug = string
EntitySlug is a type alias for string (not a distinct type — it adds no compile-time safety). It exists to help readers of the SSE JSON schema; the wire value is a plain string.
type EntitySynthesizer ¶
type EntitySynthesizer struct {
// contains filtered or unexported fields
}
EntitySynthesizer is the broker-level synthesis worker.
func NewEntitySynthesizer ¶
func NewEntitySynthesizer(worker *WikiWorker, factLog *FactLog, publisher entityEventPublisher, cfg SynthesizerConfig) *EntitySynthesizer
NewEntitySynthesizer wires a synthesizer against the given worker + fact log. Config may be the zero value; defaults are filled in here.
func (*EntitySynthesizer) EnqueueSynthesis ¶
func (s *EntitySynthesizer) EnqueueSynthesis(kind EntityKind, slug, requestBy string) (uint64, error)
EnqueueSynthesis adds a synthesis job if none is already in-flight or queued for the same entity. Returns the assigned job ID (or 0 when the request was coalesced into an existing queue entry).
func (*EntitySynthesizer) IsInflightOrQueued ¶ added in v0.99.0
func (s *EntitySynthesizer) IsInflightOrQueued(kind EntityKind, slug string) bool
IsInflightOrQueued reports whether a synthesis job is currently running or pending for the given entity. Used by BuildArticle to set SynthesisQueued.
func (*EntitySynthesizer) Mode ¶ added in v0.99.0
func (s *EntitySynthesizer) Mode() SynthesisMode
Mode returns the synthesis mode from the config.
func (*EntitySynthesizer) Start ¶
func (s *EntitySynthesizer) Start(ctx context.Context)
Start launches the synthesis loop. Returns immediately. Stop via the ctx or via Stop().
func (*EntitySynthesizer) Stop ¶
func (s *EntitySynthesizer) Stop()
Stop signals the worker to exit. Pending jobs in the buffered channel are discarded — caller is responsible for only calling this at shutdown.
func (*EntitySynthesizer) Threshold ¶
func (s *EntitySynthesizer) Threshold() int
Threshold returns the current synthesis threshold.
type Execution ¶
type Execution struct {
ID string `json:"id"`
Slug string `json:"slug"`
Outcome PlaybookOutcome `json:"outcome"`
Summary string `json:"summary"`
Notes string `json:"notes,omitempty"`
RecordedBy string `json:"recorded_by"`
CreatedAt time.Time `json:"created_at"`
}
Execution is one recorded run of a compiled playbook. Fields match the on-disk JSONL — adding a field is a forward-compatible change, removing one is a break and needs an entry-format version bump.
type ExecutionLog ¶
type ExecutionLog struct {
// contains filtered or unexported fields
}
ExecutionLog is the append-only log rooted in a wiki repo. Safe to share across goroutines.
func NewExecutionLog ¶
func NewExecutionLog(worker *WikiWorker) *ExecutionLog
NewExecutionLog constructs an ExecutionLog backed by the supplied worker.
func (*ExecutionLog) Append ¶
func (l *ExecutionLog) Append(ctx context.Context, slug string, outcome PlaybookOutcome, summary, notes, recordedBy string) (Execution, error)
Append validates the inputs, serializes one Execution, and enqueues the append through the wiki worker. Returns the persisted Execution.
type Extractor ¶
type Extractor struct {
// contains filtered or unexported fields
}
Extractor is the extraction-loop orchestrator. It is safe for concurrent use; the underlying components (provider, worker, resolver, DLQ, index) have their own concurrency controls.
func NewExtractor ¶
func NewExtractor(provider QueryProvider, worker *WikiWorker, dlq *DLQ, index *WikiIndex) *Extractor
NewExtractor constructs an Extractor. All arguments are required. The extractor's clock defaults to time.Now.UTC. (NOTE(review): an earlier doc revision mentioned a `now` parameter, but the current signature takes none — verify against the source.)
func (*Extractor) ExtractFromArtifact ¶
ExtractFromArtifact reads a committed artifact from disk, runs the extraction prompt, resolves each entity through the gate, and submits the resulting facts + ghost entities back to the WikiWorker.
The artifactPath must match wiki/artifacts/{kind}/{sha}.md. Failures at any step route to the DLQ with an appropriate category so the replay loop can pick them up later. Returns a non-nil error only for callers that want to surface it (e.g. the ReplayDLQ loop); the commit pipeline logs and discards.
func (*Extractor) ReplayDLQ ¶
ReplayDLQ walks the DLQ replay queue and retries each ready entry. Routing depends on the ErrorCategory:
- DLQCategoryFactLogPersist: re-attempt the JSONL append only (no extraction rerun). Re-running extraction would skip the fact as reinforcement and the append would never recover — see §7.4.
- everything else: re-run ExtractFromArtifact on the artifact.
Success → MarkResolved tombstone; failure → RecordAttempt so the entry either backs off or promotes to permanent-failures.jsonl.
Returns (processed, retired, err) where processed is the number of entries attempted, retired is the count that moved out of the active queue (resolved + permanent promotions since this call started).
type ExtractorHook ¶
type ExtractorHook interface {
ExtractFromArtifact(ctx context.Context, artifactPath string) error
}
ExtractorHook is the narrow interface the worker uses to trigger the extraction loop after a successful artifact commit. Kept as an interface so wiki_worker.go does not take a hard dependency on wiki_extractor.go — tests can pass a fake hook that asserts ExtractFromArtifact was called.
type Fact ¶
type Fact struct {
ID string `json:"id"`
Kind EntityKind `json:"kind"`
Slug string `json:"slug"`
Text string `json:"text"`
SourcePath string `json:"source_path,omitempty"`
RecordedBy string `json:"recorded_by"`
CreatedAt time.Time `json:"created_at"`
}
Fact is one atomic observation recorded by an agent.
type FactCluster ¶
type FactCluster struct {
Predicate string `json:"predicate"`
Object string `json:"object"`
Entities []string `json:"entities"`
Count int `json:"count"`
}
FactCluster is one reinforced (predicate, object) pair observed across multiple distinct entities. Emitted by clusterReinforcedFacts as input to the v2 playbook synthesis prompt (§Thread C, WIKI-SLICE2-PLAN.md).
Entities are the distinct entity slugs whose fact logs contain a reinforced fact matching (Predicate, Object). Count is len(Entities), surfaced as a separate field so the prompt template can print it without a template function.
Count reflects distinct entities where the fact was confirmed via re-extraction (ReinforcedAt != nil), not all entities where the fact was observed. Facts seen only once never enter a cluster.
type FactLog ¶
type FactLog struct {
// contains filtered or unexported fields
}
FactLog is the append-only log rooted in a wiki repo. It is safe to share across goroutines — every operation takes its own lock and either streams directly from disk (reads) or enqueues through the WikiWorker (writes).
func NewFactLog ¶
func NewFactLog(worker *WikiWorker) *FactLog
NewFactLog constructs a FactLog backed by the supplied worker. The worker must be running before Append is called.
func (*FactLog) Append ¶
func (l *FactLog) Append(ctx context.Context, kind EntityKind, slug, text, sourcePath, recordedBy string) (Fact, error)
Append validates the inputs, serializes one Fact, and enqueues the append through the wiki worker. Returns the persisted Fact on success.
func (*FactLog) CountSinceSHA ¶
func (l *FactLog) CountSinceSHA(ctx context.Context, kind EntityKind, slug, sha string) (int, error)
CountSinceSHA returns the number of facts recorded after the given commit SHA (exclusive). When sha is empty, every fact counts. When sha does not appear in the repo history (or the file predates the sha), every fact counts — same semantics as "no prior synthesis."
type FactLogAppendPayload ¶
type FactLogAppendPayload struct {
Kind string `json:"kind"`
Slug string `json:"slug"`
ArtifactSHA string `json:"artifact_sha"`
// JSONLLines is the raw multi-line content the append was attempting to
// write. Preserving the bytes verbatim keeps the content hash stable
// across retries.
JSONLLines string `json:"jsonl_lines"`
}
FactLogAppendPayload carries the state needed to retry a failed fact-log JSONL append without re-running extraction. Populated on entries whose ErrorCategory is DLQCategoryFactLogPersist.
The payload captures the exact JSONL lines the original append tried to write (one JSON-encoded TypedFact per line, newline-terminated) along with the target kind/slug and the originating artifact SHA — enough for the replay handler to reconstruct the EnqueueFactLogAppend call and deterministically dedupe by fact_id against the current on-disk file.
type FactStore ¶
type FactStore interface {
UpsertFact(ctx context.Context, f TypedFact) error
UpsertEntity(ctx context.Context, e IndexEntity) error
UpsertEdge(ctx context.Context, e IndexEdge) error
UpsertRedirect(ctx context.Context, r Redirect) error
GetFact(ctx context.Context, id string) (TypedFact, bool, error)
ListFactsForEntity(ctx context.Context, slug string) ([]TypedFact, error)
ListEdgesForEntity(ctx context.Context, slug string) ([]IndexEdge, error)
// ListAllFacts returns every indexed fact, ordered deterministically by
// ID. Used by cross-entity consumers (e.g. playbook cluster detection in
// Slice 2 Thread C) that need to scan reinforced (predicate, object)
// pairs across all entities. Read-only; never mutates the fact log.
ListAllFacts(ctx context.Context) ([]TypedFact, error)
// CountFacts returns the total number of indexed facts. Cheap in both
// backends (SELECT COUNT(*) on SQLite, len(map) on in-memory). Callers
// that do full scans via ListAllFacts use this as a cheap pre-check so
// they can log a warning when the corpus outgrows the bounded-scan
// assumption (no index on predicate/object yet — tracked by Slice 3).
CountFacts(ctx context.Context) (int, error)
ResolveRedirect(ctx context.Context, slug string) (string, bool, error)
// ListFactsByPredicateObject returns every fact whose triplet matches
// (predicate, object) exactly. Used by the typed-predicate graph walk
// for multi_hop queries (Slice 2 Thread A).
ListFactsByPredicateObject(ctx context.Context, predicate, object string) ([]TypedFact, error)
// ListFactsByTriplet returns every fact whose triplet matches the given
// subject + predicate and whose triplet.object starts with objectPrefix
// (case-sensitive). An empty objectPrefix matches any object. Used by the
// typed-predicate graph walk and counterfactual rewrite (Slice 2 Thread A).
ListFactsByTriplet(ctx context.Context, subject, predicate, objectPrefix string) ([]TypedFact, error)
// ListReinforcedFactsByPredicate returns every fact with a non-nil
// ReinforcedAt whose triplet predicate matches `predicate` exactly. Pass
// "" to return all reinforced facts (no predicate filter). Result is
// sorted by ID ascending so consumers see deterministic ordering.
//
// Backed by idx_facts_triplet_pred_obj on the SQLite store, so the cost
// is O(matching facts), not O(total facts). The in-memory store filters
// linearly — equivalent for small corpora.
ListReinforcedFactsByPredicate(ctx context.Context, predicate string) ([]TypedFact, error)
// ListAllFactsPaged returns up to `limit` facts whose ID is strictly
// greater than `afterID`, sorted by ID ascending. Pass afterID="" to
// start from the beginning. Pass limit<=0 for an implementation default
// (suggested: 1000). The caller drives pagination by feeding the last
// returned ID into afterID on the next call.
//
// Use this instead of ListAllFacts when the consumer can process facts
// incrementally — it bounds memory regardless of corpus size.
ListAllFactsPaged(ctx context.Context, afterID string, limit int) ([]TypedFact, error)
// IterateEntities invokes fn for every entity row in the store. Iteration
// order is implementation-defined but stable within a single call.
// Callers use this to implement signal lookups (by email, name, domain)
// that need to scan every entity when a direct index is not available.
//
// Contract: fn must be safe to call repeatedly; IterateEntities does not
// buffer the full result set. An error returned from fn short-circuits
// the iteration and is returned to the caller verbatim.
IterateEntities(ctx context.Context, fn func(IndexEntity) error) error
// CanonicalHashFacts returns a stable hash over all indexed facts for
// the §7.4 rebuild contract. ReinforcedAt is EXCLUDED from the hash
// input so two extraction runs on the same artifact (the second one
// purely bumps reinforced_at) produce identical hashes. Use
// CanonicalHashAll for end-to-end drift detection where ReinforcedAt
// participates.
CanonicalHashFacts(ctx context.Context) (string, error)
// CanonicalHashAll is the composite §7.4 hash over facts + entities +
// edges + redirects. ReinforcedAt IS included here so the hash advances
// whenever any layer (including reinforcement) changes.
CanonicalHashAll(ctx context.Context) (string, error)
Close() error
}
FactStore is the narrow interface the index uses for structured storage. modernc.org/sqlite slots in here without wiki_index.go changing.
Implementations MUST be goroutine-safe for concurrent reads. A single writer (the broker's ReconcilePath loop) calls Upsert; readers call Get / List / Search concurrently.
type FeedbackItem ¶ added in v0.193.0
type FeedbackItem struct {
AppendedAt time.Time `json:"appendedAt,omitempty"`
Author string `json:"author,omitempty"`
Body string `json:"body,omitempty"`
}
FeedbackItem is one entry in the appendable feedback log Lane D writes when a reviewer asks for changes. v1 intake never produces these; the shape is here so Lane C can deserialize a Decision Packet that already carries feedback from a previous changes_requested cycle.
type GraphAllNode ¶
type GraphAllNode struct {
Kind EntityKind `json:"kind"`
Slug string `json:"slug"`
Title string `json:"title"`
}
GraphAllNode is one entity rendered as a node by GET /entity/graph/all.
type GuardScanResult ¶ added in v0.86.0
type GuardScanResult struct {
Verdict GuardVerdict
TrustLevel GuardTrustLevel
Findings []string
Summary string
}
GuardScanResult bundles the verdict, findings, trust level, and a short human-readable summary suitable for stamping into frontmatter or surfacing in the UI.
func ScanSkill ¶ added in v0.86.0
func ScanSkill(fm SkillFrontmatter, body string, trust GuardTrustLevel) GuardScanResult
ScanSkill applies all guard rules to fm + body and returns a verdict and findings list. The caller decides allow / reject based on the trust ladder.
Findings are accumulated in severity order: frontmatter integrity issues first (always dangerous), then body dangerous patterns, then caution patterns. Verdict is the highest severity seen.
type GuardTrustLevel ¶ added in v0.86.0
type GuardTrustLevel string
GuardTrustLevel marks how much we trust the source of a skill body. The caller maps this onto the scan verdict to decide allow / reject.
const ( // TrustBuiltin is reserved for skills shipped with the binary. TrustBuiltin GuardTrustLevel = "builtin" // TrustTrusted is for explicitly vetted skills (workspace admin authored). TrustTrusted GuardTrustLevel = "trusted" // TrustCommunity is the Stage A wiki source — humans wrote the article, // the LLM merely classified it. TrustCommunity GuardTrustLevel = "community" // TrustAgentCreated is the Stage B+ LLM-synth path. Treated stricter than // Hermes' policy because WUPHF agents can synthesize at scale. TrustAgentCreated GuardTrustLevel = "agent_created" )
type GuardVerdict ¶ added in v0.86.0
type GuardVerdict string
GuardVerdict captures the highest-severity finding from a scan.
const ( // VerdictSafe means no findings were detected. VerdictSafe GuardVerdict = "safe" // VerdictCaution means at least one cautionary pattern matched. VerdictCaution GuardVerdict = "caution" // VerdictDangerous means at least one dangerous pattern matched. VerdictDangerous GuardVerdict = "dangerous" )
type HeadlessEvent ¶ added in v0.147.0
type HeadlessEvent struct {
Kind string `json:"kind"`
Type string `json:"type"`
Provider string `json:"provider,omitempty"`
Agent string `json:"agent,omitempty"`
TurnID string `json:"turn_id,omitempty"`
TaskID string `json:"task_id,omitempty"`
ParentID string `json:"parent_id,omitempty"`
ToolName string `json:"tool_name,omitempty"`
Text string `json:"text,omitempty"`
Detail string `json:"detail,omitempty"`
Status string `json:"status,omitempty"`
StartedAt string `json:"started_at,omitempty"`
Metrics *HeadlessEventMetrics `json:"metrics,omitempty"`
RawType string `json:"raw_type,omitempty"`
ToolCalls []HeadlessManifestEntry `json:"tool_calls,omitempty"`
TextLen *int `json:"text_len,omitempty"`
}
HeadlessEvent is the canonical, provider-agnostic envelope for a single progress signal emitted from a headless agent turn. All four runners (Claude, Codex, Opencode, OpenAI-compatible) populate the same shape so the web UI can render a normalized timeline regardless of which provider is executing.
Wire shape: emitted as one JSONL line on /agent-stream/{slug} with `kind` set to "headless_event" so the frontend can branch on a single discriminator without inspecting type-specific fields. The line lives alongside the raw provider chunks the runner already tees into the stream — additive for now so existing consumers keep working; future slices may replace the raw tee once the typed channel is the system of record.
Field semantics:
- Kind: always "headless_event". Lets a JSON.parse-then-discriminate consumer skip provider-native events without a structural sniff.
- Type: phase of the turn — "status", "text", "tool_use", "tool_result", "idle", "error", "manifest". A2-MVP emitted only "idle" and "error"; A3 wired the intermediate phases; A4 adds "manifest" — a per-turn completion summary emitted after the terminal idle/error event.
- Provider: "claude" | "codex" | "opencode" | "openai-compat".
- Agent: the speaker slug (the agent the turn belongs to).
- TurnID, TaskID, ParentID: correlation IDs. TurnID groups events from one ReadXxxJSONStream call. TaskID is the broker task the turn is servicing (already used for SSE scoping in /agent-stream ?task=). ParentID is reserved for nested tool/sub-agent calls.
- ToolName, Detail: payload for tool_use / tool_result / error.
- Text: payload for text events (and the human-readable summary for idle).
- Status: "active" | "idle" | "error" — mirrors the activity snapshot status so a single event drives both the timeline and status-pill subscribers.
- StartedAt: RFC3339 timestamp from the runner's clock so ordering survives reordering at the SSE boundary and replay timing is reconstructable.
- Metrics: turn-level latency and token totals. Populated on idle and manifest.
- RawType: the underlying provider event type for debug tooling. Empty for runner-synthesized events like idle and manifest.
- ToolCalls: populated only on manifest events. Sorted list of distinct tools called during the turn with per-tool call counts.
- TextLen: populated only on manifest events. Total byte length of all text chunks emitted during the turn.
type HeadlessEventMetrics ¶ added in v0.147.0
type HeadlessEventMetrics struct {
TotalMs int64 `json:"total_ms,omitempty"`
FirstEventMs int64 `json:"first_event_ms,omitempty"`
FirstTextMs int64 `json:"first_text_ms,omitempty"`
FirstToolMs int64 `json:"first_tool_ms,omitempty"`
InputTokens int `json:"input_tokens,omitempty"`
OutputTokens int `json:"output_tokens,omitempty"`
}
HeadlessEventMetrics carries turn-level timing and token totals. All values are optional; zero is treated as "not measured".
type HeadlessManifestEntry ¶ added in v0.174.0
HeadlessManifestEntry is one tool in a manifest event's ToolCalls list. Count is the number of times that tool was called during the turn.
type HeadlessPamRunner ¶
type HeadlessPamRunner struct{}
HeadlessPamRunner is the default: one fresh CLI process per Pam turn. Context is clean by construction, so no /clear is needed.
type HealthResponse ¶ added in v0.105.2
type HealthResponse struct {
Status string `json:"status"`
SessionMode string `json:"session_mode"`
OneOnOneAgent string `json:"one_on_one_agent"`
FocusMode bool `json:"focus_mode"`
Provider string `json:"provider"`
ProviderModel string `json:"provider_model"`
MemoryBackend string `json:"memory_backend"`
MemoryBackendActive string `json:"memory_backend_active"`
MemoryBackendReady bool `json:"memory_backend_ready"`
NexConnected bool `json:"nex_connected"`
Build buildinfo.Info `json:"build"`
}
HealthResponse is the stable JSON response served by GET /health.
type HumanIdentity ¶
type HumanIdentity struct {
Name string `json:"name"`
Email string `json:"email"`
Slug string `json:"slug"`
CreatedAt time.Time `json:"created_at"`
}
HumanIdentity is the cached git identity for a single human.
It is written to disk as the JSON contents of ~/.wuphf/humans/{sha256(email)[:16]}.json and is the source of truth the broker uses when stamping a `/wiki/write-human` commit.
type HumanIdentityRegistry ¶
type HumanIdentityRegistry struct {
// contains filtered or unexported fields
}
HumanIdentityRegistry manages the on-disk cache of git identities. Safe for concurrent use.
func NewHumanIdentityRegistry ¶
func NewHumanIdentityRegistry() *HumanIdentityRegistry
NewHumanIdentityRegistry constructs a registry rooted at {RuntimeHomeDir}/.wuphf/humans. The directory is created lazily on first write; construction never touches the filesystem.
func NewHumanIdentityRegistryAt ¶
func NewHumanIdentityRegistryAt(dir string) *HumanIdentityRegistry
NewHumanIdentityRegistryAt is the test hook — accepts an explicit dir so each test run has its own cache.
func (*HumanIdentityRegistry) Dir ¶
func (r *HumanIdentityRegistry) Dir() string
Dir returns the on-disk directory the registry reads/writes. Useful for tests and observability.
func (*HumanIdentityRegistry) List ¶
func (r *HumanIdentityRegistry) List() []HumanIdentity
List returns every identity currently cached on disk. Ordering is not guaranteed. Errors from the filesystem are swallowed — an unreadable cache entry simply does not appear in the list.
func (*HumanIdentityRegistry) Local ¶
func (r *HumanIdentityRegistry) Local() HumanIdentity
Local returns the local-machine identity — the one derived from the current user's `git config --global`. Cached after the first call so repeated writes don't fork a subprocess per commit.
On any error (git missing, config unset, persist failure) Local falls back to FallbackHumanIdentity so callers never need to nil-check.
func (*HumanIdentityRegistry) Lookup ¶
func (r *HumanIdentityRegistry) Lookup(slug string) (HumanIdentity, bool)
Lookup returns the HumanIdentity cached for the given slug, if any. The slug is the URL-safe handle derived from the email local-part; see deriveSlug.
func (*HumanIdentityRegistry) Observe ¶
func (r *HumanIdentityRegistry) Observe(name, email string) (HumanIdentity, error)
Observe records an identity seen on a landed commit. Idempotent — writes the cache file only if a matching entry is not already on disk. This is how the registry grows when a second human's commit arrives.
type HumanWikiIntentCounters ¶ added in v0.137.0
type HumanWikiIntentCounters struct {
Enqueued int64
Written int64
Skipped int64
WriteFailed int64
QueueSat int64
}
HumanWikiIntentCounters is a snapshot of the writer's observability counters, returned by Counters() for tests + future metrics surface.
type HumanWikiIntentWriter ¶ added in v0.137.0
type HumanWikiIntentWriter struct {
// contains filtered or unexported fields
}
HumanWikiIntentWriter ingests human channel messages, classifies for remember-intent, and writes matching messages directly into the team wiki. Lifecycle mirrors AutoNotebookWriter: New → Start(ctx) → Stop(timeout). Safe for concurrent Handle() callers.
func NewHumanWikiIntentWriter ¶ added in v0.137.0
func NewHumanWikiIntentWriter(wiki humanWikiIntentWriterClient) *HumanWikiIntentWriter
NewHumanWikiIntentWriter constructs an idle writer. Call Start to begin. nil wiki is safe but disables actual writes (counters still advance).
func (*HumanWikiIntentWriter) Counters ¶ added in v0.137.0
func (w *HumanWikiIntentWriter) Counters() HumanWikiIntentCounters
Counters returns a thread-safe snapshot.
func (*HumanWikiIntentWriter) Handle ¶ added in v0.137.0
func (w *HumanWikiIntentWriter) Handle(msg channelMessage)
Handle is the broker-side ingress. Non-blocking enqueue. Drops with a counter increment on saturation.
The classifier is intentionally NOT called here — it runs inside the writer goroutine. Keeping the hook site to a single channel send keeps the PostMessage hot path predictable.
func (*HumanWikiIntentWriter) Start ¶ added in v0.137.0
func (w *HumanWikiIntentWriter) Start(ctx context.Context)
Start launches the drain goroutine. Idempotent.
func (*HumanWikiIntentWriter) Stop ¶ added in v0.137.0
func (w *HumanWikiIntentWriter) Stop(timeout time.Duration)
Stop signals the drain goroutine to exit and waits up to timeout. Idempotent. Mirrors AutoNotebookWriter.Stop — closes stopCh, never the queue, and wakes any test goroutine parked on progressCond.
func (*HumanWikiIntentWriter) WaitForCondition ¶ added in v0.137.0
func (w *HumanWikiIntentWriter) WaitForCondition(ctx context.Context, predicate func() bool) error
WaitForCondition blocks until predicate returns true, ctx cancels, or the writer stops. Test-only.
type InboxCounts ¶ added in v0.193.0
type InboxCounts struct {
DecisionRequired int `json:"decisionRequired"`
Running int `json:"running"`
Blocked int `json:"blocked"`
MergedToday int `json:"mergedToday"`
}
InboxCounts is the cardinality summary that the inbox header renders. Three of the four counts are O(1) reads of len(b.lifecycleIndex[state]); the inbox query never iterates b.tasks for these.
MergedToday is the one exception that costs O(merged-bucket-size) to compute because the index does not segment by day. v1 accepts that: the merged bucket is bounded by recent activity and the total broker task count is small enough that this stays under the <100ms ceiling.
type InboxFilter ¶ added in v0.193.0
type InboxFilter string
InboxFilter selects which lifecycle bucket(s) the inbox query returns. The five constants below are the only valid filter values; any other string yields ErrInboxFilterUnknown.
const ( InboxFilterDecisionRequired InboxFilter = "decision_required" InboxFilterRunning InboxFilter = "running" InboxFilterBlocked InboxFilter = "blocked" InboxFilterMerged InboxFilter = "merged" InboxFilterAll InboxFilter = "all" )
type InboxPayload ¶ added in v0.193.0
type InboxPayload struct {
Rows []InboxRow `json:"rows"`
Counts InboxCounts `json:"counts"`
RefreshedAt string `json:"refreshedAt"`
}
InboxPayload is the full response to GET /tasks/inbox.
type InboxRow ¶ added in v0.193.0
type InboxRow struct {
TaskID string `json:"taskId"`
Title string `json:"title"`
Assignment string `json:"assignment"`
LifecycleState LifecycleState `json:"lifecycleState"`
SeveritySummary SeveritySummary `json:"severitySummary"`
ElapsedMs int64 `json:"elapsedMs"`
ReviewerSummary ReviewerSummary `json:"reviewerSummary"`
}
InboxRow is one entry in the inbox payload. Field shape and JSON keys are 1:1 with Lane G's TS InboxRow type. Build-time:
- TaskID: teamTask.ID
- Title: teamTask.Title (for v1, Spec.Problem is what intake fills, but the existing teamTask.Title field is the human-readable label written by the spec confirmation step)
- Assignment: Spec.Assignment from the Decision Packet (empty when Lane C has not stored a packet yet)
- LifecycleState: teamTask.LifecycleState as a string
- SeveritySummary: aggregated from DecisionPacket.ReviewerGrades
- ElapsedMs: now - parseBrokerTimestamp(task.CreatedAt)
- ReviewerSummary: graded count vs len(task.Reviewers)
type IndexEdge ¶
type IndexEdge struct {
Subject string `json:"subject"`
Predicate string `json:"predicate"`
Object string `json:"object"`
Timestamp time.Time `json:"timestamp"`
SourceSHA string `json:"source_sha"`
}
IndexEdge is one typed edge in graph.log (§6.2).
type IndexEntity ¶
type IndexEntity struct {
Slug string `json:"slug"`
CanonicalSlug string `json:"canonical_slug"` // same as Slug unless this is a redirect
Kind string `json:"kind"`
Aliases []string `json:"aliases,omitempty"`
Signals Signals `json:"signals"`
LastSynthesizedSHA string `json:"last_synthesized_sha,omitempty"`
LastSynthesizedAt time.Time `json:"last_synthesized_at,omitempty"`
FactCountAtSynth int `json:"fact_count_at_synth,omitempty"`
CreatedAt time.Time `json:"created_at"`
CreatedBy string `json:"created_by"`
}
IndexEntity is the per-entity header row that the index holds for fast signal lookups. Frontmatter fields from §4.1.
type IndexOption ¶
type IndexOption func(*WikiIndex)
IndexOption configures a WikiIndex at construction.
func WithFactStore ¶
func WithFactStore(s FactStore) IndexOption
WithFactStore injects a custom FactStore. Defaults to inMemoryFactStore.
func WithTextIndex ¶
func WithTextIndex(t TextIndex) IndexOption
WithTextIndex injects a custom TextIndex. Defaults to inMemoryTextIndex.
type IntakeOutcome ¶ added in v0.193.0
type IntakeOutcome struct {
// TaskID is the broker-assigned task ID created in the intake state.
// Always non-empty on success.
TaskID string
// Spec is the validated spec the intake agent emitted. Always populated
// on success.
Spec Spec
// AutoAssign is non-empty when Spec.AutoAssign is non-empty. The CLI
// should drive Countdown to either auto-confirm (no keypress within
// 3s) or fall back to manual y/n confirm.
AutoAssign string
// Countdown is non-nil when AutoAssign is non-empty. CLI calls
// Countdown.Wait(); a keypress published via Countdown.Cancel()
// returns false (interrupted) so the caller can fall back to y/n.
// Nil when AutoAssign is empty.
Countdown *AutoAssignCountdown
}
IntakeOutcome is the package-public result of an intake run. The CLI (Lane F) consumes this shape: on success it gets the parsed Spec and the task ID; on AutoAssign it gets a Countdown handle to drive the 3-second cancellable confirm; on parse/validation failure the raw error surfaces with no partial state persisted.
type IntakeProvider ¶ added in v0.193.0
type IntakeProvider interface {
// CallSpecLLM sends systemPrompt + userPrompt (one user turn) to the
// underlying LLM and returns the raw text response. Implementations
// must respect ctx cancellation/timeout.
CallSpecLLM(ctx context.Context, systemPrompt, userPrompt string) (string, error)
}
IntakeProvider is the small interface the intake driver uses to talk to an LLM. It returns the raw text content; the caller parses the JSON-fenced spec block. The interface lives where it is consumed (per Go idiom) so tests inject fakes without dragging the live HTTP client into the test binary.
type JoinURLBuilder ¶ added in v0.127.0
JoinURLBuilder maps an invite token to a shareable URL. Required — NewShareTransport panics on a nil builder so a misconfigured launcher fails loudly rather than producing relative-path URLs that look fine until a remote user tries to click them.
type Launcher ¶
type Launcher struct {
// contains filtered or unexported fields
}
Launcher sets up and manages the multi-agent team.
func NewLauncher ¶
NewLauncher creates a launcher for the given operation blueprint or legacy pack.
func (*Launcher) AgentCount ¶
AgentCount returns the number of agents in the pack.
func (*Launcher) Attach ¶
Attach attaches the user's terminal to the tmux session. In iTerm2: uses tmux -CC for native panes (resizable, close buttons, drag). Otherwise: uses regular tmux attach with -L wuphf to avoid nesting.
func (*Launcher) Broker ¶ added in v0.92.0
Broker returns the Launcher's internal broker instance. Used by cmd/wuphf/main.go to wire workspace orchestration after launch.
func (*Launcher) BrokerBaseURL ¶
func (*Launcher) BrokerToken ¶
func (*Launcher) Drain ¶ added in v0.92.0
Drain cleanly stops every Launcher dispatch path so the broker can exit without leaving in-flight work mid-turn. Called by the broker's /admin/pause handler with a 60-second deadline.
Order matters:
- Flip the package-level draining flag so new pane-dispatch enqueues short-circuit before queueing more `/clear` cycles.
- Cancel the headless dispatch context so headless workers exit at their next outer-loop tick.
- Stop the watchdog scheduler so its goroutine drains.
- Wait for the headless WaitGroup with the caller's deadline. If the deadline expires, return a descriptive error so the caller can log the timeout — the process will still exit via the admin-pause hook.
Drain does NOT stop the broker, close the HTTP listener, or kill tmux panes. That happens after the caller invokes the admin-pause exit hook (os.Exit(0) in production), which bypasses tmux teardown intentionally: the orchestrator on the other side is the one issuing tmux-kill via tmuxKiller after the broker exits.
func (*Launcher) GenerateChannelTemplateFromPrompt ¶
func (*Launcher) GenerateChannelTemplateFromPromptCtx ¶ added in v0.185.1
func (*Launcher) GenerateMemberTemplateFromPrompt ¶
func (*Launcher) Kill ¶
Kill destroys the tmux session, all agent processes, and the broker. Also removes per-agent temp files (MCP config + system prompt) so the broker token and prompt content do not linger in $TMPDIR.
Drains every long-lived goroutine before broker.Stop and the per-launch tempdir RemoveAll. Pre-fix the scheduler goroutine outlived Kill (now drained via schedulerWorker.Stop) but the headless workers were only context-cancelled — cancel kicks the subprocess but the worker goroutine takes a tick to unwind, and it can race os.RemoveAll(launchTempDirPath) inside cleanupAgentTempFiles by writing a fresh per-agent prompt/MCP file into a directory the cleanup is removing.
Sequence:
- schedulerWorker.Stop() — drain watchdog
- headless.cancel() — kick subprocess
- stopHeadlessWorkers() — wait on workerWg
- broker.Stop() — listener teardown
- cleanupAgentTempFiles() — rm -rf launch dir
func (*Launcher) Launch ¶
Launch starts a tmux session hosting the channel-view TUI and the shared broker. Agents run headlessly by default via `claude --print` per turn; per-agent interactive panes are reserved as an internal fallback primitive (see trySpawnWebAgentPanes) and are not spawned at startup. The user attaches to tmux to drive the channel view; agent output is surfaced through the channel timeline rather than a dedicated pane.
func (*Launcher) LaunchWeb ¶
LaunchWeb starts the broker, web UI server, and background agents without tmux.
func (*Launcher) OneOnOneAgent ¶
OneOnOneAgent returns the active direct-session agent slug, if any.
func (*Launcher) Preflight ¶
Preflight checks that required tools are available.
The gh capability advisory runs after every successful runtime check (codex, opencode, claude+tmux), not just the claude branch. Pre-fix, codex/opencode launches missed the "gh CLI not found / not authed" note, leaving operators puzzled when their agents couldn't open PRs.
func (*Launcher) PreflightWeb ¶
PreflightWeb checks only for claude (no tmux requirement for web mode).
When the user has not yet completed onboarding we deliberately skip the runtime-binary check: the whole point of the web-mode onboarding wizard is to pick a runtime. Hard-failing here would make the binary unlaunchable until the user already had the CLI they were trying to pick. A missing runtime is still caught at first-dispatch time with a clear message once onboarding has committed a choice to ~/.wuphf/config.json.
func (*Launcher) ReconfigureSession ¶
func (*Launcher) ResetSession ¶
func (*Launcher) SetBrokerConfigurator ¶ added in v0.97.0
SetBrokerConfigurator registers startup wiring that must run immediately after the launcher constructs its broker and before that broker starts serving requests.
func (*Launcher) SetFocusMode ¶
SetFocusMode enables CEO-routed delegation mode.
func (*Launcher) SetOneOnOne ¶
func (*Launcher) SetOpusCEO ¶
SetOpusCEO upgrades the CEO agent from Sonnet to Opus.
func (*Launcher) SetUnsafe ¶
SetUnsafe enables unrestricted permissions for all agents (CLI-only flag).
func (*Launcher) UsesTmuxRuntime ¶
UsesTmuxRuntime reports whether agents run in tmux panes. Exported for cmd/wuphf/main.go and tests; thin delegator over the targeter.
type LearningLog ¶ added in v0.97.0
type LearningLog struct {
// contains filtered or unexported fields
}
func NewLearningLog ¶ added in v0.97.0
func NewLearningLog(worker *WikiWorker) *LearningLog
func (*LearningLog) Append ¶ added in v0.97.0
func (l *LearningLog) Append(ctx context.Context, rec LearningRecord) (LearningRecord, error)
func (*LearningLog) Search ¶ added in v0.97.0
func (l *LearningLog) Search(filters LearningSearchFilters) ([]LearningSearchResult, error)
type LearningRecord ¶ added in v0.97.0
type LearningRecord struct {
ID string `json:"id"`
Type LearningType `json:"type"`
Key string `json:"key"`
Insight string `json:"insight"`
Confidence int `json:"confidence"`
Source LearningSource `json:"source"`
Trusted bool `json:"trusted"`
Scope string `json:"scope"`
PlaybookSlug string `json:"playbook_slug,omitempty"`
ExecutionID string `json:"execution_id,omitempty"`
TaskID string `json:"task_id,omitempty"`
Files []string `json:"files,omitempty"`
Entities []string `json:"entities,omitempty"`
CreatedBy string `json:"created_by"`
CreatedAt time.Time `json:"created_at"`
Supersedes string `json:"supersedes,omitempty"`
}
type LearningSearchFilters ¶ added in v0.97.0
type LearningSearchFilters struct {
Query string
Scope string
Type LearningType
Source LearningSource
Trusted *bool
PlaybookSlug string
File string
Limit int
}
type LearningSearchResult ¶ added in v0.97.0
type LearningSearchResult struct {
LearningRecord
EffectiveConfidence int `json:"effective_confidence"`
}
type LearningSource ¶ added in v0.97.0
type LearningSource string
const ( LearningSourceUserStated LearningSource = "user-stated" LearningSourceObserved LearningSource = "observed" LearningSourceInferred LearningSource = "inferred" LearningSourceExecution LearningSource = "execution" LearningSourceSynthesis LearningSource = "synthesis" LearningSourceCrossAgent LearningSource = "cross-agent" LearningSourceCrossModel LearningSource = "cross-model" )
func ValidLearningSources ¶ added in v0.97.0
func ValidLearningSources() []LearningSource
type LearningType ¶ added in v0.97.0
type LearningType string
const ( LearningTypePattern LearningType = "pattern" LearningTypePitfall LearningType = "pitfall" LearningTypePreference LearningType = "preference" LearningTypeArchitecture LearningType = "architecture" LearningTypeTool LearningType = "tool" LearningTypeOperational LearningType = "operational" )
func ValidLearningTypes ¶ added in v0.97.0
func ValidLearningTypes() []LearningType
type LifecycleManifestSubKind ¶ added in v0.193.0
type LifecycleManifestSubKind string
LifecycleManifestSubKind labels the five lifecycle-bound manifest events emitted by the Decision Packet layer. Frontend consumers branch on this string to distinguish lifecycle events from per-turn manifests.
const ( LifecycleManifestSpecCreated LifecycleManifestSubKind = "spec.created" LifecycleManifestArtifactReady LifecycleManifestSubKind = "artifact.ready" LifecycleManifestReviewSubmitted LifecycleManifestSubKind = "review.submitted" LifecycleManifestDecisionRequired LifecycleManifestSubKind = "decision.required" LifecycleManifestDecisionRecorded LifecycleManifestSubKind = "decision.recorded" )
type LifecycleState ¶ added in v0.185.1
type LifecycleState string
LifecycleState is the typed source of truth for the multi-agent control loop. The canonical values plus LifecycleStateUnknown (migration fallback) form a closed enum; new states require updating both the forward-map (lifecycleDerivedFields) and the migration shim.
const ( // LifecycleStateUnknown is the migration fallback for tasks whose // derived-field tuple does not appear in lifecycleMigrationMap. The // broker logs a warning and surfaces the task as an explicit operator // decision instead of silently picking a state. LifecycleStateUnknown LifecycleState = "unknown" LifecycleStateIntake LifecycleState = "intake" LifecycleStateReady LifecycleState = "ready" LifecycleStateRunning LifecycleState = "running" LifecycleStateReview LifecycleState = "review" LifecycleStateDecision LifecycleState = "decision" LifecycleStateBlockedOnPRMerge LifecycleState = "blocked_on_pr_merge" LifecycleStateQueuedBehindOwner LifecycleState = "queued_behind_owner" LifecycleStateChangesRequested LifecycleState = "changes_requested" LifecycleStateMerged LifecycleState = "merged" )
func CanonicalLifecycleStates ¶ added in v0.185.1
func CanonicalLifecycleStates() []LifecycleState
CanonicalLifecycleStates returns the valid lifecycle states (excluding the unknown migration fallback) in stable order. Used by tests sweeping the forward map.
type Lint ¶
type Lint struct {
// contains filtered or unexported fields
}
Lint runs all five checks against the wiki index and commits the report.
func NewLint ¶
func NewLint(idx *WikiIndex, worker *WikiWorker, prov LintProvider) *Lint
NewLint constructs a Lint runner. All three arguments are required.
func (*Lint) ResolveContradiction ¶
func (l *Lint) ResolveContradiction(ctx context.Context, report LintReport, findingIdx int, winner string, identity HumanIdentity) error
ResolveContradiction resolves the contradiction at findings[findingIdx].
- winner == "A": appends supersedes:[B.id] to fact A; sets valid_until on B to now.
- winner == "B": mirrored.
- winner == "Both": appends contradicts_with:[other.id] to each; both stay valid.
All writes go through WikiWorker under the caller's git identity.
func (*Lint) Run ¶
func (l *Lint) Run(ctx context.Context) (LintReport, error)
Run executes all five lint checks, builds the report, commits it via WikiWorker, and returns the report. The returned report reflects only what was found — callers do not need to read the committed markdown to iterate findings.
type LintFinding ¶
type LintFinding struct {
// Severity is "critical" | "warning" | "info".
Severity string `json:"severity"`
// Type is one of: contradictions | orphans | stale | missing_crossrefs | dedup_review.
Type string `json:"type"`
// EntitySlug is the entity this finding relates to.
EntitySlug string `json:"entity_slug,omitempty"`
// FactIDs are the IDs of the facts involved in this finding.
FactIDs []string `json:"fact_ids,omitempty"`
// Summary is a human-readable description of the finding.
Summary string `json:"summary"`
// ResolveActions lists the choices presented to the user for contradiction
// findings: e.g. ["Fact A (id: abc123)", "Fact B (id: def456)", "Both"].
// Empty for non-contradiction findings.
ResolveActions []string `json:"resolve_actions,omitempty"`
}
LintFinding is one item in a LintReport.
type LintProvider ¶
type LintProvider interface {
// Query sends systemPrompt + userPrompt to the configured LLM and
// returns the raw response string or an error.
Query(ctx context.Context, systemPrompt, userPrompt string) (string, error)
}
LintProvider is the narrow interface the lint runner needs for LLM judgment calls. Production code wires in a closure over provider.RunConfiguredOneShot; tests substitute a deterministic fake.
type LintReport ¶
type LintReport struct {
// Date is the YYYY-MM-DD the lint ran.
Date string `json:"date"`
// Findings is the ordered list of findings (critical first, then warning, then info).
Findings []LintFinding `json:"findings"`
}
LintReport is the full output of one lint run.
type LocalProviderStatus ¶
type LocalProviderStatus struct {
Kind string `json:"kind"`
BinaryInstalled bool `json:"binary_installed"`
BinaryPath string `json:"binary_path,omitempty"`
BinaryVersion string `json:"binary_version,omitempty"`
Endpoint string `json:"endpoint"`
Model string `json:"model"`
Reachable bool `json:"reachable"`
LoadedModel string `json:"loaded_model,omitempty"`
Probed bool `json:"probed"`
ProbeSkippedNote string `json:"probe_skipped_note,omitempty"`
PlatformSupported bool `json:"platform_supported"`
WindowsNote string `json:"windows_note,omitempty"`
Install map[string]string `json:"install,omitempty"`
Start map[string]string `json:"start,omitempty"`
Notes []string `json:"notes,omitempty"`
}
LocalProviderStatus is the doctor-panel payload for one local OpenAI- compatible runtime. The Settings UI renders one card per kind: green (Reachable=true), yellow (BinaryInstalled=true but Reachable=false — installed but not started), or red (BinaryInstalled=false). The Install field carries copy-paste shell snippets the user runs themselves; the broker never executes them.
type MaintenanceAction ¶ added in v0.149.0
type MaintenanceAction string
MaintenanceAction is the discriminator for the 7 supported actions.
const ( MaintActionSummarize MaintenanceAction = "summarize" MaintActionAddCitation MaintenanceAction = "add_citation" MaintActionExtractFacts MaintenanceAction = "extract_facts" MaintActionResolveContradiction MaintenanceAction = "resolve_contradiction" MaintActionSplitLong MaintenanceAction = "split_long_page" MaintActionLinkRelated MaintenanceAction = "link_related" MaintActionRefreshStale MaintenanceAction = "refresh_stale" )
type MaintenanceAssistant ¶ added in v0.149.0
type MaintenanceAssistant struct {
// contains filtered or unexported fields
}
MaintenanceAssistant computes suggestions for one article + action pair. All inputs are existing broker subsystems; no new state is introduced.
func NewMaintenanceAssistant ¶ added in v0.149.0
func NewMaintenanceAssistant(worker *WikiWorker, index *WikiIndex, lint *Lint) *MaintenanceAssistant
NewMaintenanceAssistant wires the assistant to its dependencies. worker is required (provides on-disk article reads + repo head SHA). index and lint are optional — when nil, actions that need them return Skipped responses.
func (*MaintenanceAssistant) Suggest ¶ added in v0.149.0
func (m *MaintenanceAssistant) Suggest(ctx context.Context, action MaintenanceAction, articlePath string) (MaintenanceSuggestion, error)
Suggest dispatches to the action-specific computer.
type MaintenanceDiff ¶ added in v0.149.0
type MaintenanceDiff struct {
// ProposedContent is the full new article body. Empty for actions that
// do not modify the article body (extract_facts, resolve_contradiction).
ProposedContent string `json:"proposed_content,omitempty"`
// Added is the list of newly-introduced lines (in order).
Added []string `json:"added,omitempty"`
// Removed is the list of removed lines (in order).
Removed []string `json:"removed,omitempty"`
}
MaintenanceDiff is the proposed change to the article body. v1 carries the whole proposed content plus the added / removed line lists so the UI can render a small unified-diff-style preview.
type MaintenanceEvidence ¶ added in v0.149.0
type MaintenanceEvidence struct {
// Kind is "wiki_article" | "fact" | "lint_finding" | "edit_log".
Kind string `json:"kind"`
// Label is the short human-readable name (article title, predicate,
// finding type).
Label string `json:"label"`
// Path is the wiki path or fact id the evidence points at. Empty when
// the evidence is purely textual.
Path string `json:"path,omitempty"`
// Snippet is a short excerpt the UI can render verbatim.
Snippet string `json:"snippet,omitempty"`
}
MaintenanceEvidence is one piece of source material the suggestion was derived from. The UI links each item back to its origin.
type MaintenanceFactProposal ¶ added in v0.149.0
type MaintenanceFactProposal struct {
Subject string `json:"subject"`
Predicate string `json:"predicate"`
Object string `json:"object"`
Confidence float64 `json:"confidence"`
// SourceLine is the article-relative line index the fact was extracted
// from (1-based for display). Lets the UI highlight context.
SourceLine int `json:"source_line,omitempty"`
}
MaintenanceFactProposal is one proposed structured fact for the extract_facts action. The user reviews each one in the side panel; only accepted facts go to the fact log on commit.
type MaintenanceSuggestion ¶ added in v0.149.0
type MaintenanceSuggestion struct {
Action MaintenanceAction `json:"action"`
Title string `json:"title"`
Description string `json:"description"`
// Diff is populated for body-mutating actions.
Diff *MaintenanceDiff `json:"diff,omitempty"`
// Facts is populated for extract_facts.
Facts []MaintenanceFactProposal `json:"facts,omitempty"`
// Evidence is the list of source pointers the suggestion drew from.
Evidence []MaintenanceEvidence `json:"evidence,omitempty"`
// LintFinding is populated for resolve_contradiction. The UI uses this
// to redirect into the existing ResolveContradictionModal flow.
LintFinding *LintFinding `json:"lint_finding,omitempty"`
// LintReportDate, LintFindingIdx pair the finding with its report.
LintReportDate string `json:"lint_report_date,omitempty"`
LintFindingIdx int `json:"lint_finding_idx,omitempty"`
// ExpectedSHA is the article SHA at the time the suggestion was
// computed. Sent back when the user accepts so the WikiEditor save
// path can detect stale suggestions exactly like a stale editor open.
ExpectedSHA string `json:"expected_sha,omitempty"`
// Skipped is true when no suggestion was warranted (e.g. page is short
// enough that split_long_page is not useful, or no contradictions exist).
// The UI shows a "nothing to do" state rather than an empty diff.
Skipped bool `json:"skipped,omitempty"`
SkippedReason string `json:"skipped_reason,omitempty"`
}
MaintenanceSuggestion is the single-action response from the assistant.
type MemoryBackendStatus ¶
type MemoryBackendStatus struct {
SelectedKind string
SelectedLabel string
ActiveKind string
ActiveLabel string
Detail string
NextStep string
}
func ResolveMemoryBackendStatus ¶
func ResolveMemoryBackendStatus() MemoryBackendStatus
type MemoryWorkflow ¶ added in v0.95.0
type MemoryWorkflow struct {
Required bool `json:"required"`
Status string `json:"status,omitempty"`
RequirementReason string `json:"requirement_reason,omitempty"`
RequiredSteps []MemoryWorkflowStep `json:"required_steps,omitempty"`
Lookup MemoryWorkflowStepState `json:"lookup,omitempty"`
Capture MemoryWorkflowStepState `json:"capture,omitempty"`
Promote MemoryWorkflowStepState `json:"promote,omitempty"`
Citations []ContextCitation `json:"citations,omitempty"`
Captures []MemoryWorkflowArtifact `json:"captures,omitempty"`
Promotions []MemoryWorkflowArtifact `json:"promotions,omitempty"`
Override *MemoryWorkflowOverride `json:"override,omitempty"`
PartialErrors []string `json:"partial_errors,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
CompletedAt string `json:"completed_at,omitempty"`
}
type MemoryWorkflowArtifact ¶ added in v0.95.0
type MemoryWorkflowArtifact struct {
Backend string `json:"backend,omitempty"`
Source string `json:"source,omitempty"`
Path string `json:"path,omitempty"`
PageID string `json:"page_id,omitempty"`
PromotionID string `json:"promotion_id,omitempty"`
EntityKind string `json:"entity_kind,omitempty"`
EntitySlug string `json:"entity_slug,omitempty"`
PlaybookSlug string `json:"playbook_slug,omitempty"`
Title string `json:"title,omitempty"`
SkipReason string `json:"skip_reason,omitempty"`
Snippet string `json:"snippet,omitempty"`
CommitSHA string `json:"commit_sha,omitempty"`
State string `json:"state,omitempty"`
RecordedAt string `json:"recorded_at,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
Missing bool `json:"missing,omitempty"`
}
type MemoryWorkflowOverride ¶ added in v0.95.0
type MemoryWorkflowReconcileReport ¶ added in v0.95.0
type MemoryWorkflowReconcileReport struct {
Checked int `json:"checked"`
Repaired int `json:"repaired"`
Timestamp string `json:"timestamp"`
Tasks []MemoryWorkflowReconcileTaskResult `json:"tasks,omitempty"`
MissingArtifacts []MemoryWorkflowArtifact `json:"missing_artifacts,omitempty"`
}
type MemoryWorkflowReconcileTaskResult ¶ added in v0.95.0
type MemoryWorkflowReconcileTaskResult struct {
TaskID string `json:"task_id"`
Changed bool `json:"changed"`
Repairs []string `json:"repairs,omitempty"`
MissingArtifacts []MemoryWorkflowArtifact `json:"missing_artifacts,omitempty"`
}
type MemoryWorkflowReconciler ¶ added in v0.95.0
type MemoryWorkflowReconciler struct {
// contains filtered or unexported fields
}
func NewMemoryWorkflowReconciler ¶ added in v0.95.0
func NewMemoryWorkflowReconciler(worker *WikiWorker, review *ReviewLog, now func() time.Time) *MemoryWorkflowReconciler
func (*MemoryWorkflowReconciler) ReconcileTasks ¶ added in v0.95.0
func (r *MemoryWorkflowReconciler) ReconcileTasks(ctx context.Context, tasks []teamTask) ([]teamTask, MemoryWorkflowReconcileReport, error)
type MemoryWorkflowStep ¶ added in v0.95.0
type MemoryWorkflowStep string
const ( MemoryWorkflowStepLookup MemoryWorkflowStep = "lookup" MemoryWorkflowStepCapture MemoryWorkflowStep = "capture" MemoryWorkflowStepPromote MemoryWorkflowStep = "promote" )
type MemoryWorkflowStepState ¶ added in v0.95.0
type MemoryWorkflowStepState struct {
Required bool `json:"required,omitempty"`
Status string `json:"status,omitempty"`
Actor string `json:"actor,omitempty"`
Query string `json:"query,omitempty"`
CompletedAt string `json:"completed_at,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
Count int `json:"count,omitempty"`
}
type NotebookDemandIndex ¶ added in v0.138.0
type NotebookDemandIndex struct {
// contains filtered or unexported fields
}
NotebookDemandIndex aggregates PromotionDemandEvents into per-entry rolling scores backed by an append-only JSONL log. All methods are safe for concurrent callers.
func NewNotebookDemandIndex ¶ added in v0.138.0
func NewNotebookDemandIndex(logPath string) (*NotebookDemandIndex, error)
NewNotebookDemandIndex opens (or creates) the JSONL log at logPath, replays existing events into the in-memory map, and returns a ready index. Window and threshold honour env overrides; invalid env values fall back to the defaults with a warn log.
func NewNotebookDemandIndexForTest ¶ added in v0.193.0
func NewNotebookDemandIndexForTest( logPath string, clock func() time.Time, ) (*NotebookDemandIndex, error)
NewNotebookDemandIndexForTest mirrors NewNotebookDemandIndex but lets the caller inject a clock used during the initial replay. SetClockForTest can only override the clock AFTER construction, but replay() uses the clock to compute the window cutoff and runs inside the constructor — so a test that records events at a fixed past date and then reloads the index needs the fake clock from the start. Production code uses NewNotebookDemandIndex.
func (*NotebookDemandIndex) AutoEscalateDemandCandidates ¶ added in v0.138.0
func (idx *NotebookDemandIndex) AutoEscalateDemandCandidates( ctx context.Context, reviewLog *ReviewLog, reader promotionDemandReader, ) error
AutoEscalateDemandCandidates submits any entry whose rolling score has breached the threshold to the review log as a new pending promotion. Idempotent: an entry already escalated (in-memory tracker) is not re-submitted, and entries whose path no longer exists on disk are skipped.
func (*NotebookDemandIndex) Record ¶ added in v0.138.0
func (idx *NotebookDemandIndex) Record(evt PromotionDemandEvent) error
Record persists a demand event (append to JSONL) and updates the in-memory score map. Same-day duplicates from the same searcher on the same entry collapse into one record.
func (*NotebookDemandIndex) Score ¶ added in v0.138.0
func (idx *NotebookDemandIndex) Score(entryPath string) float64
Score returns the current rolling-window score for entryPath. Events older than windowDays are excluded.
func (*NotebookDemandIndex) SetClockForTest ¶ added in v0.138.0
func (idx *NotebookDemandIndex) SetClockForTest(clock func() time.Time)
SetClockForTest overrides the clock used for window-expiry checks. Test-only.
func (*NotebookDemandIndex) Threshold ¶ added in v0.138.0
func (idx *NotebookDemandIndex) Threshold() float64
Threshold returns the configured auto-escalation threshold. Useful for observability surfaces.
func (*NotebookDemandIndex) TopCandidates ¶ added in v0.138.0
func (idx *NotebookDemandIndex) TopCandidates(n int) []DemandCandidate
TopCandidates returns up to n candidates sorted by descending score. Entries with zero or negative score are excluded.
func (*NotebookDemandIndex) WaitForCondition ¶ added in v0.138.0
func (idx *NotebookDemandIndex) WaitForCondition(ctx context.Context, predicate func() bool) error
WaitForCondition blocks until predicate returns true or ctx is cancelled. Test-only helper modelled on AutoNotebookWriter.WaitForCondition.
func (*NotebookDemandIndex) WindowDays ¶ added in v0.138.0
func (idx *NotebookDemandIndex) WindowDays() int
WindowDays returns the configured sliding-window length in days.
type NotebookEntry ¶
type NotebookEntry struct {
Path string `json:"path"`
Title string `json:"title"`
Modified time.Time `json:"modified"`
SizeBytes int64 `json:"size_bytes"`
}
NotebookEntry summarises one entry in an agent's notebook catalog. Ordered by filename reverse-chron (dated-prefix filenames sort naturally).
type NotebookSignalScanner ¶ added in v0.86.0
type NotebookSignalScanner struct {
// contains filtered or unexported fields
}
NotebookSignalScanner walks team/agents/*/notebook/**/*.md and clusters cross-agent entries — either by token-overlap similarity (legacy Jaccard path) or by cosine similarity over real text embeddings (new semantic path). Each qualifying cluster becomes a SkillCandidate.
The two paths produce SkillCandidate values with the same shape; only the clustering algorithm differs. embeddingClusteringEnabled (see notebook_signal_scanner_embeddings.go) decides which path runs.
func NewNotebookSignalScanner ¶ added in v0.86.0
func NewNotebookSignalScanner(b *Broker) *NotebookSignalScanner
NewNotebookSignalScanner constructs a scanner with defaults pulled from env (or the documented fallbacks):
WUPHF_STAGE_B_NOTEBOOK_MIN_CLUSTER → minClusterSize (default 2) WUPHF_STAGE_B_NOTEBOOK_MIN_AGENTS → minDistinctAgents (default 2) WUPHF_STAGE_B_NOTEBOOK_SIMILARITY → similarityThreshold (default 0.6) WUPHF_STAGE_B_NOTEBOOK_MAX_PER_PASS → maxCandidatesPerPass (default 10)
func (*NotebookSignalScanner) Scan ¶ added in v0.86.0
func (s *NotebookSignalScanner) Scan(ctx context.Context) ([]SkillCandidate, error)
Scan walks team/agents/*/notebook/**/*.md, tokenises each entry, clusters them by Jaccard similarity, and emits one SkillCandidate per cluster that passes minClusterSize + minDistinctAgents. Returns up to maxCandidatesPerPass candidates ordered by SignalCount desc.
func (*NotebookSignalScanner) SetEmbeddingCache ¶ added in v0.105.0
func (s *NotebookSignalScanner) SetEmbeddingCache(c *embedding.Cache)
SetEmbeddingCache replaces the embedding cache. Tests pass a temp-dir-backed cache; production uses the lazy default from RuntimeHomeDir.
func (*NotebookSignalScanner) SetEmbeddingProvider ¶ added in v0.105.0
func (s *NotebookSignalScanner) SetEmbeddingProvider(p embedding.Provider)
SetEmbeddingProvider replaces the embedding provider — primarily a test seam so unit tests can dependency-inject a deterministic stub without touching env vars. Passing nil disables embedding-based clustering for this scanner until a non-nil provider is set.
type OnboardingFields ¶ added in v0.92.0
type OnboardingFields struct {
CompanyDescription string `json:"company_description,omitempty"`
CompanyPriority string `json:"company_priority,omitempty"`
LLMProvider string `json:"llm_provider,omitempty"`
TeamLeadSlug string `json:"team_lead_slug,omitempty"`
}
OnboardingFields mirrors internal/workspaces.OnboardingFields. The CLI adapter maps between the two; defining the shape here keeps the broker's public surface self-contained.
type OpenclawBridge ¶
type OpenclawBridge struct {
// contains filtered or unexported fields
}
OpenclawBridge adapts OpenClaw Gateway sessions into WUPHF office members.
func BuildOpenclawBridgeFromConfig ¶ added in v0.125.0
func BuildOpenclawBridgeFromConfig(broker *Broker) (*OpenclawBridge, error)
BuildOpenclawBridgeFromConfig reads persisted OpenClaw bridge bindings from config and, if any are configured, constructs (but does not Start) an OpenclawBridge. Returns (nil, nil) when no bindings are configured so callers can treat the integration as strictly opt-in.
Build is the host-driven entrypoint: callers (the launcher) own the bridge lifecycle via OpenclawBridge.Run, which attaches a transport.Host before the supervised loop starts. The legacy StartOpenclawBridgeFromConfig wraps Build + Start for callers that drive the bridge directly (probes, tests).
func NewOpenclawBridge ¶
func NewOpenclawBridge(broker *Broker, client openclawClient, bindings []config.OpenclawBridgeBinding) *OpenclawBridge
NewOpenclawBridge constructs a bridge with a single preconstructed client. It does not dial until Start is called. For supervised reconnects, use NewOpenclawBridgeWithDialer.
func NewOpenclawBridgeWithDialer ¶
func NewOpenclawBridgeWithDialer(broker *Broker, initial openclawClient, dialer openclawDialer, bindings []config.OpenclawBridgeBinding) *OpenclawBridge
NewOpenclawBridgeWithDialer constructs a bridge that supervises reconnects. If initial is non-nil, that client is used for the first session; subsequent sessions use the dialer. If initial is nil, dialer must be non-nil.
func StartOpenclawBridgeFromConfig ¶
func StartOpenclawBridgeFromConfig(ctx context.Context, broker *Broker) (*OpenclawBridge, error)
StartOpenclawBridgeFromConfig builds the bridge and Starts it directly, bypassing transport.Host attachment. Inbound messages route via broker.PostInboundSurfaceMessage rather than host.ReceiveMessage. Use BuildOpenclawBridgeFromConfig + OpenclawBridge.Run to opt into the host-driven path.
func (*OpenclawBridge) AttachSlug ¶
func (b *OpenclawBridge) AttachSlug(slug, sessionKey string)
AttachSlug binds an office member slug to a session key. It updates the bridge's slug→key and key→slug maps so inbound events route to the right member. The contract requires no error/ctx return: callers that need to surface subscribe failures (HTTP handlers) must use [AttachSlugAndSubscribe] instead. AttachSlug performs a best-effort async subscribe via the bridge's stored ctx so the contract-level call still establishes the gateway subscription, but failures are logged via system message rather than returned. Idempotent: re-attaching the same pair is a no-op.
func (*OpenclawBridge) AttachSlugAndSubscribe ¶ added in v0.122.0
func (b *OpenclawBridge) AttachSlugAndSubscribe(ctx context.Context, slug, sessionKey string) error
AttachSlugAndSubscribe is the synchronous, error-returning variant used by HTTP handlers that need to fail the request when the gateway subscription fails. Updates the slug→key/key→slug maps and synchronously subscribes via the supplied ctx. Idempotent: re-attaching the same pair is a no-op.
func (*OpenclawBridge) Binding ¶ added in v0.122.0
func (b *OpenclawBridge) Binding() transport.Binding
Binding declares the adapter scope. OpenClaw is member-bound: each bridged session becomes an office member, but the bridge itself does not anchor to a single member slug (it manages many). Like Telegram's multi-channel pattern, we return a zero MemberSlug — honest declaration that no static member is bound at the adapter level.
func (*OpenclawBridge) CreateSession ¶
CreateSession calls sessions.create on the gateway and returns the new key. Used by handleOfficeMembers when a user hires a new openclaw agent without supplying an existing session key (the "auto-create" path).
func (*OpenclawBridge) DetachSession ¶ added in v0.99.9
func (b *OpenclawBridge) DetachSession(ctx context.Context, slug, sessionKey string) error
DetachSession unsubscribes a specific session key for slug without disturbing a newer slug binding that may have been attached after this session key.
func (*OpenclawBridge) DetachSlug ¶
func (b *OpenclawBridge) DetachSlug(slug string)
DetachSlug removes the binding between slug and its session key. The contract requires no error/ctx return: callers that need to surface unsubscribe failures must use [DetachSlugAndUnsubscribe] instead. DetachSlug performs a best-effort async unsubscribe via the bridge's stored ctx; failures are logged via system message rather than returned.
func (*OpenclawBridge) DetachSlugAndUnsubscribe ¶ added in v0.122.0
func (b *OpenclawBridge) DetachSlugAndUnsubscribe(ctx context.Context, slug string) error
DetachSlugAndUnsubscribe is the synchronous, error-returning variant used by HTTP handlers. Local state is always cleared so the slug frees up regardless of network outcome; the returned error informs the caller that the remote session may be leaked. Returns an error when no bridge client is connected so the caller can surface the failed remote teardown rather than silently orphan the upstream subscription.
func (*OpenclawBridge) HasSlug ¶
func (b *OpenclawBridge) HasSlug(slug string) bool
HasSlug reports whether the given slug is bound to a bridged OpenClaw session. Used by the launcher's mention dispatcher to decide whether to route a tagged message through the bridge instead of (or in addition to) the normal agent-spawn path.
func (*OpenclawBridge) Health ¶ added in v0.122.0
func (b *OpenclawBridge) Health() transport.Health
Health returns a point-in-time connectivity snapshot. Reads only the cached health fields under healthMu so it is O(1) and safe to call on every channel-header render (per Transport.Health contract).
func (*OpenclawBridge) Name ¶ added in v0.122.0
func (b *OpenclawBridge) Name() string
Name returns the stable adapter identifier.
func (*OpenclawBridge) OnOfficeMessage ¶
func (b *OpenclawBridge) OnOfficeMessage(ctx context.Context, slug, channel, message string) error
OnOfficeMessage sends a human-authored message to the OpenClaw agent identified by slug. The channel argument is where the reply should land (e.g. "general" for @mentions, a DM slug like "human__pm-bot" for DMs). Retries on transient errors with a SINGLE reused idempotency key so the gateway can deduplicate.
Reply routing: OpenClaw streams the assistant reply via an async event. We remember this channel here, keyed by the session key; handleClientEvent reads it when the reply arrives. If channel is empty we fall back to "general" so older callers and probes keep working.
func (*OpenclawBridge) Run ¶ added in v0.122.0
Run starts the supervised bridge and blocks until ctx is cancelled. The host is attached before Start so handleClientEvent routes inbound assistant messages through host.ReceiveMessage instead of writing to the broker directly. A nil host is rejected so a misconfigured launcher fails loudly rather than silently degrading to the legacy broker entrypoint.
func (*OpenclawBridge) Send ¶ added in v0.122.0
Send delivers one outbound message from the office to the bridged agent. Routes via OnOfficeMessage, which handles retries with a single reused idempotency key. The Outbound.Binding.MemberSlug identifies the target agent; ChannelSlug carries the reply-routing hint that handleClientEvent uses when the assistant reply arrives via the async event stream.
func (*OpenclawBridge) SetRetryDelaysForTest ¶
func (b *OpenclawBridge) SetRetryDelaysForTest(d []time.Duration)
SetRetryDelaysForTest is only used by tests.
func (*OpenclawBridge) SnapshotBindings ¶
func (b *OpenclawBridge) SnapshotBindings() map[string]string
SnapshotBindings returns a copy of the current slug→sessionKey mapping. Used by runOnce on reconnect to re-subscribe every attached slug.
func (*OpenclawBridge) Start ¶
func (b *OpenclawBridge) Start(ctx context.Context) error
Start launches the supervised reconnect loop.
func (*OpenclawBridge) Stop ¶
func (b *OpenclawBridge) Stop()
Stop cancels the bridge context, closes the client, and waits for the event loop to drain.
type PamAction ¶
type PamAction struct {
ID PamActionID
Label string // human-facing label for the desk menu
SystemPrompt string // locked system prompt — do not edit without review
UserPromptTmpl string // fmt pattern; takes article body
// AllowedTools is currently advisory only — not passed to the sub-process.
// Reserved for future tool-restriction plumbing.
AllowedTools []string
CommitMsgTmpl string // fmt pattern; takes article path
}
PamAction describes a single job Pam can run. The prompt template is a plain fmt.Sprintf template: %s is replaced with the article body.
func LookupPamAction ¶
func LookupPamAction(id PamActionID) (PamAction, error)
LookupPamAction returns the registered action for id, or ErrUnknownPamAction.
func PamActions ¶
func PamActions() []PamAction
PamActions returns the registry in menu order. The returned slice — and every PamAction inside it — is a defensive copy so callers can't mutate the registry (including via shared AllowedTools backing arrays).
type PamActionDoneEvent ¶
type PamActionDoneEvent struct {
JobID uint64 `json:"job_id"`
Action string `json:"action"`
ArticlePath string `json:"article_path"`
CommitSHA string `json:"commit_sha"`
FinishedAt string `json:"finished_at"`
}
PamActionDoneEvent is broadcast when Pam commits the result of a job.
type PamActionFailedEvent ¶
type PamActionFailedEvent struct {
JobID uint64 `json:"job_id"`
Action string `json:"action"`
ArticlePath string `json:"article_path"`
Error string `json:"error"`
FailedAt string `json:"failed_at"`
}
PamActionFailedEvent is broadcast when Pam could not finish a job. The Error field is the short reason for the UI; details go to the server log.
type PamActionID ¶
type PamActionID string
PamActionID is a typed string so callers can't pass arbitrary action names.
const ( // PamActionEnrichArticle: pull fresh info + media from the web and fold it // into the article body. First action shipped — v1 of Pam's desk menu. PamActionEnrichArticle PamActionID = "enrich_article" )
type PamActionStartedEvent ¶
type PamActionStartedEvent struct {
JobID uint64 `json:"job_id"`
Action string `json:"action"`
ArticlePath string `json:"article_path"`
RequestBy string `json:"request_by"`
StartedAt string `json:"started_at"`
}
PamActionStartedEvent is broadcast when Pam begins a job.
type PamDispatcher ¶
type PamDispatcher struct {
// contains filtered or unexported fields
}
PamDispatcher serializes Pam's work. Like the entity + playbook synthesizers, only one job runs at a time — otherwise two archivist commits could race on the WikiWorker queue.
func NewPamDispatcher ¶
func NewPamDispatcher(worker pamWiki, publisher pamEventPublisher, cfg PamDispatcherConfig) *PamDispatcher
NewPamDispatcher wires a dispatcher against the given worker. The publisher may be nil in tests that don't care about SSE fan-out. Worker is typed as the narrow `pamWiki` interface — *WikiWorker satisfies it today, and any future test seam or alternative backend only needs Enqueue + ReadArticle.
func (*PamDispatcher) Enqueue ¶
func (d *PamDispatcher) Enqueue(action PamActionID, articlePath, requestBy string) (uint64, error)
Enqueue submits a Pam job. Coalesces per (action, path): repeated clicks while a job is running fold into at most one follow-up. On a coalesce hit the existing job's id is returned — zero is reserved for errors.
func (*PamDispatcher) Start ¶
func (d *PamDispatcher) Start(ctx context.Context)
Start launches the drain goroutine. Idempotent.
func (*PamDispatcher) Stop ¶
func (d *PamDispatcher) Stop()
Stop signals the worker to exit. Pending jobs are dropped.
type PamDispatcherConfig ¶
PamDispatcherConfig tunes the dispatcher. Zero values -> defaults.
type PamJob ¶
type PamJob struct {
Action PamActionID
ArticlePath string
RequestBy string
EnqueuedAt time.Time
ID uint64
}
PamJob is one pending action for a specific article.
type PamRunner ¶
type PamRunner interface {
Run(ctx context.Context, systemPrompt, userPrompt string) (string, error)
}
PamRunner runs a single Pam turn as a sub-process. Implementations decide how to execute that sub-process (e.g. headless one-shot via the configured provider CLI).
type PlaybookExecutionRecordedEvent ¶
type PlaybookExecutionRecordedEvent struct {
// Slug is the playbook slug (team/playbooks/{slug}.md).
Slug string `json:"slug"`
// Path is the jsonl log path that received the append. Callers use it
// to avoid a second HTTP round-trip when they want to refetch.
Path string `json:"path"`
// CommitSHA is the short sha produced by CommitPlaybookExecution.
CommitSHA string `json:"commit_sha"`
// RecordedBy is the author slug the commit is attributed to.
RecordedBy string `json:"recorded_by"`
// Timestamp is the RFC3339 wall-clock time when the commit completed.
Timestamp string `json:"timestamp"`
}
PlaybookExecutionRecordedEvent is the SSE payload broadcast when an execution-log entry lands. The UI subscribes by named event `playbook:execution_recorded` — follow the pattern already established by the entity + notebook event streams.
type PlaybookOutcome ¶
type PlaybookOutcome string
PlaybookOutcome is the narrow set of states a run can end in.
const ( PlaybookOutcomeSuccess PlaybookOutcome = "success" PlaybookOutcomePartial PlaybookOutcome = "partial" PlaybookOutcomeAborted PlaybookOutcome = "aborted" )
func ValidPlaybookOutcomes ¶
func ValidPlaybookOutcomes() []PlaybookOutcome
ValidPlaybookOutcomes is the whitelist used by the validator.
type PlaybookSummary ¶
type PlaybookSummary struct {
Slug string `json:"slug"`
Title string `json:"title"`
SourcePath string `json:"source_path"`
SkillPath string `json:"skill_path"`
SkillExists bool `json:"skill_exists"`
ExecutionLogPath string `json:"execution_log_path"`
ExecutionCount int `json:"execution_count"`
RunnableByAgents []string `json:"runnable_by_agents"`
}
PlaybookSummary is one row returned by GET /playbook/list.
type PlaybookSynthesisJob ¶
type PlaybookSynthesisJob struct {
Slug string
RequestBy string
EnqueuedAt time.Time
// ID is a monotonic counter so callers can correlate responses.
ID uint64
}
PlaybookSynthesisJob is one pending synthesis request for a specific slug.
type PlaybookSynthesizedEvent ¶
type PlaybookSynthesizedEvent struct {
Slug string `json:"slug"`
CommitSHA string `json:"commit_sha"`
ExecutionCount int `json:"execution_count"`
SynthesizedTS string `json:"synthesized_ts"`
SourcePath string `json:"source_path"`
TriggeredByUser bool `json:"triggered_by_user"`
}
PlaybookSynthesizedEvent is the SSE payload broadcast after every successful synthesis commit. Kept distinct from the SKILL.md compile path — the UI cares about the learnings landing, not the recompile.
type PlaybookSynthesizer ¶
type PlaybookSynthesizer struct {
// contains filtered or unexported fields
}
PlaybookSynthesizer is the broker-level synthesis worker for playbook learnings. Single-writer via a drain goroutine so only one archivist commit is in flight at a time across the whole wiki.
func NewPlaybookSynthesizer ¶
func NewPlaybookSynthesizer(worker *WikiWorker, execLog *ExecutionLog, publisher playbookSynthEventPublisher, cfg PlaybookSynthesizerConfig) *PlaybookSynthesizer
NewPlaybookSynthesizer wires a synthesizer against the given worker + execution log. Config may be zero; defaults are filled in here.
func (*PlaybookSynthesizer) EnqueueSynthesis ¶
func (s *PlaybookSynthesizer) EnqueueSynthesis(slug, requestBy string, triggeredByUser bool) (uint64, error)
EnqueueSynthesis adds a synthesis job if none is already in-flight or queued for the same slug. Returns the assigned job ID (or 0 when the request was coalesced).
func (*PlaybookSynthesizer) OnExecutionRecorded ¶
func (s *PlaybookSynthesizer) OnExecutionRecorded(slug string)
OnExecutionRecorded is the hook the broker calls after every append to the execution log. It fetches the current execution count, compares against the last-synthesized count in the playbook frontmatter, and enqueues a synthesis if the delta meets the threshold.
Non-blocking: all work happens inline but returns immediately; errors are logged and swallowed so the caller's /playbook/execution handler stays fast.
func (*PlaybookSynthesizer) Start ¶
func (s *PlaybookSynthesizer) Start(ctx context.Context)
Start launches the synthesis loop. Returns immediately. Stop via ctx or Stop().
func (*PlaybookSynthesizer) Stop ¶
func (s *PlaybookSynthesizer) Stop()
Stop signals the worker to exit. Pending jobs are dropped.
func (*PlaybookSynthesizer) SynthesizeNow ¶
func (s *PlaybookSynthesizer) SynthesizeNow(ctx context.Context, slug, actor string) (uint64, error)
SynthesizeNow runs a synthesis for the given slug synchronously. Used by the POST /playbook/synthesize endpoint and the MCP tool for on-demand refresh. Returns once the commit is in the wiki queue (not when it is written — the caller can read-back to confirm).
func (*PlaybookSynthesizer) Threshold ¶
func (s *PlaybookSynthesizer) Threshold() int
Threshold returns the current synthesis threshold.
type PlaybookSynthesizerConfig ¶
type PlaybookSynthesizerConfig struct {
Threshold int
Timeout time.Duration
// LLMCall is the pluggable shell-out used by tests. Production leaves
// this nil and the worker falls back to defaultLLMCall from
// entity_synthesizer.go (provider.RunConfiguredOneShot).
LLMCall func(ctx context.Context, systemPrompt, userPrompt string) (string, error)
// ClusterSource enables the Slice 2 Thread C v2 prompt. When non-nil,
// synthesize() queries this store for reinforced (predicate, object)
// pairs shared across ≥ ClusterMinEntities distinct entities and feeds
// them into prompts/synthesis_playbook_v2.tmpl. When nil, the v1 prompt
// builder runs unchanged — additive rollout, no hot-path regression.
ClusterSource FactStore
// ClusterMinEntities overrides DefaultClusterMinEntities. Values ≤ 0
// use the default. Small-workspace operators can drop this to 2 to
// surface patterns earlier; the default of 3 is deliberately
// conservative per WIKI-SLICE2-PLAN.md Thread C.
ClusterMinEntities int
}
PlaybookSynthesizerConfig tunes the worker. Zero values -> defaults.
type Promotion ¶
type Promotion struct {
ID string `json:"id"`
State PromotionState `json:"state"`
SourceSlug string `json:"source_slug"`
SourcePath string `json:"source_path"`
TargetPath string `json:"target_path"`
Rationale string `json:"rationale"`
ReviewerSlug string `json:"reviewer_slug"`
HumanOnly bool `json:"human_only"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
ExpiresAt time.Time `json:"expires_at"`
ApprovedAt *time.Time `json:"approved_at,omitempty"`
CommitSHA string `json:"commit_sha,omitempty"`
Comments []Comment `json:"comments"`
StateHistory []StateTransition `json:"state_history"`
}
Promotion is the in-memory canonical view of a single promotion. It is rebuilt by replaying the JSONL log; no line of the log carries the whole Promotion on its own once comments or further transitions attach to it.
type PromotionDemandEvent ¶ added in v0.138.0
type PromotionDemandEvent struct {
EntryPath string `json:"entry_path"`
OwnerSlug string `json:"owner_slug"`
SearcherSlug string `json:"searcher_slug"`
Signal PromotionDemandSignal `json:"signal"`
RecordedAt time.Time `json:"recorded_at"`
}
PromotionDemandEvent is the JSONL record persisted to <wiki_root>/.promotion-demand/events.jsonl. Field tags are stable wire format; renaming any field is a breaking change.
type PromotionDemandSignal ¶ added in v0.138.0
type PromotionDemandSignal int
PromotionDemandSignal enumerates the wire-format demand signal kinds. New signal types added by future PRs MUST extend this enum and the corresponding signalWeight() mapping; do not invent ad-hoc weights at call sites.
const (
	// DemandSignalCrossAgentSearch fires when one agent searches another's
	// notebook shelf and gets a hit. PR 3 wiring.
	DemandSignalCrossAgentSearch PromotionDemandSignal = iota
	// DemandSignalChannelContextAsk fires when a channel context-ask
	// classifier matches and a notebook search returns hits. PR 5 wiring.
	DemandSignalChannelContextAsk
	// DemandSignalCEOReviewFlag fires when the CEO explicitly flags an
	// entry via team_notebook_review. PR 4 wiring.
	DemandSignalCEOReviewFlag
	// DemandSignalRejectionCooldown applies a negative weight when a prior
	// promotion attempt was rejected, suppressing re-escalation for the
	// 7-day window default.
	DemandSignalRejectionCooldown
)
type PromotionState ¶
type PromotionState string
PromotionState is the wire-format state enum. Values are dash-case to match the frontend's ReviewState type in web/src/api/notebook.ts.
const (
	PromotionPending          PromotionState = "pending"
	PromotionInReview         PromotionState = "in-review"
	PromotionChangesRequested PromotionState = "changes-requested"
	PromotionApproved         PromotionState = "approved"
	PromotionRejected         PromotionState = "rejected"
	PromotionExpired          PromotionState = "expired"
	PromotionArchived         PromotionState = "archived"
)
type PromotionSweep ¶ added in v0.141.0
type PromotionSweep struct {
// contains filtered or unexported fields
}
PromotionSweep is the periodic broker-driven safety net that drains the demand index into the review log. Lifecycle mirrors AutoNotebookWriter: NewPromotionSweep → Start(ctx) → Stop(timeout).
Safe for concurrent Counters() callers. tick() is single-threaded (only run() invokes it) so internal state — last-commit baselines, budget tracking — does not require its own lock beyond progressMu.
func NewPromotionSweep ¶ added in v0.141.0
func NewPromotionSweep(escalator promotionEscalator, counter notebookCounter, cfg PromotionSweepConfig) *PromotionSweep
NewPromotionSweep constructs an idle sweep. Either argument may be nil for tests: a nil escalator turns tick() into a metrics-only pass; a nil counter disables the content-volume gate (every tick runs).
func (*PromotionSweep) Counters ¶ added in v0.141.0
func (s *PromotionSweep) Counters() PromotionSweepCounters
Counters returns a point-in-time snapshot of sweep observability state.
func (*PromotionSweep) Start ¶ added in v0.141.0
func (s *PromotionSweep) Start(ctx context.Context)
Start launches the sweep goroutine. Idempotent: a second call is a no-op. The goroutine exits when ctx is cancelled or Stop is called.
func (*PromotionSweep) Stop ¶ added in v0.141.0
func (s *PromotionSweep) Stop(timeout time.Duration)
Stop signals the goroutine to exit and waits up to timeout for it to finish. Idempotent.
type PromotionSweepConfig ¶ added in v0.141.0
type PromotionSweepConfig struct {
// Interval is the base cadence between sweep ticks.
Interval time.Duration
// DailyTokenBudget is the per-office cap on LLM tokens consumed by
// the (optional) LLM extraction pass. PR 6 reads but never spends
// from this budget; the LLM hook lands behind LLMEnabled.
DailyTokenBudget int
// LLMEnabled toggles the LLM extraction pass. Default false.
LLMEnabled bool
}
PromotionSweepConfig holds the runtime knobs for the sweep. All zero values are replaced by defaults in NewPromotionSweep.
type PromotionSweepCounters ¶ added in v0.141.0
type PromotionSweepCounters struct {
Sweeps int64
Skipped int64
Failed int64
BudgetExhausted int64
NearThreshold int
BaseCadenceSec int64
CurrentCadenceSec int64
}
PromotionSweepCounters is the observability snapshot for the sweep.
type ProposedEntity ¶
type ProposedEntity struct {
Kind EntityKind
ProposedSlug string
// ExistingSlug is non-empty when the LLM claimed a match against a known
// slug. The resolver verifies the claim; if the slug does not exist in the
// index, it logs a warning and falls through to signal-based resolution.
ExistingSlug string
Signals Signals
Aliases []string
Confidence float64
// Ghost marks entities the extractor explicitly called out as "no strong
// identity signal" — create a new record without attempting fuzzy matching.
Ghost bool
}
ProposedEntity is the resolver's input: everything the LLM extraction pipeline produced for one entity mention.
type QueryAnswer ¶
type QueryAnswer struct {
QueryClass QueryClass `json:"query_class"`
AnswerMarkdown string `json:"answer_markdown"`
SourcesCited []int `json:"sources_cited"`
Sources []QuerySource `json:"sources"`
Confidence float64 `json:"confidence"`
Coverage string `json:"coverage"` // complete | partial | none
Notes string `json:"notes,omitempty"`
LatencyMs int64 `json:"latency_ms"`
}
QueryAnswer is the structured response returned to the caller.
type QueryClass ¶
type QueryClass string
QueryClass is the classifier output label.
const (
	QueryClassStatus         QueryClass = "status"
	QueryClassRelationship   QueryClass = "relationship"
	QueryClassMultiHop       QueryClass = "multi_hop"
	QueryClassCounterfactual QueryClass = "counterfactual"
	QueryClassGeneral        QueryClass = "general"
)
func ClassifyQuery ¶
func ClassifyQuery(q string) (QueryClass, float64)
ClassifyQuery returns a QueryClass + confidence for the given query string.
Confidence is expressed as a float64 in [0.0, 1.0]:
- 1.0 strong heuristic signal (wikilink, clear counterfactual phrase)
- 0.8+ medium signal (role pattern, who-at construction)
- 0.5 default (entity-adjacent but ambiguous)
type QueryHandler ¶
type QueryHandler struct {
// contains filtered or unexported fields
}
QueryHandler orchestrates the full cited-answer loop.
func NewQueryHandler ¶
func NewQueryHandler(idx *WikiIndex, p QueryProvider) *QueryHandler
NewQueryHandler constructs a QueryHandler backed by the given index and provider. The embedded prompt template is parsed at construction time so errors surface immediately rather than at query time.
func (*QueryHandler) Answer ¶
func (h *QueryHandler) Answer(ctx context.Context, req QueryRequest) (QueryAnswer, error)
Answer runs the full query pipeline and returns a QueryAnswer.
It never returns a non-nil error for recoverable conditions (timeout, parse failure, out-of-scope query); those are signaled via QueryAnswer.Coverage and QueryAnswer.AnswerMarkdown. A non-nil error is returned only when the index itself is unavailable.
type QueryProvider ¶
type QueryProvider interface {
RunPrompt(ctx context.Context, systemPrompt, userPrompt string) (string, error)
}
QueryProvider is the narrow interface the query handler uses to invoke an LLM. Tests substitute a fake to avoid any real network call.
type QueryRequest ¶
type QueryRequest struct {
Query string // natural-language question
RequestedBy string // slug of agent or human asking
TopK int // default 20 if zero
Timeout time.Duration // default 10s if zero
}
QueryRequest carries all inputs to QueryHandler.Answer.
type QuerySource ¶
type QuerySource struct {
Kind string `json:"kind"`
SlugOrID string `json:"slug_or_id"`
Title string `json:"title"`
Excerpt string `json:"excerpt"`
ValidFrom string `json:"valid_from,omitempty"`
ValidUntil string `json:"valid_until,omitempty"`
Staleness float64 `json:"staleness"`
SourcePath string `json:"source_path,omitempty"`
}
QuerySource is one entry in the sources list passed to the LLM and returned in QueryAnswer.Sources. Field names mirror the template variables.
type ReadEvent ¶ added in v0.98.0
type ReadEvent struct {
Path string `json:"path"`
Timestamp time.Time `json:"ts"`
// Reader is "web" for human browser access or an agent slug (e.g.
// "slack-agent"). Empty string is never written — Append is a no-op
// when reader is empty.
Reader string `json:"reader"`
IsAgent bool `json:"is_agent"` // true when Reader != "web"
}
ReadEvent is one access record written to reads.jsonl.
type ReadLog ¶ added in v0.98.0
type ReadLog struct {
// contains filtered or unexported fields
}
ReadLog appends access events to reads.jsonl and answers stats queries. It is safe to share across goroutines.
func NewReadLog ¶ added in v0.98.0
NewReadLog constructs a ReadLog whose backing file sits at <wikiRoot>/.reads/reads.jsonl.
func (*ReadLog) AllStats ¶ added in v0.98.0
AllStats returns access statistics for every tracked path in a single scan of reads.jsonl. Use this when you need stats for multiple articles (e.g. BuildCatalog sort=last_read) to avoid O(n*m) scans.
type ReadStats ¶ added in v0.98.0
type ReadStats struct {
LastRead *time.Time // nil if the article has never been accessed
HumanReadCount int // reads where IsAgent == false
AgentReadCount int // reads where IsAgent == true
DaysUnread int // whole days since LastRead; 0 if accessed today or never
}
ReadStats summarises access history for one article path.
type Redirect ¶
type Redirect struct {
From string `json:"from"`
To string `json:"to"`
MergedAt time.Time `json:"merged_at"`
MergedBy string `json:"merged_by"`
CommitSHA string `json:"commit_sha"`
}
Redirect maps a younger slug to its survivor (§7.2).
type Repo ¶
type Repo struct {
// contains filtered or unexported fields
}
Repo represents the wiki git repository living at ~/.wuphf/wiki/.
func (*Repo) AppendFactLog ¶
func (r *Repo) AppendFactLog(ctx context.Context, slug, relPath, additionalContent, message string) (string, int, error)
AppendFactLog appends additionalContent to the fact-log file at relPath and commits the resulting bytes. The file is created if it does not exist. `additionalContent` must be the raw bytes to append — the caller is responsible for newline-terminating each JSONL record. A trailing newline is added if missing so the final file always ends with "\n".
Uses the repo-wide write lock so concurrent appenders are serialised; the WikiWorker single-writer invariant (§11.5, Anti-pattern 5) routes every caller through this path.
Implementation: O_APPEND on a per-open fd. Cheaper than the earlier read-modify-write for prolific entities whose JSONL files can grow past a few MB — each append is O(bytesWritten) instead of O(filesize). The repo-wide mutex still guarantees exclusivity for the non-atomic "fstat + write" sequence we need to keep the trailing-newline invariant.
The accepted relPath shape matches Repo.CommitFactLog: wiki/facts/**/*.jsonl or team/entities/*.facts.jsonl.
func (*Repo) ApplyPromotion ¶
func (r *Repo) ApplyPromotion(ctx context.Context, p *Promotion, approverSlug string) (string, error)
ApplyPromotion executes the atomic promote commit. Returns the short commit SHA of the primary promotion commit.
The approverSlug is the git author slug that the commit is recorded under — typically the reviewer's slug, or "human" when a human clicked Approve in the web UI.
func (*Repo) AuditLog ¶
AuditLog returns every commit in the wiki repo, most-recent first. This is the cross-article audit trail — per-article history lives in Log(). Two intentional design choices:
- We always include the full author slug, timestamp, and file list so downstream audit tooling (CSV export, compliance review, SOC2 artefact generation, etc.) can work without re-shelling to git.
- Bootstrap (`wuphf-bootstrap`), recovery (`wuphf-recovery`), and system (`system`) authors are surfaced alongside agent slugs. Audit tools can filter them out by author, but the default feed is the complete lineage — hiding bootstrap would create a false impression that articles "appeared" at first-agent-write time.
limit <= 0 returns everything. since.IsZero() returns everything regardless of age; otherwise only commits strictly newer than `since` are returned.
func (*Repo) BackupMirror ¶
BackupMirror copies the wiki repo to ~/.wuphf/wiki.bak/ skipping git object packs for speed. The worker calls this asynchronously and debounced.
func (*Repo) BackupRoot ¶
BackupRoot returns the wiki backup mirror path.
func (*Repo) BuildArticle ¶
func (r *Repo) BuildArticle(ctx context.Context, relPath, reader string, readLog *ReadLog) (ArticleMeta, error)
BuildArticle reads an article and computes its metadata + backlinks. Returns os.ErrNotExist wrapped if the article is missing.
reader is "web" for a human browser request, an agent slug (e.g. "slack-agent") for MCP tool access, or "" to suppress read tracking. readLog may be nil, in which case tracking is skipped regardless of reader.
func (*Repo) BuildCatalog ¶
func (r *Repo) BuildCatalog(ctx context.Context, sortBy string, readLog *ReadLog, includeArchived bool) ([]CatalogEntry, error)
BuildCatalog walks team/ and returns every .md article with title + author + last-edit metadata grouped by top-level thematic dir. Git metadata is read in one batch so catalog latency scales with repo history, not article count.
When sort is "last_read", entries are sorted by last access time ascending (oldest-accessed first; never-accessed articles appear first). readLog may be nil, in which case read stats are all zero and sort falls back to path order.
Archived tombstones (frontmatter archived: true) are excluded by default. Pass includeArchived=true to include them (for admin/recovery views).
Shape matches web/src/api/wiki.ts WikiCatalogEntry.
func (*Repo) Commit ¶
func (r *Repo) Commit(ctx context.Context, slug, relPath, content, mode, message string) (string, int, error)
Commit writes content for slug @ path, stages, and commits with a per-commit git identity that never touches the user's global git config. Returns the short commit SHA and the number of bytes written.
mode must be one of: "create", "replace", "append_section". "create" fails if the file exists; "replace" overwrites wholesale; "append_section" appends two newlines + content to an existing file (or creates it fresh).
func (*Repo) CommitArchive ¶ added in v0.100.0
func (r *Repo) CommitArchive(ctx context.Context, relPath, tombstone, archivePath, archiveContent, message string) (string, error)
CommitArchive writes a tombstone at relPath and the full original content at archivePath (.archive/<relPath>), then commits both files in a single git commit under the archivist identity. Used by WikiArchiver.Sweep.
relPath must be a valid team/ article path and archivePath must live under .archive/. Both are validated before any file is written.
Returns the commit SHA. Returns ("", nil) when there was nothing to commit (both files were already identical to what's on disk).
func (*Repo) CommitArtifact ¶
func (r *Repo) CommitArtifact(ctx context.Context, slug, relPath, content, message string) (string, int, error)
CommitArtifact writes the raw artifact body to wiki/artifacts/{kind}/{sha}.md and commits it under the supplied author slug. Follows the same shape as CommitEntityFact / CommitLintReport (no index/all.md regen).
func (*Repo) CommitBootstrap ¶
CommitBootstrap stages every untracked / modified path under team/ and commits the whole pile as author `wuphf-bootstrap`. It is idempotent: if nothing is dirty, it returns ("", nil) without creating an empty commit.
This is the handshake between the blueprint materializer (which only writes files to disk) and git. Without it, the freshly-seeded skeletons are untracked — on a later crash they get folded into a `wuphf-recovery` commit, which is misleading in an audit view. With it, the first commit for every skeleton article is attributable to the bootstrap step, not to some later recovery pass or to an agent that happened to edit the file.
The author slug `wuphf-bootstrap` is deliberate: it is visually distinct from both the per-agent slugs (operator/planner/…) and the two reserved system slugs (`system`, `wuphf-recovery`). Audit views can filter or colour it differently from real human / agent edits.
func (*Repo) CommitEntityFact ¶
func (r *Repo) CommitEntityFact(ctx context.Context, slug, relPath, content, message string) (string, int, error)
CommitEntityFact writes the given content to relPath inside the wiki repo and commits it under the supplied slug. Always uses "replace" semantics — the caller owns the merge (the fact log appends in memory and submits the full file bytes).
func (*Repo) CommitEntityGraph ¶
func (r *Repo) CommitEntityGraph(ctx context.Context, slug, content, message string) (string, int, error)
CommitEntityGraph writes the full graph log to team/entities/.graph.jsonl and commits under the supplied author slug. Always replace-mode — the EntityGraph builder in entity_graph.go merges existing bytes with the new edges before calling this.
func (*Repo) CommitFactLog ¶
func (r *Repo) CommitFactLog(ctx context.Context, slug, relPath, content, message string) (string, int, error)
CommitFactLog writes the given content to relPath inside wiki/facts/ and commits it under the supplied slug. Used by ResolveContradiction to mutate fact records that live in the new-schema fact log location.
func (*Repo) CommitGhostBrief ¶
func (r *Repo) CommitGhostBrief(ctx context.Context, kind, slug, content, message string) (string, int, error)
CommitGhostBrief writes a minimal brief for a freshly-minted entity to team/{kind}/{slug}.md and commits it on the wiki branch. Idempotent: if the file already exists with byte-identical content, returns the current HEAD SHA without a new commit.
kind must be one of "people", "companies", "customers". slug must match [a-z0-9][a-z0-9-]*. Returns (commitSHA, bytesWritten, err).
Closes the §7.4 substrate-rebuild gap: every ghost-entity row the extractor mints in the in-memory index also lands as markdown on disk so a wipe + ReconcileFromMarkdown rebuilds to a logically-identical state. Mirrors the locking + git-add/diff/commit shape of CommitEntityFact and CommitLintReport so the worker mutex serialises every writer and re-extracting the same artifact is a no-op.
func (*Repo) CommitHuman ¶
func (r *Repo) CommitHuman(ctx context.Context, relPath, content, expectedSHA, message string, identity HumanIdentity) (string, int, error)
CommitHuman writes content to relPath with the caller-supplied human identity, enforcing an expected-SHA pre-check. When identity has its zero value, the fallback `human <human@wuphf.local>` identity is used so the legacy single-user attribution path still works.
Returns the new short SHA, bytes written, and an error; ErrWikiSHAMismatch means the caller should re-load and re-apply their edits. Mode is inferred from mustExist: a fresh article (expectedSHA == "") uses "create", an edit uses "replace".
Mirrors Repo.Commit in all other respects: same validateArticlePath guard, same working-tree atomicity, same regenerateIndexLocked pass so the index lands in the same commit as the article edit.
func (*Repo) CommitLintReport ¶
func (r *Repo) CommitLintReport(ctx context.Context, slug, relPath, content, message string) (string, int, error)
CommitLintReport writes the daily lint report markdown to wiki/.lint/ and commits it under the supplied slug (typically ArchivistAuthor).
func (*Repo) CommitNotebook ¶
func (r *Repo) CommitNotebook(ctx context.Context, slug, relPath, content, mode, message string) (string, int, error)
CommitNotebook writes + commits a notebook entry. Exposed for the worker.
func (*Repo) CommitPlaybookExecution ¶
func (r *Repo) CommitPlaybookExecution(ctx context.Context, slug, relPath, content, message string) (string, int, error)
CommitPlaybookExecution appends-in-full to the jsonl execution log. Same "replace-with-merged-bytes" pattern as entity facts.
func (*Repo) CommitPlaybookSkill ¶
func (r *Repo) CommitPlaybookSkill(ctx context.Context, slug, relPath, content, message string) (string, int, error)
CommitPlaybookSkill writes content to the canonical compiled-skill path and commits it. Does NOT regen index/all.md — the compiled subdirectory is hidden from the catalog on purpose.
func (*Repo) CommitScanStaged ¶
CommitScanStaged stages every untracked/modified path under team/ and commits the whole pile as author `scanner`. It is idempotent: if nothing is dirty, returns ("", nil) without creating an empty commit.
This mirrors CommitBootstrap's contract but with the scanner identity so audit tools can distinguish scanner-ingested content from manually authored articles and from skeleton bootstrap commits.
func (*Repo) CommitTeamLearnings ¶ added in v0.97.0
func (r *Repo) CommitTeamLearnings(ctx context.Context, slug, relPath, jsonlContent, markdownContent, message string) (string, int, error)
CommitTeamLearnings writes the merged JSONL log and generated markdown page in one commit. The normal Repo.Commit path rejects .jsonl, so learnings use this narrow path while still regenerating the wiki catalog for index.md.
func (*Repo) EnsureNotebookDirs ¶ added in v0.105.1
EnsureNotebookDirs materializes agents/{slug}/notebook/.gitkeep for every valid slug and commits the shelves as bootstrap metadata. It is idempotent and intentionally writes no notebook entries.
func (*Repo) HeadSHA ¶
HeadSHA returns the short HEAD commit hash. Returns empty string if the repo has no commits yet.
func (*Repo) IndexAllPath ¶
IndexAllPath returns the path to the auto-regenerated catalog.
func (*Repo) IndexRegen ¶
IndexRegen walks team/ and rewrites index/all.md with one entry per article. Entries are sorted by directory then by modification time (newest first within a directory).
func (*Repo) Init ¶
Init ensures the wiki repo exists at r.root with a valid .git directory. If git is missing, returns ErrGitUnavailable. Idempotent.
func (*Repo) RecoverDirtyTree ¶
RecoverDirtyTree detects uncommitted changes on startup and auto-commits them as `wuphf-recovery` so no user data is discarded.
func (*Repo) RestoreFromBackup ¶
RestoreFromBackup swaps the corrupt repo for the backup mirror. Returns ErrBackupMissing if the mirror does not exist.
type ResolvedEntity ¶
type ResolvedEntity struct {
// Slug is the final canonical slug to use.
Slug string
Kind EntityKind
// Matched is true when the resolver found an existing entity.
Matched bool
// MatchReason documents which signal produced the match. One of:
// "existing_slug_honored" | "email_match" | "fuzzy_name" | "new_entity" | "ambiguous"
MatchReason string
// GhostEntity reflects whether a fresh record was created.
GhostEntity bool
}
ResolvedEntity is the resolver's output.
func ResolveEntity ¶
func ResolveEntity(ctx context.Context, idx SignalIndex, p ProposedEntity) (ResolvedEntity, error)
ResolveEntity applies the slug-invention guard and returns the canonical ResolvedEntity. It never mutates the index — callers are responsible for creating new records when Matched is false and MatchReason is "new_entity".
Algorithm (in priority order):
- If p.ExistingSlug is set, validate it against the index. Honoured → matched; missing → warning + fall through.
- If p.Signals.Email is set, look up by email.
- If p.Signals.PersonName is set, fuzzy name match (JaroWinkler ≥ 0.9). Exactly one high-confidence match → matched. Multiple matches → Matched=false, MatchReason="ambiguous".
- Otherwise create a new entity with collision-safe slug.
type ReviewLog ¶
type ReviewLog struct {
// contains filtered or unexported fields
}
ReviewLog owns the on-disk JSONL + the in-memory cache of promotions. All mutations serialize on `mu`; the lock also guards file append so concurrent writers never produce interleaved lines.
func NewReviewLog ¶
func NewReviewLog(path string, resolver ReviewerResolver, clock func() time.Time) (*ReviewLog, error)
NewReviewLog opens (or creates) the JSONL at path, replays existing records into the in-memory cache, and returns a ready ReviewLog.
func (*ReviewLog) AddComment ¶
AddComment appends a comment to the thread without changing state.
func (*ReviewLog) AdvanceToInReview ¶
func (l *ReviewLog) AdvanceToInReview(promotionID, actorSlug string) (*Promotion, StateTransition, error)
AdvanceToInReview marks a pending promotion as picked up by the reviewer. Idempotent: if the promotion is already in-review (or past it), no-op.
func (*ReviewLog) Approve ¶
func (l *ReviewLog) Approve(promotionID, actorSlug, rationale, commitSHA string) (*Promotion, StateTransition, error)
Approve transitions in-review → approved. The caller is the one that already executed the atomic promote commit — we record the commit SHA on the promotion. Humans bypass the reviewer check by passing an empty actorSlug; agents must match Promotion.ReviewerSlug exactly.
func (*ReviewLog) CanApprove ¶
CanApprove returns nil if actorSlug is authorized to approve the promotion, or the same error Approve would return. Runs the reviewer-validation check WITHOUT mutating state so callers can guard expensive side-effects (like the atomic wiki commit in Repo.ApplyPromotion) on authorization first.
Closes a TOCTOU gap: reviewApprove used to call ApplyPromotion before Approve validated the actor, so a wrong-slug POST would land a wiki commit then fail the state transition. CanApprove runs under ReviewLog.mu so the check is consistent with the state Approve will see when called next.
func (*ReviewLog) List ¶
List returns non-archived promotions (scope="all") or reviews assigned to a specific reviewer slug. Results are sorted most-recently-updated first.
func (*ReviewLog) Reject ¶
func (l *ReviewLog) Reject(promotionID, actorSlug string) (*Promotion, StateTransition, error)
Reject is the author-side withdrawal. Transitions to terminal `rejected`.
func (*ReviewLog) RequestChanges ¶
func (l *ReviewLog) RequestChanges(promotionID, actorSlug, rationale string) (*Promotion, StateTransition, error)
RequestChanges transitions in-review → changes-requested.
func (*ReviewLog) Resubmit ¶
func (l *ReviewLog) Resubmit(promotionID, actorSlug string) (*Promotion, StateTransition, error)
Resubmit transitions changes-requested → in-review. Only the author may resubmit.
func (*ReviewLog) SubmitPromotion ¶
func (l *ReviewLog) SubmitPromotion(req SubmitPromotionRequest) (*Promotion, error)
SubmitPromotion creates a new promotion in state=pending. The reviewer is resolved from the target path via the injected resolver (human-only and ceo-fallback sentinels are honored as-is).
func (*ReviewLog) TickExpiry ¶
func (l *ReviewLog) TickExpiry(now time.Time) []StateTransition
TickExpiry is called periodically (broker goroutine, every 10 min) to advance stale promotions to expired/archived. Returns the list of transitions so callers can fan out SSE events.
type ReviewStateChangeEvent ¶
type ReviewStateChangeEvent struct {
ID string `json:"id"`
OldState PromotionState `json:"old_state"`
NewState PromotionState `json:"new_state"`
ActorSlug string `json:"actor_slug"`
Timestamp string `json:"timestamp"`
}
ReviewStateChangeEvent is the SSE payload for every promotion transition. Kept narrow on purpose — the frontend re-fetches the full review to get comment threads and such.
type ReviewerGrade ¶ added in v0.193.0
type ReviewerGrade struct {
ReviewerSlug string `json:"reviewerSlug"`
Severity Severity `json:"severity"`
Suggestion string `json:"suggestion"`
Reasoning string `json:"reasoning"`
FilePath string `json:"filePath"`
Line int `json:"line"`
SubmittedAt time.Time `json:"submittedAt"`
}
ReviewerGrade is one reviewer agent's CodeRabbit-style structured grade. Severity is typed (compile-time) so a typo by the agent produces a build-time failure rather than a silently mis-tier'd grade. FilePath and Line are optional (for grades targeting overall behaviour rather than a specific code site). SubmittedAt is when the broker recorded the grade, not when the agent's process started.
SubmittedAt is `time.Time` (matches the design doc and Lane E's stub). Lane D's pre-integration stub used `string` (RFC3339) for the same field; integration drops Lane D's stub in favour of this definition, and Lane D's timeout filler is rewritten to populate time.Time. The constant values for Severity match Lane D's stub exactly (critical/major/minor/nitpick/skipped) so the timeout fill behaviour is preserved without code changes inside Lane D.
type ReviewerResolver ¶
ReviewerResolver is the injected function that maps a target wiki path to a reviewer slug (or ReviewerHumanOnly / ReviewerFallback). Production wiring passes (*operations.Blueprint).ResolveReviewer.
type ReviewerRoutingSignals ¶ added in v0.193.0
type ReviewerRoutingSignals struct {
Files []string // diff paths from `git diff --name-only`
WikiPaths []string // subset of Files under the wiki root
ToolNames []string // unique tool names observed across the task's manifest events
TaskTags []string // verbatim copy of teamTask.Tags
}
ReviewerRoutingSignals is the deterministic snapshot of a task's state that the routing logic intersects against each agent's Watching set. Built by extractRoutingSignalsLocked from on-disk diff output, agent-stream manifest events, and the task's own Tags field.
Kept as an explicit struct (rather than passing four slices) so the test surface can construct a synthetic signal set without spinning up a worktree or stream buffer.
type ReviewerSummary ¶ added in v0.193.0
ReviewerSummary captures the convergence progress for a task's reviewer set. Graded counts only reviewer slugs who emitted a grade with a typed severity; Total is len(task.Reviewers). When Lane D has not yet populated Reviewers (or no reviewers were assigned), both are zero.
type RouteContract ¶ added in v0.105.2
type RouteContract struct {
Domain string
Capability string
Path string
Method string
Auth string
RequestType string
ResponseType string
}
RouteContract describes the stable HTTP contract for one broker route.
func BrokerRouteContracts ¶ added in v0.105.2
func BrokerRouteContracts() []RouteContract
BrokerRouteContracts returns the contract registry for routes that have been moved under explicit domain registration.
type RuntimeArtifact ¶
type RuntimeArtifact struct {
ID string
Kind RuntimeArtifactKind
Title string
Summary string
State string
Progress string
Owner string
Channel string
RelatedID string
StartedAt string
UpdatedAt string
Path string
Worktree string
PartialOutput string
ResumeHint string
ReviewHint string
Blocking bool
}
func (RuntimeArtifact) EffectiveProgress ¶
func (a RuntimeArtifact) EffectiveProgress() string
func (RuntimeArtifact) EffectiveSummary ¶
func (a RuntimeArtifact) EffectiveSummary() string
func (RuntimeArtifact) EffectiveTitle ¶
func (a RuntimeArtifact) EffectiveTitle() string
type RuntimeArtifactKind ¶
type RuntimeArtifactKind string
const (
	RuntimeArtifactTask           RuntimeArtifactKind = "task"
	RuntimeArtifactTaskLog        RuntimeArtifactKind = "task_log"
	RuntimeArtifactWorkflowRun    RuntimeArtifactKind = "workflow_run"
	RuntimeArtifactRequest        RuntimeArtifactKind = "request"
	RuntimeArtifactHumanAction    RuntimeArtifactKind = "human_action"
	RuntimeArtifactExternalAction RuntimeArtifactKind = "external_action"
)
type RuntimeCapabilities ¶
type RuntimeCapabilities struct {
Tmux TmuxCapability
Codex CapabilityStatus
Opencode CapabilityStatus
Items []CapabilityStatus
Registry CapabilityRegistry
}
func DetectRuntimeCapabilities ¶
func DetectRuntimeCapabilities() RuntimeCapabilities
func DetectRuntimeCapabilitiesWithOptions ¶
func DetectRuntimeCapabilitiesWithOptions(opts CapabilityProbeOptions) RuntimeCapabilities
func (RuntimeCapabilities) Counts ¶
func (c RuntimeCapabilities) Counts() (ready, warn, info int)
type RuntimeMessage ¶
type RuntimeRequest ¶
type RuntimeSnapshot ¶
type RuntimeSnapshot struct {
Channel string
SessionMode string
DirectAgent string
GeneratedAt time.Time
Tasks []RuntimeTask
Requests []RuntimeRequest
Recent []RuntimeMessage
Artifacts []RuntimeArtifact
Capabilities RuntimeCapabilities
Registry CapabilityRegistry
Memory SessionMemorySnapshot
Recovery SessionRecovery
}
func BuildRuntimeSnapshot ¶
func BuildRuntimeSnapshot(input RuntimeSnapshotInput) RuntimeSnapshot
func (RuntimeSnapshot) FormatText ¶
func (s RuntimeSnapshot) FormatText() string
type RuntimeSnapshotInput ¶
type RuntimeSnapshotInput struct {
Channel string
SessionMode string
DirectAgent string
Tasks []RuntimeTask
Requests []RuntimeRequest
Recent []RuntimeMessage
Artifacts []RuntimeArtifact
Capabilities RuntimeCapabilities
Registry CapabilityRegistry
Now time.Time
}
type RuntimeTask ¶
type SQLiteFactStore ¶
type SQLiteFactStore struct {
// contains filtered or unexported fields
}
SQLiteFactStore implements FactStore via modernc.org/sqlite.
func NewSQLiteFactStore ¶
func NewSQLiteFactStore(path string) (*SQLiteFactStore, error)
NewSQLiteFactStore opens (or creates) the SQLite database at path and applies the schema. The caller must call Close() when done.
func (*SQLiteFactStore) CanonicalHashAll ¶
func (s *SQLiteFactStore) CanonicalHashAll(ctx context.Context) (string, error)
CanonicalHashAll extends §7.4 to cover facts + entities + edges + redirects. The per-table serialisation matches the in-memory implementation so contract tests pass against both backends from the same markdown corpus.
func (*SQLiteFactStore) CanonicalHashFacts ¶
func (s *SQLiteFactStore) CanonicalHashFacts(ctx context.Context) (string, error)
CanonicalHashFacts implements §7.4: sha256 over all fact rows sorted by ID. Serialisation is identical to inMemoryFactStore so the contract test passes against both backends from the same markdown corpus. ReinforcedAt is EXCLUDED from the hash input so two extraction runs on the same artifact (the second one purely bumps reinforced_at) produce identical hashes. End-to-end drift including reinforcement lives in CanonicalHashAll.
func (*SQLiteFactStore) Close ¶
func (s *SQLiteFactStore) Close() error
func (*SQLiteFactStore) CountFacts ¶
func (s *SQLiteFactStore) CountFacts(ctx context.Context) (int, error)
CountFacts returns the total number of rows in the facts table. Cheap — COUNT(*) hits the primary index on id. Used by cross-entity consumers to pre-check corpus size before triggering a full scan via ListAllFacts.
func (*SQLiteFactStore) IterateEntities ¶
func (s *SQLiteFactStore) IterateEntities(ctx context.Context, fn func(IndexEntity) error) error
IterateEntities streams every entity row through fn. Rows close on return (defer Close + final rows.Err() check). The query uses ORDER BY entities.slug so iteration is stable across calls.
func (*SQLiteFactStore) ListAllFacts ¶
func (s *SQLiteFactStore) ListAllFacts(ctx context.Context) ([]TypedFact, error)
ListAllFacts returns every fact in the store sorted by ID. Matches inMemoryFactStore ordering so cross-entity consumers (playbook clustering) see deterministic results regardless of backend.
func (*SQLiteFactStore) ListAllFactsPaged ¶
func (s *SQLiteFactStore) ListAllFactsPaged(ctx context.Context, afterID string, limit int) ([]TypedFact, error)
ListAllFactsPaged returns up to `limit` facts with id > afterID, sorted by id ASC. Empty afterID starts at the beginning (the empty string sorts before every non-empty id). limit <= 0 falls back to the 1000-row default.
Keyset pagination (id > ?) over the primary key is O(log N) per page, which keeps memory bounded regardless of total fact count.
func (*SQLiteFactStore) ListEdgesForEntity ¶
func (*SQLiteFactStore) ListFactsByPredicateObject ¶
func (s *SQLiteFactStore) ListFactsByPredicateObject(ctx context.Context, predicate, object string) ([]TypedFact, error)
ListFactsByPredicateObject returns every fact whose triplet predicate+object match exactly. Backed by idx_facts_triplet_pred_obj.
func (*SQLiteFactStore) ListFactsByTriplet ¶
func (s *SQLiteFactStore) ListFactsByTriplet(ctx context.Context, subject, predicate, objectPrefix string) ([]TypedFact, error)
ListFactsByTriplet returns every fact whose triplet matches (subject, predicate) and whose triplet.object starts with objectPrefix. Empty objectPrefix matches any object. Backed by idx_facts_triplet (subject, predicate).
objectPrefix may contain any bytes; SQL LIKE metacharacters are escaped so a literal "%" or "_" in the prefix matches only itself, not any-char wildcards.
func (*SQLiteFactStore) ListFactsForEntity ¶
func (*SQLiteFactStore) ListReinforcedFactsByPredicate ¶
func (s *SQLiteFactStore) ListReinforcedFactsByPredicate(ctx context.Context, predicate string) ([]TypedFact, error)
ListReinforcedFactsByPredicate returns reinforced facts (reinforced_at IS NOT NULL) optionally narrowed by triplet predicate. Backed by the partial index idx_facts_reinforced (triplet_predicate, id) WHERE reinforced_at IS NOT NULL, so the cost is O(matching rows). Empty predicate returns every reinforced fact in the store. Result is sorted by id ASC for parity with the in-memory backend.
func (*SQLiteFactStore) ResolveRedirect ¶
func (*SQLiteFactStore) UpsertEdge ¶
func (s *SQLiteFactStore) UpsertEdge(ctx context.Context, e IndexEdge) error
func (*SQLiteFactStore) UpsertEntity ¶
func (s *SQLiteFactStore) UpsertEntity(ctx context.Context, e IndexEntity) error
func (*SQLiteFactStore) UpsertFact ¶
func (s *SQLiteFactStore) UpsertFact(ctx context.Context, f TypedFact) error
func (*SQLiteFactStore) UpsertRedirect ¶
func (s *SQLiteFactStore) UpsertRedirect(ctx context.Context, r Redirect) error
type ScanError ¶ added in v0.86.0
ScanError records a single per-article failure during a scan pass. The caller decides whether to surface or aggregate; the scanner never panics.
type ScanResult ¶ added in v0.86.0
type ScanResult struct {
Scanned int `json:"scanned"`
Matched int `json:"matched"`
Proposed int `json:"proposed"`
Deduped int `json:"deduped"`
RejectedByGuard int `json:"rejected_by_guard"`
Errors []ScanError `json:"errors,omitempty"`
DurationMs int64 `json:"duration_ms"`
Trigger string `json:"trigger"`
}
ScanResult is the JSON-serializable summary of a single scan pass. Counts are intentionally additive: callers can sum results across passes for telemetry.
type ScopedMemoryHit ¶
type ScopedMemoryHit struct {
Scope string `json:"scope,omitempty"`
Backend string `json:"backend,omitempty"`
Identifier string `json:"identifier,omitempty"`
Title string `json:"title,omitempty"`
Snippet string `json:"snippet,omitempty"`
OwnerSlug string `json:"owner_slug,omitempty"`
Slug string `json:"slug,omitempty"`
PageID int `json:"page_id,omitempty"`
ChunkID int `json:"chunk_id,omitempty"`
ChunkIndex int `json:"chunk_index,omitempty"`
Source string `json:"source,omitempty"`
Score *float64 `json:"score,omitempty"`
Stale *bool `json:"stale,omitempty"`
}
func QuerySharedMemory ¶
type SearchHit ¶
type SearchHit struct {
FactID string `json:"fact_id"`
Score float64 `json:"score"`
Snippet string `json:"snippet,omitempty"`
Entity string `json:"entity_slug,omitempty"`
}
SearchHit is one result row from the text index.
type SelfHealSignalScanner ¶ added in v0.86.0
type SelfHealSignalScanner struct {
// contains filtered or unexported fields
}
SelfHealSignalScanner scans broker tasks for resolved self-heal incidents and emits one SkillCandidate per incident.
func NewSelfHealSignalScanner ¶ added in v0.86.0
func NewSelfHealSignalScanner(b *Broker) *SelfHealSignalScanner
NewSelfHealSignalScanner constructs a scanner. The first pass surfaces every resolved incident; subsequent passes are incremental from minResolvedAt = time.Now() at the end of the previous pass.
func (*SelfHealSignalScanner) Scan ¶ added in v0.86.0
func (s *SelfHealSignalScanner) Scan(ctx context.Context) ([]SkillCandidate, error)
Scan returns a SkillCandidate for every resolved self-heal incident whose UpdatedAt is strictly greater than s.minResolvedAt. After a successful pass, minResolvedAt is advanced to time.Now() so subsequent passes are incremental. Returns up to maxCandidatesPerPass candidates ordered by UpdatedAt desc.
type SessionMemoryActionSummary ¶
type SessionMemoryActionSummary struct {
ID string `json:"id"`
Kind string `json:"kind,omitempty"`
Source string `json:"source,omitempty"`
Channel string `json:"channel,omitempty"`
Actor string `json:"actor,omitempty"`
Summary string `json:"summary,omitempty"`
RelatedID string `json:"related_id,omitempty"`
SignalIDs []string `json:"signal_ids,omitempty"`
DecisionID string `json:"decision_id,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
}
type SessionMemoryMessageSummary ¶
type SessionMemoryMessageSummary struct {
ID string `json:"id"`
From string `json:"from,omitempty"`
Title string `json:"title,omitempty"`
Content string `json:"content,omitempty"`
ReplyTo string `json:"reply_to,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Summary string `json:"summary,omitempty"`
}
type SessionMemoryRequestSummary ¶
type SessionMemoryRequestSummary struct {
ID string `json:"id"`
Kind string `json:"kind,omitempty"`
Status string `json:"status,omitempty"`
From string `json:"from,omitempty"`
Title string `json:"title,omitempty"`
Question string `json:"question,omitempty"`
Channel string `json:"channel,omitempty"`
ReplyTo string `json:"reply_to,omitempty"`
RecommendedID string `json:"recommended_id,omitempty"`
Blocking bool `json:"blocking,omitempty"`
Required bool `json:"required,omitempty"`
Secret bool `json:"secret,omitempty"`
Summary string `json:"summary,omitempty"`
}
type SessionMemorySnapshot ¶
type SessionMemorySnapshot struct {
Version int `json:"version"`
SessionMode string `json:"session_mode,omitempty"`
DirectAgent string `json:"direct_agent,omitempty"`
GeneratedAt string `json:"generated_at,omitempty"`
Focus string `json:"focus,omitempty"`
NextSteps []string `json:"next_steps,omitempty"`
Highlights []string `json:"highlights,omitempty"`
Tasks []SessionMemoryTaskSummary `json:"tasks,omitempty"`
Requests []SessionMemoryRequestSummary `json:"requests,omitempty"`
Actions []SessionMemoryActionSummary `json:"actions,omitempty"`
Messages []SessionMemoryMessageSummary `json:"messages,omitempty"`
}
func BuildSessionMemorySnapshot ¶
func BuildSessionMemorySnapshot(sessionMode, directAgent string, tasks []RuntimeTask, requests []RuntimeRequest, recent []RuntimeMessage) SessionMemorySnapshot
func BuildSessionMemorySnapshotFromOfficeState ¶
func BuildSessionMemorySnapshotFromOfficeState(sessionMode, directAgent string, tasks []teamTask, requests []humanInterview, actions []officeActionLog, messages []channelMessage) SessionMemorySnapshot
func (SessionMemorySnapshot) RestorationContext ¶
func (s SessionMemorySnapshot) RestorationContext() SessionRestoreContext
func (SessionMemorySnapshot) ToRecovery ¶
func (s SessionMemorySnapshot) ToRecovery() SessionRecovery
type SessionMemoryTaskSummary ¶
type SessionMemoryTaskSummary struct {
ID string `json:"id"`
Title string `json:"title,omitempty"`
Owner string `json:"owner,omitempty"`
Status string `json:"status,omitempty"`
PipelineStage string `json:"pipeline_stage,omitempty"`
ReviewState string `json:"review_state,omitempty"`
ExecutionMode string `json:"execution_mode,omitempty"`
WorktreePath string `json:"worktree_path,omitempty"`
WorktreeBranch string `json:"worktree_branch,omitempty"`
ThreadID string `json:"thread_id,omitempty"`
Blocked bool `json:"blocked,omitempty"`
DependsOn []string `json:"depends_on,omitempty"`
Summary string `json:"summary,omitempty"`
}
type SessionRecovery ¶
func BuildSessionRecovery ¶
func BuildSessionRecovery(sessionMode, directAgent string, tasks []RuntimeTask, requests []RuntimeRequest, recent []RuntimeMessage) SessionRecovery
type SessionReport ¶ added in v0.193.0
type SessionReport struct {
Highlights string `json:"highlights"`
TopWins []Win `json:"topWins"`
DeadEnds []DeadEnd `json:"deadEnds"`
Metadata map[string]string `json:"metadata"`
}
SessionReport is the owner-agent-authored summary that appears in the Decision Packet center column. Highlights renders as the lead summary paragraph; TopWins as the labelled-delta list; DeadEnds as the tried-and-discarded section.
Per design doc, FullLog []ExperimentRow is intentionally omitted from v1 — research-class tasks (Karpathy autoresearch shape) are not in scope for v1 demand evidence. Defer to v1.1 when a real research session needs an experiment-row table.
type SessionRestoreContext ¶
type SessionRestoreContext struct {
Focus string `json:"focus,omitempty"`
NextSteps []string `json:"next_steps,omitempty"`
ActiveTaskIDs []string `json:"active_task_ids,omitempty"`
PendingRequestIDs []string `json:"pending_request_ids,omitempty"`
WorkingDirectories []string `json:"working_directories,omitempty"`
ThreadIDs []string `json:"thread_ids,omitempty"`
}
type Severity ¶ added in v0.193.0
type Severity string
Severity is the typed string constant used by reviewer agent grades. Mirrors the CodeRabbit five-tier convention so v1 reviewer routing can fan grades to the same UI without per-tier translation logic.
const (
	// SeverityCritical is a blocking finding the reviewer believes
	// prevents merge without a fix. UI surfaces these in red at the top
	// of the grades section.
	SeverityCritical Severity = "critical"
	// SeverityMajor is a substantial finding that should be addressed
	// before merge but does not block by itself. UI orange.
	SeverityMajor Severity = "major"
	// SeverityMinor is a cleanup / style suggestion the reviewer would
	// accept either way. UI yellow (AA-compliant pair, see design doc).
	SeverityMinor Severity = "minor"
	// SeverityNitpick is the lowest-confidence suggestion tier. UI blue.
	SeverityNitpick Severity = "nitpick"
	// SeveritySkipped is the reviewer-timeout / reviewer-process-exit
	// placeholder so the convergence rule can fire with a complete
	// grade list. The UI greys these out and surfaces a banner.
	SeveritySkipped Severity = "skipped"
)
func (Severity) IsCanonical ¶ added in v0.193.0
func (s Severity) IsCanonical() bool
IsCanonical reports whether s is one of the five typed Severity values. Returns false for the zero value and for any free-form string cast.
type SeveritySummary ¶ added in v0.193.0
type SeveritySummary struct {
Critical int `json:"critical"`
Major int `json:"major"`
Minor int `json:"minor"`
Nitpick int `json:"nitpick"`
}
SeveritySummary mirrors Lane G's TS shape exactly (camelCase JSON keys, ints for each tier). Lane C populates the underlying ReviewerGrade list per task; Lane E aggregates it deterministically.
type ShareInviteDetails ¶ added in v0.134.0
type ShareInviteDetails struct {
}
ShareInviteDetails carries the join URL and broker-issued metadata for a freshly created invite. Callers that need more than the URL (e.g. the in-process share controller surfacing the expiry timestamp) should prefer CreateInviteDetailed over CreateInvite. ExpiresAt is the same RFC3339 string the broker stores so callers cannot drift in formatting.
type ShareTransport ¶ added in v0.127.0
type ShareTransport struct {
// contains filtered or unexported fields
}
ShareTransport adapts the broker's human-share surface to the transport.OfficeBoundTransport contract. It holds no state of its own beyond the broker reference, the URL builders, and the host pointer set by Run.
Two URL builders exist: the immutable constructor builder (urlBuilder) and an optional override (urlBuilderOverride) that the in-process share controller installs once it knows its bind address. CreateInvite reads the override first; absent an override it falls back to the constructor builder. This split keeps the constructor builder (typically RelativeJoinURL) safe as a default while letting the controller upgrade to absolute URLs without a re-construction dance.
func NewShareTransport ¶ added in v0.127.0
func NewShareTransport(broker *Broker, urlBuilder JoinURLBuilder) *ShareTransport
NewShareTransport constructs a ShareTransport bound to the given broker. urlBuilder must be non-nil; pass RelativeJoinURL when no absolute base is known so callers see an explicit relative-path choice rather than a silent nil-builder fallback.
func (*ShareTransport) Binding ¶ added in v0.127.0
func (s *ShareTransport) Binding() transport.Binding
Binding declares the office scope. The adapter admits humans into the office itself rather than a specific channel or member, so MemberSlug and ChannelSlug are intentionally empty.
func (*ShareTransport) CreateInvite ¶ added in v0.127.0
CreateInvite creates a new human-share invite via Broker.createHumanInvite and returns the join URL produced by the active JoinURLBuilder. The override builder (set via SetURLBuilder) wins over the constructor builder so the in-process share controller can upgrade from RelativeJoinURL to an absolute URL once its bind address is known. The network argument is part of the OfficeBoundTransport contract but ShareTransport ignores it: URL construction is controlled by the builder, which the share controller selects based on its own bind logic.
func (*ShareTransport) CreateInviteDetailed ¶ added in v0.134.0
func (s *ShareTransport) CreateInviteDetailed(_ context.Context) (ShareInviteDetails, error)
CreateInviteDetailed creates an invite and returns the join URL plus broker-issued metadata (invite ID and RFC3339 expiry). Identical to CreateInvite for URL construction; see CreateInvite for the override precedence rule.
CONCURRENCY NOTE: when more than one in-process controller can mint invites against the same ShareTransport instance (e.g. the network-share controller and the public-tunnel controller running side-by-side), callers MUST use CreateInviteDetailedWithBuilder instead. This method reads the override builder installed by SetURLBuilder, and because SetURLBuilder is a separate atomic operation from invite creation the two can be raced — a tunnel that calls SetURLBuilder(tunnelJoinURL) immediately followed by CreateInviteDetailed can have its builder overwritten by a parallel share path's SetURLBuilder(shareJoinURL), producing an invite URL with the wrong origin.
func (*ShareTransport) CreateInviteDetailedWithBuilder ¶ added in v0.157.0
func (s *ShareTransport) CreateInviteDetailedWithBuilder(_ context.Context, builder JoinURLBuilder) (ShareInviteDetails, error)
CreateInviteDetailedWithBuilder is the race-free variant: the URL builder is bound atomically to this single invite-creation, never touching the shared urlBuilderOverride field. Concurrent callers each see their own builder applied to their own token. Use this from any code path that may run alongside another controller that also mints invites against the same ShareTransport instance.
A nil builder is rejected so a misuse fails loudly rather than silently substituting an empty string for the URL.
func (*ShareTransport) Health ¶ added in v0.127.0
func (s *ShareTransport) Health() transport.Health
Health reports Connected once Run has started; before Run is called Health returns Disconnected. The share surface is in-process and has no upstream dependency to fail, so once Run is live the state is steady.
func (*ShareTransport) Name ¶ added in v0.127.0
func (s *ShareTransport) Name() string
Name returns the stable adapter identifier.
func (*ShareTransport) RevokeInvite ¶ added in v0.127.0
func (s *ShareTransport) RevokeInvite(ctx context.Context, inviteID string) error
RevokeInvite revokes the invite and every session it admitted. Per the OfficeBoundTransport contract the adapter is responsible for calling host.RevokeParticipant for each affected admitted human before returning; host.RevokeParticipant in turn closes the session in the broker. The two-step dance (broker.RevokeHumanInvite + host.RevokeParticipant) keeps the broker's revocation idempotent: if Host.RevokeParticipant errors mid-fan-out the invite is already marked revoked, so a retry will only fan out the remaining sessions.
Errors from individual host.RevokeParticipant calls are accumulated via errors.Join so a partial fan-out does not silently hide later failures behind the first one. The loop runs to completion on every call; failing fast on the first error would leave later sessions live.
func (*ShareTransport) Run ¶ added in v0.127.0
Run stores the host atomically, installs the broker's human-admit hook so the in-process accept handler fans out to Host.UpsertParticipant, and then blocks until ctx is cancelled. The human-share surface is in-process — the existing handlers in broker_human_share.go drive accept directly — so Run does not subscribe to anything external. A nil host is rejected so a misconfigured launcher fails loudly rather than silently degrading.
The admit hook is cleared on Run exit so a stale closure (capturing the now- stopped host) cannot keep firing if a second adapter installs itself later.
func (*ShareTransport) Send ¶ added in v0.127.0
Send is a no-op for the human-share adapter — and that is the correct architecture, not a TODO. Admitted humans already receive office-wide messages in real-time through the broker's existing SSE fan-out: the broker's handleEvents (broker_sse.go) accepts the human session cookie, publishMessage (broker_publish.go) fans every channelMessage to all subscribers without filtering humans out, the share HTTP server proxies /api/* including text/event-stream through to the broker (cmd/wuphf/share.go), and the React app subscribes via useBrokerEvents (web/src/hooks/useBrokerEvents.ts). Doing real delivery here would duplicate that path.
Returning nil keeps the OfficeBoundTransport contract honest: the adapter accepts the outbound message and trusts the broker SSE channel to deliver. If a future deployment ever needs out-of-band push (e.g. native mobile admitted humans without the React EventSource), that work belongs in the broker's SSE layer or a sibling adapter — not here.
func (*ShareTransport) SetURLBuilder ¶ added in v0.134.0
func (s *ShareTransport) SetURLBuilder(b JoinURLBuilder)
SetURLBuilder installs an override URL builder. Passing nil clears the override so CreateInvite falls back to the constructor builder. Atomic so the broker hot path that calls CreateInvite does not contend with the controller installing the override on start.
type SharedMemoryWrite ¶
type SharedMemoryWrite struct {
}
type SignalIndex ¶
type SignalIndex interface {
// EntityBySlug looks up a single entity by its canonical slug within the
// given kind. Returns (zero, false, nil) when not found.
EntityBySlug(ctx context.Context, slug string) (resolverEntity, bool, error)
// EntityByEmail returns the entity with this normalised email, if any.
EntityByEmail(ctx context.Context, email string) (resolverEntity, bool, error)
// EntityByDomain returns all entities associated with this web domain.
EntityByDomain(ctx context.Context, domain string) ([]resolverEntity, error)
// EntityByName returns all entities whose names are partial or full matches
// for the query string. The resolver applies its own fuzzy filter on top.
EntityByName(ctx context.Context, name string) ([]resolverEntity, error)
}
SignalIndex is the narrow read interface the resolver depends on. Implement on the in-memory store and the SQLite store; tests use spySignalIndex below.
type Signals ¶
type Signals struct {
Email string `json:"email,omitempty"`
Domain string `json:"domain,omitempty"`
PersonName string `json:"person_name,omitempty"`
JobTitle string `json:"job_title,omitempty"`
}
Signals are the matching signals the resolver uses to dedupe entities.
type SkillCandidate ¶ added in v0.86.0
type SkillCandidate struct {
// Source identifies the signal type for downstream debugging + telemetry.
Source SkillCandidateSource
// SuggestedName is a kebab-case slug hint. The synthesizer may override.
SuggestedName string
// SuggestedDescription is a one-line trigger phrase hint. The synthesizer
// may override.
SuggestedDescription string
// Excerpts are verbatim text snippets from the source (notebook entries,
// incident task details). The synthesizer cites these as motivation.
Excerpts []SkillCandidateExcerpt
// RelatedWikiPaths are the team/ wiki articles related to this candidate
// (resolved via the existing wiki retrieval API). The synthesizer reads
// these for grounded synthesis.
RelatedWikiPaths []string
// SignalCount is how many independent signals contributed to this
// candidate (e.g. 3 agents wrote about the same topic).
SignalCount int
// FirstSeenAt and LastSeenAt frame the time window of the signal cluster.
FirstSeenAt time.Time
LastSeenAt time.Time
}
SkillCandidate is the shared signal envelope produced by Stage B signal sources and consumed by the SkillSynthesizer (PR 2-B).
type SkillCandidateExcerpt ¶ added in v0.86.0
type SkillCandidateExcerpt struct {
// Path is the source location: a wiki-relative file path for notebook
// excerpts, a task ID for self-heal excerpts.
Path string
// Snippet is the verbatim text excerpted from the source.
Snippet string
// Author is the agent slug for notebook excerpts, the task owner for
// self-heal excerpts, or "human" for human-authored snippets.
Author string
// CreatedAt is the source's timestamp (notebook mtime or task UpdatedAt).
CreatedAt time.Time
}
SkillCandidateExcerpt is a single verbatim citation that motivated the candidate. The synthesizer may inline it as evidence in the proposal body.
type SkillCandidateSource ¶ added in v0.86.0
type SkillCandidateSource string
SkillCandidateSource tags the upstream emitter of a candidate so consumers can branch on origin.
const (
	// SourceNotebookCluster indicates the candidate came from cross-agent
	// notebook clustering (notebook_signal_scanner.go).
	SourceNotebookCluster SkillCandidateSource = "notebook_cluster"
	// SourceSelfHealResolved indicates the candidate came from a resolved
	// self-heal incident task (self_heal_signal.go).
	SourceSelfHealResolved SkillCandidateSource = "self_heal_resolved"
)
type SkillCompileMetrics ¶ added in v0.86.0
type SkillCompileMetrics struct {
ManualClicksTotal int64
CronTicksTotal int64
ProposalsCreatedTotal int64
ProposalsApprovedTotal int64
ProposalsRejectedByGuardTotal int64
LastTickDurationMs int64
// LastSkillCompilePassAtNano stores unix nanoseconds of the last successful
// compile pass (0 = never). Updated and read via atomic.StoreInt64 /
// atomic.LoadInt64 so reads are safe without broker.mu.
LastSkillCompilePassAtNano int64
// StageBProposalsTotal counts proposals written by the Stage B
// synthesizer (LLM-synth from candidate signals). Incremented atomically
// once the unified write helper accepts the proposal.
StageBProposalsTotal int64
// CounterNudgesFiredTotal counts skill_review_nudge tasks fired by the
// Hermes-style per-agent counter (Stage B'). Incremented atomically by
// the tool-event hot path each time a nudge task is appended.
CounterNudgesFiredTotal int64
// SelfHealCandidatesScanned counts candidates with Source ==
// SourceSelfHealResolved that the synthesizer attempted to LLM-synthesize.
SelfHealCandidatesScanned int64
// SelfHealSkillsSynthesized counts self-heal candidates that the LLM
// accepted AND that successfully wrote through the unified funnel.
SelfHealSkillsSynthesized int64
// SelfHealLLMRejections counts self-heal candidates rejected by the LLM
// or by the post-LLM sanity checks (parse failures, name regex, body
// heading missing, length checks, etc.).
SelfHealLLMRejections int64
// EmbeddingCallsTotal is incremented every time the notebook scanner
// computes a fresh embedding (cache miss path). Cache hits do NOT
// bump this counter — see EmbeddingCacheHitsTotal.
EmbeddingCallsTotal int64
// EmbeddingCacheHitsTotal counts on-disk cache hits across all
// embedding paths. A high hit ratio is the goal — we never want to
// re-embed the same entry once it has stabilised in the cache.
EmbeddingCacheHitsTotal int64
// EmbeddingCacheMissesTotal counts cache misses (live API calls).
// EmbeddingCallsTotal == EmbeddingCacheMissesTotal in steady state;
// the two diverge when a single batched API call fans out to N
// per-text Set events.
EmbeddingCacheMissesTotal int64
// EmbeddingCostUsdBits stores a float64 USD cost using
// math.Float64bits. Updated via addFloatBits / loadFloatBits in
// notebook_signal_scanner_embeddings.go so reads + writes are
// lock-free.
EmbeddingCostUsdBits uint64
// SemanticDedupHitsTotal counts proposals that matched an existing
// skill via the semantic dedup gate (Jaro-Winkler or embedding cosine).
SemanticDedupHitsTotal int64
// SkillEnhancementsTotal counts proposals that enhanced an existing
// skill instead of being discarded or created as new.
SkillEnhancementsTotal int64
}
SkillCompileMetrics captures cumulative + last-run telemetry for the Stage A compile loop. All fields are updated and read atomically so callers need not hold broker.mu.
type SkillCounter ¶ added in v0.87.0
type SkillCounter struct {
// contains filtered or unexported fields
}
SkillCounter is the broker-side state for the Hermes counter pattern. It owns its own mutex so callers can invoke Increment / Reset / Stats from any goroutine — including the MCP tool-event hot path — without holding b.mu. The threshold + cooldown are immutable post-construction; callers who need to test edge cases use NewSkillCounterWith for direct overrides.
func NewSkillCounter ¶ added in v0.87.0
func NewSkillCounter() *SkillCounter
NewSkillCounter constructs a counter using the env-configured threshold and cooldown. Use NewSkillCounterWith from tests that want explicit thresholds.
func NewSkillCounterWith ¶ added in v0.87.0
func NewSkillCounterWith(threshold int, cooldown time.Duration) *SkillCounter
NewSkillCounterWith constructs a counter with explicit threshold + cooldown. Threshold <= 0 is normalized to 1 to avoid an off-by-one in callers that pass 0. Cooldown < 0 is normalized to 0 (no cooldown).
func (*SkillCounter) Cooldown ¶ added in v0.87.0
func (c *SkillCounter) Cooldown() time.Duration
Cooldown returns the configured nudge cooldown.
func (*SkillCounter) Increment ¶ added in v0.87.0
func (c *SkillCounter) Increment(agentSlug, toolName, summary string) (shouldNudge bool, iterations int)
Increment registers one tool call for agentSlug. Returns shouldNudge=true when the counter has reached the threshold AND no nudge has fired within the cooldown window. When shouldNudge is true, Increment also resets the counter to 0 and stamps lastNudgedAt — treat one nudge fire as the "reset event" so the next nudge requires another N tool calls.
toolName + summary are recorded in the per-agent ring buffer so the nudge task body can list recent activity. Empty toolName is allowed (caller already validated) but is recorded as "(unknown)" for clarity.
func (*SkillCounter) RecentToolCalls ¶ added in v0.87.0
func (c *SkillCounter) RecentToolCalls(agentSlug string, limit int) []recentToolCall
RecentToolCalls returns a snapshot of up to limit most-recent calls for agentSlug, oldest-first. Returns nil if the agent has no tracked calls. Used by fireSkillReviewNudgeLocked to build the task body.
func (*SkillCounter) Reset ¶ added in v0.87.0
func (c *SkillCounter) Reset(agentSlug string)
Reset clears the per-agent counter without firing a nudge. Called when the agent invokes team_skill_create or team_skill_patch — they just codified something, so the tally restarts from zero. Reset never blocks a future nudge: it only zeroes iterations + records the reset time.
func (*SkillCounter) SetClock ¶ added in v0.87.0
func (c *SkillCounter) SetClock(now func() time.Time)
SetClock replaces the counter's time source. Tests use this to drive cooldown logic deterministically.
func (*SkillCounter) Stats ¶ added in v0.87.0
func (c *SkillCounter) Stats() map[string]SkillCounterMetrics
Stats returns a copy of the per-agent metrics suitable for JSON serialization. Safe to call concurrently with Increment / Reset.
func (*SkillCounter) Threshold ¶ added in v0.87.0
func (c *SkillCounter) Threshold() int
Threshold returns the configured nudge threshold. Useful for tests and telemetry.
func (*SkillCounter) TotalNudgesFired ¶ added in v0.87.0
func (c *SkillCounter) TotalNudgesFired() int64
TotalNudgesFired sums NudgesFiredTotal across all tracked agents. Used by the broker for the aggregate counter_nudges_fired_total telemetry value.
type SkillCounterMetrics ¶ added in v0.87.0
type SkillCounterMetrics struct {
Iterations int `json:"iterations"`
LastResetAt time.Time `json:"last_reset_at,omitempty"`
LastNudgedAt time.Time `json:"last_nudged_at,omitempty"`
NudgesFiredTotal int64 `json:"nudges_fired_total"`
}
SkillCounterMetrics exposes per-agent telemetry suitable for serialization in the /skills/compile/stats response. Returned via SkillCounter.Stats.
type SkillFrontmatter ¶ added in v0.86.0
type SkillFrontmatter struct {
// Name is the skill slug (e.g. "daily-digest"). Mandatory.
Name string `yaml:"name"`
// Description is a one-line summary. Mandatory. The LLM router reads this.
Description string `yaml:"description"`
// Version is a semver string (e.g. "1.0.0"). Populated on every regeneration.
Version string `yaml:"version,omitempty"`
// License defaults to MIT unless the workspace overrides it.
License string `yaml:"license,omitempty"`
// Metadata contains WUPHF-specific provenance under the wuphf namespace.
Metadata SkillMetadata `yaml:"metadata,omitempty"`
}
SkillFrontmatter represents the top-level Anthropic Agent Skills frontmatter. Name and Description are mandatory; all other fields are optional.
func ParseSkillMarkdown ¶ added in v0.86.0
func ParseSkillMarkdown(content []byte) (SkillFrontmatter, string, error)
ParseSkillMarkdown splits YAML frontmatter from body, parses the YAML, and returns (frontmatter, body, error). It tolerates missing optional fields. Returns an error when name or description is absent.
type SkillMetadata ¶ added in v0.86.0
type SkillMetadata struct {
Wuphf SkillWuphfMeta `yaml:"wuphf,omitempty"`
}
SkillMetadata is the top-level metadata namespace. Only the wuphf sub-key is defined here; other tools may add their own namespaces alongside it.
type SkillSafetyScan ¶ added in v0.86.0
type SkillSafetyScan struct {
// Verdict is safe | caution | dangerous.
Verdict string `yaml:"verdict"`
// Findings is the list of specific issues found during the scan.
Findings []string `yaml:"findings,omitempty"`
// TrustLevel is the trust tier applied during this scan.
TrustLevel string `yaml:"trust_level,omitempty"`
// Summary is a human-readable explanation of the verdict.
Summary string `yaml:"summary,omitempty"`
}
SkillSafetyScan holds the result of a skill_guard scan. Verdict is one of safe | caution | dangerous.
type SkillScanner ¶ added in v0.86.0
type SkillScanner struct {
// contains filtered or unexported fields
}
SkillScanner walks the wiki under team/, asks the LLM to classify each article, and writes proposals through the broker's funnel.
func NewSkillScanner ¶ added in v0.86.0
func NewSkillScanner(b *Broker, provider llmProvider, budget int) *SkillScanner
NewSkillScanner constructs a scanner. budget is the maximum number of LLM calls per pass — guards against runaway spend. Callers may pass 0 to use a reasonable default (see defaultSkillCompileBudget).
func (*SkillScanner) Scan ¶ added in v0.86.0
func (s *SkillScanner) Scan(ctx context.Context, scopePath string, dryRun bool, trigger string) (ScanResult, error)
Scan walks the wiki under team/ (or scopePath if non-empty), asks the LLM for each candidate, and writes proposals through writeSkillProposalLocked. scopePath is wiki-relative (e.g. "team/customers"). An empty scopePath scans the full team subtree. dryRun=true performs the LLM classification but skips the actual proposal write.
type SkillSpec ¶ added in v0.86.0
type SkillSpec struct {
Frontmatter SkillFrontmatter
Body string
SourceArticle string
}
SkillSpec is the canonical in-memory representation the scanner produces before handing off to writeSkillProposalLocked. It bundles the parsed frontmatter, the body, and the source article path for provenance.
type SkillSynthesizer ¶ added in v0.86.0
type SkillSynthesizer struct {
// contains filtered or unexported fields
}
SkillSynthesizer aggregates Stage B signals, asks the LLM to synthesize a skill body for each candidate, and writes proposals through the broker's unified funnel.
func NewSkillSynthesizer ¶ added in v0.86.0
func NewSkillSynthesizer(b *Broker, agg stageBCandidateSource) *SkillSynthesizer
NewSkillSynthesizer constructs a synthesizer bound to broker b. The aggregator is required (the synthesizer has nothing to do without candidates); the provider is set separately by the caller so tests can inject fakes.
func (*SkillSynthesizer) SynthesizeOnce ¶ added in v0.86.0
func (s *SkillSynthesizer) SynthesizeOnce(ctx context.Context, trigger string) (StageBSynthResult, error)
SynthesizeOnce runs one synth pass: aggregate candidates → LLM synthesize → dedup → safety guard → write proposal. trigger is one of "manual", "cron", or "event" for telemetry.
Concurrency:
- Acquire b.mu, check / set b.skillSynthInflight.
- If a pass is already in flight, set b.skillSynthCoalesced and return ErrSynthCoalesced.
- Release b.mu, run the pass.
- Re-acquire b.mu, clear the inflight flag, and recurse once if a coalesced request arrived during the pass.
type SkillTombstoneEntry ¶ added in v0.86.0
type SkillTombstoneEntry struct {
// Slug is the skill's normalised name slug.
Slug string `yaml:"slug"`
// SourceArticle is the wiki path that triggered the proposal, if any.
SourceArticle string `yaml:"source_article,omitempty"`
// RejectedAt is an RFC3339 timestamp of when the rejection occurred.
RejectedAt string `yaml:"rejected_at"`
// Reason is a human-readable explanation (e.g. "rejected by guard: dangerous").
Reason string `yaml:"reason,omitempty"`
}
SkillTombstoneEntry records a single rejected skill proposal.
type SkillWuphfMeta ¶ added in v0.86.0
type SkillWuphfMeta struct {
// Title is the display title shown in the UI (frontend-only).
Title string `yaml:"title,omitempty"`
// Trigger is a natural-language trigger phrase kept for legacy broker fields.
// The top-level Description field is authoritative for LLM routing.
Trigger string `yaml:"trigger,omitempty"`
// SourceArticles lists the wiki paths that drove this skill's content.
SourceArticles []string `yaml:"source_articles,omitempty"`
// SourceSignals lists notebook citations (Stage B+ only).
SourceSignals []string `yaml:"source_signals,omitempty"`
// CreatedBy is the identity that wrote this proposal ("archivist", agent slug, etc.).
CreatedBy string `yaml:"created_by,omitempty"`
// Status is one of proposed | active | disabled | archived.
Status string `yaml:"status,omitempty"`
// DisabledFromStatus records the status a disabled skill came from so
// disabled proposals cannot be re-enabled as active without approval.
DisabledFromStatus string `yaml:"disabled_from_status,omitempty"`
// LastSynthesizedSHA is the repo HEAD SHA at the time of last synthesis.
LastSynthesizedSHA string `yaml:"last_synthesized_sha,omitempty"`
// LastSynthesizedTs is the RFC3339 timestamp of the last synthesis run.
LastSynthesizedTs string `yaml:"last_synthesized_ts,omitempty"`
// FactCountAtSynthesis records the fact count when the skill was synthesized.
FactCountAtSynthesis int `yaml:"fact_count_at_synthesis,omitempty"`
// SafetyScan holds the result of the skill_guard scan.
SafetyScan *SkillSafetyScan `yaml:"safety_scan,omitempty"`
// Tags are for hub indexing.
Tags []string `yaml:"tags,omitempty"`
// RelatedSkills lists other skill slugs this skill overlaps with.
RelatedSkills []string `yaml:"related_skills,omitempty"`
// WorkflowProvider is the provider for workflow-backed skills.
WorkflowProvider string `yaml:"workflow_provider,omitempty"`
// WorkflowKey identifies the workflow within the provider.
WorkflowKey string `yaml:"workflow_key,omitempty"`
// WorkflowDefinition is the inline workflow definition.
WorkflowDefinition string `yaml:"workflow_definition,omitempty"`
// WorkflowSchedule is the cron schedule for scheduled workflow skills.
WorkflowSchedule string `yaml:"workflow_schedule,omitempty"`
// RelayID is the relay event subscription ID.
RelayID string `yaml:"relay_id,omitempty"`
// RelayPlatform is the relay event source platform.
RelayPlatform string `yaml:"relay_platform,omitempty"`
// RelayEventTypes lists the relay event types this skill subscribes to.
RelayEventTypes []string `yaml:"relay_event_types,omitempty"`
}
SkillWuphfMeta carries all WUPHF-specific provenance for a compiled skill.
type Snapshot ¶
type Snapshot struct {
// Pending are extraction entries that have not been tombstoned. They
// may or may not be past their NextRetryNotBefore — callers that
// need "ready now" should filter by NextRetryNotBefore ≤ now.
Pending []DLQEntry `json:"pending"`
// PermanentFailures are entries that crossed their max_retries and
// were promoted to permanent-failures.jsonl. Append-only; callers
// should treat the order as oldest-first (file order).
PermanentFailures []DLQEntry `json:"permanent_failures"`
// CorruptLines is the running count of malformed JSONL rows skipped
// in extractions.jsonl. Non-zero means the queue file has corruption.
CorruptLines uint64 `json:"corrupt_lines"`
// CorruptLinesPerm is the same counter for permanent-failures.jsonl.
CorruptLinesPerm uint64 `json:"corrupt_lines_permanent"`
}
Snapshot is the read-only view of the DLQ returned by Inspect. It is safe to serialise to JSON for operator surfaces.
type Spec ¶ added in v0.193.0
type Spec struct {
Problem string `json:"problem,omitempty"`
TargetOutcome string `json:"targetOutcome,omitempty"`
AcceptanceCriteria []ACItem `json:"acceptanceCriteria,omitempty"`
Assignment string `json:"assignment,omitempty"`
Constraints []string `json:"constraints,omitempty"`
AutoAssign string `json:"autoAssign,omitempty"`
Feedback []FeedbackItem `json:"feedback,omitempty"`
}
Spec is the intake agent's structured output. All fields are optional on the wire (omitempty) so a partial response can still parse for inspection, but the validator enforces the design doc's gate: Problem != "", len(AcceptanceCriteria) >= 1, Assignment != "".
AutoAssign is the optional pre-declared owner agent slug. When non-empty the CLI runs the 3-second auto-assign countdown described in the design doc; Lane B exposes the cancellable countdown API and leaves the terminal UX to Lane F.
Feedback is appended on changes_requested re-entry (Lane D wires that path); v1 intake never populates it on first parse.
type StageBSignalAggregator ¶ added in v0.86.0
type StageBSignalAggregator struct {
// contains filtered or unexported fields
}
StageBSignalAggregator runs the notebook + self-heal signal scanners sequentially and returns their union, capped at maxTotal candidates.
func NewStageBSignalAggregator ¶ added in v0.86.0
func NewStageBSignalAggregator(b *Broker) *StageBSignalAggregator
NewStageBSignalAggregator wires the default scanners against the supplied broker. Tests may construct alternate scanners and assemble an aggregator directly via the exported fields.
func (*StageBSignalAggregator) Scan ¶ added in v0.86.0
func (a *StageBSignalAggregator) Scan(ctx context.Context, maxTotal int) ([]SkillCandidate, error)
Scan runs the notebook scanner first (clusters need a longer history to stabilise) then the self-heal scanner. Both errors are surfaced as a joined error so partial results are still usable. If maxTotal <= 0 we fall back to defaultStageBMaxTotal.
type StageBSynthResult ¶ added in v0.86.0
type StageBSynthResult struct {
CandidatesScanned int `json:"candidates_scanned"`
Synthesized int `json:"synthesized"`
Deduped int `json:"deduped"`
RejectedByGuard int `json:"rejected_by_guard"`
Errors []SynthError `json:"errors,omitempty"`
DurationMs int64 `json:"duration_ms"`
Trigger string `json:"trigger"`
}
StageBSynthResult is the JSON-serializable summary of a single synth pass. Counts mirror ScanResult so callers can fold them into Stage A telemetry.
type StateTransition ¶
type StateTransition struct {
PromotionID string `json:"promotion_id"`
OldState PromotionState `json:"old_state"`
NewState PromotionState `json:"new_state"`
Actor string `json:"actor"`
Rationale string `json:"rationale,omitempty"`
Timestamp time.Time `json:"timestamp"`
}
StateTransition records a single state change — actor, reason, timestamp.
type SubmitPromotionRequest ¶
type SubmitPromotionRequest struct {
SourceSlug string
SourcePath string
TargetPath string
Rationale string
// Override, when non-empty, replaces the resolver-derived reviewer. Used
// when an agent names a specific reviewer via the MCP tool.
ReviewerOverride string
}
SubmitPromotionRequest is the argument shape callers pass when creating a new promotion. The reviewer resolver is injected separately so tests can stub it without spinning up a blueprint.
type SweepResult ¶ added in v0.100.0
type SweepResult struct {
Archived int `json:"archived"`
Skipped int `json:"skipped"`
Errors int `json:"errors"`
ArchivedPaths []string `json:"archived_paths,omitempty"`
}
SweepResult summarises a single WikiArchiver.Sweep run.
type SynthError ¶ added in v0.86.0
type SynthError struct {
CandidateName string `json:"candidate_name"`
Reason string `json:"reason"`
}
SynthError records a single per-candidate failure during a synth pass.
type SynthesisJob ¶
type SynthesisJob struct {
Kind EntityKind
Slug string
RequestBy string
EnqueuedAt time.Time
// ID is a monotonic counter so callers can correlate responses.
ID uint64
}
SynthesisJob is one pending synthesis request for a specific entity.
type SynthesisMode ¶ added in v0.99.0
type SynthesisMode int
SynthesisMode controls when the synthesizer fires LLM calls.
const (
	// SynthesisModeAuto (default) fires synthesis automatically when fact
	// count crosses the threshold at ingest time.
	SynthesisModeAuto SynthesisMode = iota
	// SynthesisModeDemand suppresses auto-synthesis at ingest. Synthesis fires
	// the first time BuildArticle is called on a ghost brief with enough facts.
	// Opt-in via WUPHF_ENTITY_SYNTHESIS_MODE=demand.
	SynthesisModeDemand
)
type SynthesizerConfig ¶
type SynthesizerConfig struct {
Provider string
Threshold int
Timeout time.Duration
Mode SynthesisMode
// LLMCall is the pluggable shell-out used by tests. Production code
// leaves this nil and the worker falls back to provider.RunConfiguredOneShot.
LLMCall func(ctx context.Context, systemPrompt, userPrompt string) (string, error)
// Graph, when non-nil, gives the synthesizer read access to the cross-
// entity graph so a trailing "## Related" section can be appended
// deterministically after the LLM returns. Passing nil disables the
// section — existing briefs stay unchanged.
Graph *EntityGraph
}
SynthesizerConfig is the tunable knobs for the worker. All fields are optional; defaults match constants above.
type TaskAckRequest ¶ added in v0.105.2
type TaskListRequest ¶ added in v0.105.2
type TaskListRequest struct {
StatusFilter string `json:"status,omitempty"`
MySlug string `json:"my_slug,omitempty"`
ViewerSlug string `json:"viewer_slug,omitempty"`
Channel string `json:"channel,omitempty"`
AllChannels bool `json:"all_channels,omitempty"`
IncludeDone bool `json:"include_done,omitempty"`
}
type TaskListResponse ¶ added in v0.105.2
type TaskListResponse struct {
Channel string `json:"channel,omitempty"`
Tasks []teamTask `json:"tasks"`
}
type TaskMemoryWorkflowReconcileResponse ¶ added in v0.105.2
type TaskMemoryWorkflowReconcileResponse struct {
Report MemoryWorkflowReconcileReport `json:"report"`
}
type TaskMemoryWorkflowRequest ¶ added in v0.105.2
type TaskMemoryWorkflowRequest struct {
Action string `json:"action"`
Event string `json:"event"`
TaskID string `json:"task_id"`
Actor string `json:"actor"`
Query string `json:"query"`
Citations []ContextCitation `json:"citations"`
Artifact MemoryWorkflowArtifact `json:"artifact"`
Artifacts []MemoryWorkflowArtifact `json:"artifacts"`
SkipReason string `json:"skip_reason"`
}
type TaskMemoryWorkflowResponse ¶ added in v0.105.2
type TaskMemoryWorkflowResponse struct {
Task teamTask `json:"task"`
Updated bool `json:"updated"`
}
type TaskMutationError ¶ added in v0.108.5
type TaskMutationError struct {
Kind TaskMutationErrorKind
Message string
Cause error
}
func (*TaskMutationError) Error ¶ added in v0.108.5
func (e *TaskMutationError) Error() string
func (*TaskMutationError) Unwrap ¶ added in v0.108.5
func (e *TaskMutationError) Unwrap() error
type TaskMutationErrorKind ¶ added in v0.108.5
type TaskMutationErrorKind string
const (
	TaskMutationInvalid        TaskMutationErrorKind = "invalid"
	TaskMutationForbidden      TaskMutationErrorKind = "forbidden"
	TaskMutationNotFound       TaskMutationErrorKind = "not_found"
	TaskMutationConflict       TaskMutationErrorKind = "conflict"
	TaskMutationWorktreeFailed TaskMutationErrorKind = "worktree_failed"
	TaskMutationPersistFailed  TaskMutationErrorKind = "persist_failed"
)
type TaskPlanInput ¶ added in v0.105.2
type TaskPlanRequest ¶ added in v0.105.2
type TaskPlanRequest struct {
Channel string `json:"channel"`
CreatedBy string `json:"created_by"`
Tasks []TaskPlanInput `json:"tasks"`
}
type TaskPostRequest ¶ added in v0.105.2
type TaskPostRequest struct {
Action string `json:"action"`
Channel string `json:"channel"`
ID string `json:"id"`
Title string `json:"title"`
Details string `json:"details"`
Owner string `json:"owner"`
CreatedBy string `json:"created_by"`
ThreadID string `json:"thread_id"`
TaskType string `json:"task_type"`
PipelineID string `json:"pipeline_id"`
ExecutionMode string `json:"execution_mode"`
ReviewState string `json:"review_state"`
SourceSignalID string `json:"source_signal_id"`
SourceDecisionID string `json:"source_decision_id"`
WorktreePath string `json:"worktree_path"`
WorktreeBranch string `json:"worktree_branch"`
DependsOn []string `json:"depends_on"`
MemoryWorkflowOverride bool `json:"memory_workflow_override"`
MemoryWorkflowOverrideActor string `json:"memory_workflow_override_actor"`
MemoryWorkflowOverrideReason string `json:"memory_workflow_override_reason"`
OverrideReason string `json:"override_reason"`
}
type TaskResponse ¶ added in v0.105.2
type TaskResponse struct {
Task teamTask `json:"task"`
}
type TelegramGroup ¶
TelegramGroup represents a Telegram group discovered via getUpdates.
func DiscoverGroups ¶
func DiscoverGroups(token string) ([]TelegramGroup, error)
DiscoverGroups calls getUpdates and extracts unique groups/supergroups the bot has received messages from.
func DiscoverGroupsFromBroker ¶
func DiscoverGroupsFromBroker(broker *Broker) []TelegramGroup
DiscoverGroupsFromBroker returns groups the transport has seen during polling. This is more reliable than getUpdates because the transport records every group it encounters, even after the updates are consumed.
type TelegramTransport ¶
type TelegramTransport struct {
BotToken string
Broker *Broker
// ChatMap maps telegram chat_id (as string) -> office channel slug.
ChatMap map[string]string
// DMChannel is the office channel slug for direct messages (private chats).
// When set, any private message to the bot routes to this channel.
DMChannel string
// UserMap maps telegram username (lowercase) -> office member slug.
// If empty, display names are used verbatim as the "from" field.
UserMap map[string]string
// contains filtered or unexported fields
}
TelegramTransport bridges Telegram chats with the office broker. Each mapped Telegram chat corresponds to an office channel with a "telegram" surface. Inbound Telegram messages are posted to the broker via transport.Host; outbound broker messages on surface channels are sent to Telegram via Send.
func NewTelegramTransport ¶
func NewTelegramTransport(broker *Broker, botToken string) *TelegramTransport
NewTelegramTransport creates a transport from the broker's surface channels. It reads TELEGRAM_BOT_TOKEN from the environment by default, but individual channels can override via their Surface.BotTokenEnv field.
func (*TelegramTransport) Binding ¶ added in v0.118.0
func (t *TelegramTransport) Binding() transport.Binding
Binding returns an empty binding because a single TelegramTransport instance covers multiple channels via ChatMap. There is no single static ChannelSlug to declare here; the per-message channel is carried in each transport.Message.Binding constructed by routeInbound.
func (*TelegramTransport) FormatOutbound ¶ added in v0.160.1
func (t *TelegramTransport) FormatOutbound(msg channelMessage) (transport.Outbound, bool)
FormatOutbound converts a broker channelMessage to a transport.Outbound for the per-transport dispatcher (broker_outbound_dispatch.go). Returns ok=false when no Telegram chat is mapped for the message's channel slug — a missing mapping is a routine skip, not a send failure. The dispatcher logs the skip at Info level and moves on. No side effects: typing indicator + API call happen inside Send so this function stays pure for the dispatcher's convert-then-send loop.
func (*TelegramTransport) HandleInbound ¶
func (t *TelegramTransport) HandleInbound(chatID int64, chatType string, from *telegramUser, text string) error
HandleInbound processes an incoming Telegram message and posts it to the broker.
func (*TelegramTransport) Health ¶ added in v0.118.0
func (t *TelegramTransport) Health() transport.Health
Health returns a point-in-time snapshot of adapter connectivity. O(1) — reads from cached fields updated by pollInbound.
func (*TelegramTransport) Name ¶ added in v0.118.0
func (t *TelegramTransport) Name() string
Name returns "telegram" — the stable adapter name used as AdapterName in every Participant value this transport creates.
func (*TelegramTransport) Run ¶ added in v0.118.0
Run starts the bidirectional bridge and blocks until ctx is cancelled. Inbound Telegram messages are delivered to the office via host; outbound delivery is now driven by the Host-side dispatcher in broker_outbound_dispatch.go (started by launcher_transports.go alongside this Run), so the prior drainOutbound goroutine is gone — the contract intent of "Host calls Send from a per-transport worker goroutine" is honest once again. typingLoop continues to run inside Run because it is a passive presence ping (driven by tagged-agent state on the broker), not a per-message outbound action — keeping it adapter-side avoids leaking telegram-specific "is anyone tagged" polling onto the Host. Implements transport.Transport.
func (*TelegramTransport) Send ¶ added in v0.118.0
Send delivers one outbound message to the Telegram chat mapped to msg.Binding.ChannelSlug. Returns an error if no chat is mapped for that slug or if the Telegram API call fails. Implements transport.Transport.
A "typing" action is sent immediately before the message body so chats see the same UX they got under the prior in-adapter outbound loop: a brief typing bubble, then the formatted message. Sending typing fails silently — it is a UX nicety, not a delivery prerequisite. The typing call is bounded by typingActionMaxLatency rather than the parent ctx because the parent's own timeout is sized for the actual message API call (see SendTypingAction's 30s timeout) — without the tighter cap, a hung chat-action endpoint would gate every Send by the full upstream timeout and serialize into queue backlog on the per-tick dispatcher.
func (*TelegramTransport) SendToTelegram ¶
func (t *TelegramTransport) SendToTelegram(ctx context.Context, chatID string, msg channelMessage) error
SendToTelegram sends a broker message to the specified Telegram chat with HTML formatting.
type TextIndex ¶
type TextIndex interface {
Index(ctx context.Context, f TypedFact) error
Delete(ctx context.Context, factID string) error
Search(ctx context.Context, query string, topK int) ([]SearchHit, error)
Close() error
}
TextIndex is the narrow interface for full-text / BM25 search. blevesearch/bleve/v2 slots in here without wiki_index.go changing.
type TmuxCapability ¶
type TmuxCapability struct {
BinaryPath string
Version string
SocketName string
SessionName string
InsideTmux bool
InsideTmuxEnv string
ServerRunning bool
Sessions []TmuxSessionStatus
ProbeError string
}
func (TmuxCapability) FormatLines ¶
func (t TmuxCapability) FormatLines() []string
type TmuxSessionStatus ¶
type TrashEntry ¶ added in v0.92.0
type TrashEntry struct {
Name string `json:"name"`
TrashID string `json:"trash_id"`
Path string `json:"path"`
ShredAt string `json:"shred_at,omitempty"`
OriginalRuntimeHome string `json:"original_runtime_home,omitempty"`
}
TrashEntry mirrors internal/workspaces.TrashEntry on the wire. Defined here so the broker package's exported surface is self-contained; the adapter in cmd/wuphf maps between the two.
type Triplet ¶
type Triplet struct {
Subject string `json:"subject"`
Predicate string `json:"predicate"`
Object string `json:"object"`
}
Triplet is the subject/predicate/object shape from §4.2. `Object` is a slug, a literal, or `{kind}:{slug}` when the object references another entity.
type TypedFact ¶
type TypedFact struct {
ID string `json:"id"`
EntitySlug string `json:"entity_slug"`
Kind string `json:"kind,omitempty"` // person | company | project | team | workspace
Type string `json:"type,omitempty"` // status | observation | relationship | background
Triplet *Triplet `json:"triplet,omitempty"`
Text string `json:"text"`
Confidence float64 `json:"confidence,omitempty"`
ValidFrom time.Time `json:"valid_from,omitempty"`
ValidUntil *time.Time `json:"valid_until,omitempty"`
Supersedes []string `json:"supersedes,omitempty"`
ContradictsWith []string `json:"contradicts_with,omitempty"`
SourceType string `json:"source_type,omitempty"` // chat | meeting | email | manual | linkedin
SourcePath string `json:"source_path,omitempty"`
SentenceOffset int `json:"sentence_offset,omitempty"`
ArtifactExcerpt string `json:"artifact_excerpt,omitempty"`
CreatedAt time.Time `json:"created_at"`
CreatedBy string `json:"created_by"`
ReinforcedAt *time.Time `json:"reinforced_at,omitempty"`
}
TypedFact is the schema-aligned fact row used by the index.
Maps 1:1 to docs/specs/WIKI-SCHEMA.md §4.2. Every field here has a default per §4.3 so legacy Fact rows (v1.2) parse cleanly with zero-value typed fields — no backfill migration required.
type UpgradeChangelogResponse ¶ added in v0.105.2
type UpgradeChangelogResponse struct {
Commits []upgradecheck.CommitEntry `json:"commits"`
Error string `json:"error,omitempty"`
}
UpgradeChangelogResponse is the stable JSON response served by GET /upgrade-changelog.
type UpgradeCheckErrorResponse ¶ added in v0.105.2
type UpgradeCheckErrorResponse struct {
Current string `json:"current"`
Error string `json:"error"`
}
UpgradeCheckErrorResponse is the cold-upstream-failure shape from GET /upgrade-check when no latest version is available.
type UpgradeCheckResponse ¶ added in v0.105.2
type UpgradeCheckResponse struct {
Current string `json:"current"`
Latest string `json:"latest"`
UpgradeAvailable bool `json:"upgrade_available"`
IsDevBuild bool `json:"is_dev_build"`
CompareURL string `json:"compare_url"`
UpgradeCommand string `json:"upgrade_command"`
InstallMethod string `json:"install_method"`
InstallCommand string `json:"install_command"`
Error string `json:"error,omitempty"`
}
UpgradeCheckResponse is the stable success/partial-success JSON response served by GET /upgrade-check.
type Watching ¶ added in v0.193.0
type Watching struct {
// Files is the list of glob patterns matched against the output of
// `git diff --name-only` between the task's worktree and the task's
// parent branch. Patterns use Go's path/filepath.Match semantics
// (no globstar; ** is not expanded). Each entry must be a valid
// glob — invalid entries are dropped at match time and logged.
Files []string `json:"files,omitempty"`
// WikiPaths is the list of glob patterns matched against the
// wiki-relative paths that appear in the diff (i.e. the same
// `git diff --name-only` output, filtered to entries under the
// repo's wiki root). Same matching semantics as Files.
WikiPaths []string `json:"wiki_paths,omitempty"`
// ToolNames is matched against the union of HeadlessEvent.ToolCalls
// observed for the task across all manifest events on its agent
// stream. Comparison is exact-match on the tool name (no globs);
// tool names are short canonical identifiers.
//
// ToolNames was substituted for an earlier SkillDomains proposal
// because skill-domain tags are not currently recorded in the
// HeadlessEvent pipeline. Adding skill-domain recording is v1.1.
ToolNames []string `json:"tool_names,omitempty"`
// TaskTags is matched against teamTask.Tags. Comparison is
// exact-match (no globs); tags are short canonical identifiers
// that the spec author or owner agent attaches to a task.
TaskTags []string `json:"task_tags,omitempty"`
}
Watching captures the four glob/tag categories an officeMember can declare interest in. Used by the broker's reviewer-routing logic to auto-assign agents to a task entering review.
Each list is matched independently — an empty list means "do not match on this category", not "match everything". An agent is auto-assigned when ANY of its non-empty categories intersects with the task's signals; this matches CODEOWNERS-style "any line touching my path" semantics rather than requiring all categories to overlap (which would make multi-category Watching sets effectively unusable).
type WebBrokerRestartStatus ¶ added in v0.114.1
type WebShareStatus ¶ added in v0.112.0
type WebShareStatus struct {
}
WebShareStatus is the local web UI's view of the team-member invite listener. The listener itself is owned by cmd/wuphf because it reuses the existing share server and private-network bind policy.
type WebTunnelStatus ¶ added in v0.157.0
type WebTunnelStatus struct {
Running bool `json:"running"`
// PublicURL is the trycloudflare.com origin (no path). Empty until
// cloudflared has reported a URL.
PublicURL string `json:"public_url,omitempty"`
// InviteURL is PublicURL + "/join/<token>". Empty until both the
// tunnel and a fresh invite token are ready.
InviteURL string `json:"invite_url,omitempty"`
// Passcode is the second-factor numeric string the joiner must enter
// before the broker accepts their invite. Phase 2 hardening: the
// invite URL alone (e.g. accidentally pasted into a public Slack) is
// no longer sufficient to redeem the invite. Empty in network-share
// mode.
Passcode string `json:"passcode,omitempty"`
ExpiresAt string `json:"expires_at,omitempty"`
Error string `json:"error,omitempty"`
// CloudflaredMissing flips on when the tunnel binary is not on PATH so
// the UI can render install instructions instead of a generic error.
CloudflaredMissing bool `json:"cloudflared_missing,omitempty"`
}
WebTunnelStatus is the local web UI's view of the public-tunnel listener for team-member invites. The listener wraps a `cloudflared` subprocess that publishes a one-off https://*.trycloudflare.com URL pointing at the loopback share HTTP server, so non-technical hosts can hand a link to a teammate without standing up Tailscale or an SSH tunnel themselves.
Owned by cmd/wuphf the same way WebShareStatus is — keeping the broker transport-agnostic.
type WikiArchiver ¶ added in v0.100.0
type WikiArchiver struct {
// contains filtered or unexported fields
}
WikiArchiver sweeps team/ for stale articles and moves them to .archive/. nil readLog means every article is treated as never-read.
func NewWikiArchiver ¶ added in v0.100.0
func NewWikiArchiver(repo *Repo, readLog *ReadLog, cutoff time.Duration) *WikiArchiver
NewWikiArchiver returns an archiver. cutoff=0 uses DefaultArchiveCutoffDays.
func (*WikiArchiver) Sweep ¶ added in v0.100.0
func (a *WikiArchiver) Sweep(ctx context.Context) (SweepResult, error)
Sweep walks team/ and archives every eligible article.
Eligibility (all must hold):
- File age ≥ cutoff (oldest commit via commitBoundsByPath)
- Last read ≥ cutoff days ago (or never read at all)
- Word count ≥ archiveMinWordCount (skip stubs)
- Not already a tombstone (frontmatter archived: true)
type WikiCompressor ¶ added in v0.101.0
type WikiCompressor struct {
// contains filtered or unexported fields
}
WikiCompressor is the broker-level compression worker.
func NewWikiCompressor ¶ added in v0.101.0
func NewWikiCompressor(worker *WikiWorker, cfg CompressorConfig) *WikiCompressor
NewWikiCompressor wires a compressor against the given wiki worker. Config may be the zero value; defaults are filled in here.
func (*WikiCompressor) EnqueueCompress ¶ added in v0.101.0
func (c *WikiCompressor) EnqueueCompress(relPath, requestBy string) (queued, inFlight bool, err error)
EnqueueCompress adds a compression job if none is already in-flight or queued for the same article. Returns (queued, inFlight, err):
- queued=true, inFlight=false: fresh job scheduled.
- queued=false, inFlight=true: job already running/queued — debounced.
- queued=false, inFlight=false, err!=nil: queue saturated or compressor stopped.
queued and inFlight are captured atomically under the lock so callers do not need a separate IsInflight call that could race with job completion.
func (*WikiCompressor) IsInflight ¶ added in v0.101.0
func (c *WikiCompressor) IsInflight(relPath string) bool
IsInflight reports whether a compress job is currently running or queued for the given article path.
func (*WikiCompressor) Start ¶ added in v0.101.0
func (c *WikiCompressor) Start(ctx context.Context)
Start launches the compress loop. Returns immediately. Stop via Stop().
func (*WikiCompressor) Stop ¶ added in v0.101.0
func (c *WikiCompressor) Stop()
Stop signals the worker to exit. Pending jobs in the buffered channel are discarded — caller is responsible for only calling this at shutdown.
type WikiIndex ¶
type WikiIndex struct {
// contains filtered or unexported fields
}
WikiIndex composes a FactStore and a TextIndex behind a single handle the rest of the broker uses. Construct via NewWikiIndex.
func NewPersistentWikiIndex ¶
NewPersistentWikiIndex constructs a WikiIndex that stores facts in a SQLite database and uses bleve for full-text (BM25) search — both pure-Go, no cgo.
- sqlite db: indexDir/wiki.sqlite
- bleve dir: indexDir/bleve/
indexDir is created with 0o755 if it does not exist. The caller must call Close() when done. NewWikiIndex (in-memory) remains the default for tests and the fallback path — this constructor is additive only.
func NewWikiIndex ¶
func NewWikiIndex(root string, opts ...IndexOption) *WikiIndex
NewWikiIndex constructs a WikiIndex rooted at the given wiki repo directory. Defaults to in-memory stores so callers can wire up tests without new deps. The real pure-Go backends (modernc.org/sqlite + bleve) replace the defaults behind the same interfaces when Slice 1 benchmarks demand them.
func (*WikiIndex) CanonicalHashAll ¶
CanonicalHashAll returns a composite hash over facts, entities, edges, and redirects. Used by the §7.4 rebuild contract test to verify no silent drift in the entity, edge, or redirect layers after a full reconcile.
func (*WikiIndex) CanonicalHashFacts ¶
CanonicalHashFacts returns a stable hash over all indexed facts, used by the §7.4 rebuild contract test: hash before rebuild must equal hash after.
func (*WikiIndex) ListEdgesForEntity ¶
ListEdgesForEntity returns graph.log edges incident on an entity.
func (*WikiIndex) ListFactsByPredicateObject ¶
func (w *WikiIndex) ListFactsByPredicateObject(ctx context.Context, predicate, object string) ([]TypedFact, error)
ListFactsByPredicateObject passes through to the FactStore. Used by the typed-predicate graph walk for multi_hop queries (Slice 2 Thread A).
func (*WikiIndex) ListFactsByTriplet ¶
func (w *WikiIndex) ListFactsByTriplet(ctx context.Context, subject, predicate, objectPrefix string) ([]TypedFact, error)
ListFactsByTriplet passes through to the FactStore. Used by the typed-predicate graph walk and counterfactual rewrite (Slice 2 Thread A).
func (*WikiIndex) ListFactsForEntity ¶
ListFactsForEntity returns every fact indexed against an entity slug, respecting redirects: if `slug` is a redirect, the survivor's facts are returned.
func (*WikiIndex) ReconcileFromMarkdown ¶
ReconcileFromMarkdown walks the wiki repo from scratch and rebuilds the index. Implements the §7.4 substrate guarantee: `rm -rf .wuphf/index/` → call this → logically-identical index. Safe to run concurrently with reads (writes are serialized inside the store).
The mutex guards only lastBuild. All reconcile I/O runs outside the lock so that Search and GetFact are never blocked during a long boot reconcile. The FactStore's own internal synchronization serializes writes.
func (*WikiIndex) ReconcilePath ¶
ReconcilePath indexes a single file by path relative to the wiki root. Called from WikiWorker.process after every successful commit so the index stays live with the repo (§2 when-to-read).
Paths recognized:
- wiki/facts/{kind}/{slug}.jsonl (new schema)
- team/entities/{kind}-{slug}.facts.jsonl (v1.2 legacy)
- team/{kind}/{slug}.md (entity brief)
- graph.log (typed edges)
- wiki/.lint/report-YYYY-MM-DD.md (lint report; §3 Layer-2)
func (*WikiIndex) Search ¶
Search runs a class-aware retrieval against the index. Multi_hop and counterfactual queries take the typed-predicate graph walk path (Slice 2 Thread A); every other class falls through to plain BM25. `topK` is clamped to [1, 100].
Invariant: the typed walk is additive. If the rewriter fails to parse spans or the FactStore yields no typed hits, the BM25 path answers alone. Recall never falls below the BM25-only baseline.
type WikiIndexSignalAdapter ¶
type WikiIndexSignalAdapter struct {
// contains filtered or unexported fields
}
WikiIndexSignalAdapter adapts a WikiIndex to the SignalIndex interface.
func NewWikiIndexSignalAdapter ¶
func NewWikiIndexSignalAdapter(idx *WikiIndex) *WikiIndexSignalAdapter
NewWikiIndexSignalAdapter constructs an adapter over the given index. A nil idx yields an adapter that returns "not found" for every lookup — useful in tests that do not boot the full index stack.
func (*WikiIndexSignalAdapter) EntityByDomain ¶
func (a *WikiIndexSignalAdapter) EntityByDomain(ctx context.Context, domain string) ([]resolverEntity, error)
EntityByDomain returns every entity associated with the given domain.
func (*WikiIndexSignalAdapter) EntityByEmail ¶
func (a *WikiIndexSignalAdapter) EntityByEmail(ctx context.Context, email string) (resolverEntity, bool, error)
EntityByEmail returns the entity whose signals.email matches (case-insensitive).
func (*WikiIndexSignalAdapter) EntityByName ¶
func (a *WikiIndexSignalAdapter) EntityByName(ctx context.Context, name string) ([]resolverEntity, error)
EntityByName returns every entity whose signals.person_name contains the query as a case-insensitive substring. The resolver applies its own JW threshold on top.
func (*WikiIndexSignalAdapter) EntityBySlug ¶
func (a *WikiIndexSignalAdapter) EntityBySlug(ctx context.Context, slug string) (resolverEntity, bool, error)
EntityBySlug returns the entity with the given canonical slug.
type WikiSearchHit ¶
type WikiSearchHit struct {
Path string `json:"path"`
Line int `json:"line"`
Snippet string `json:"snippet"`
}
WikiSearchHit is a literal substring match returned by the search API.
type WikiSectionsUpdatedEvent ¶
type WikiSectionsUpdatedEvent struct {
Sections []DiscoveredSection `json:"sections"`
Timestamp string `json:"timestamp"`
}
WikiSectionsUpdatedEvent is the SSE payload broadcast when the cached section list changes shape (new section, or a section's count/bounds shift). Content ships as the full section list so the UI can hot-swap without another HTTP roundtrip.
type WikiWorker ¶
type WikiWorker struct {
// contains filtered or unexported fields
}
WikiWorker owns the single goroutine that drains the write request queue.
func NewWikiWorker ¶
func NewWikiWorker(repo *Repo, publisher wikiEventPublisher) *WikiWorker
NewWikiWorker returns a worker ready to Start. The publisher is optional; when nil, events are dropped silently. The worker's index is nil — no derived-cache updates occur. Use NewWikiWorkerWithIndex when an index is available (production path).
func NewWikiWorkerWithIndex ¶
func NewWikiWorkerWithIndex(repo *Repo, publisher wikiEventPublisher, index *WikiIndex) *WikiWorker
NewWikiWorkerWithIndex is the production constructor. It behaves identically to NewWikiWorker but additionally wires up a WikiIndex so that after every successful commit the worker reconciles the affected path into the derived cache (SQLite+bleve in prod, in-memory for tests). The index update runs in a side goroutine tracked by sideGoroutines — WaitForIdle() covers it.
func (*WikiWorker) AgentsWithNotebooks ¶
func (w *WikiWorker) AgentsWithNotebooks() ([]string, error)
AgentsWithNotebooks walks the wiki repo and returns the slugs of every agent that has at least an `agents/{slug}/notebook/` directory. Used by the bookshelf catalog so the UI does not need to pre-enumerate the roster. Order is lexicographic for stable rendering.
func (*WikiWorker) Done ¶
func (w *WikiWorker) Done() <-chan struct{}
Done returns a channel that closes when the drain goroutine has fully exited — including its wait on side goroutines. Tests that started the worker with a cancellable context should `<-worker.Done()` after the cancel so drain's in-flight repo writes settle before t.TempDir() removal.
func (*WikiWorker) Enqueue ¶
func (w *WikiWorker) Enqueue(ctx context.Context, slug, path, content, mode, commitMsg string) (string, int, error)
Enqueue submits a write request to the worker and blocks (up to wikiWriteTimeout) for the reply. Returns ErrQueueSaturated if the queue is full — callers should surface this as a tool error with no hidden retry.
func (*WikiWorker) EnqueueArchiveSweep ¶ added in v0.111.0
func (w *WikiWorker) EnqueueArchiveSweep(ctx context.Context, readLog *ReadLog, minAge time.Duration) (SweepResult, error)
EnqueueArchiveSweep runs WikiArchiver.Sweep on the worker's serialized write queue. The caller controls the timeout with ctx because a sweep can perform many archive commits and legitimately outlive wikiWriteTimeout.
func (*WikiWorker) EnqueueArtifact ¶
func (w *WikiWorker) EnqueueArtifact(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
EnqueueArtifact submits a raw source artifact write to the shared wiki queue. The path must match wiki/artifacts/{kind}/{sha}.md. On success, the worker fires the extractor hook in a side goroutine — the reply returns as soon as the git commit lands; extraction is best-effort and never fails the commit path.
func (*WikiWorker) EnqueueEntityFact ¶
func (w *WikiWorker) EnqueueEntityFact(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
EnqueueEntityFact submits a fact-log append to the shared wiki queue. The path must be team/entities/{kind}-{slug}.facts.jsonl and is routed to Repo.CommitEntityFact (which does NOT regen the wiki index).
func (*WikiWorker) EnqueueEntityGraph ¶
func (w *WikiWorker) EnqueueEntityGraph(ctx context.Context, slug, content, commitMsg string) (string, int, error)
EnqueueEntityGraph submits a full rewrite of the cross-entity adjacency log at team/entities/.graph.jsonl. Same single-writer queue as the fact log; routed to Repo.CommitEntityGraph (which does NOT regen the wiki index or backlinks). Caller (EntityGraph) owns append-merge semantics — the worker just replaces the bytes.
func (*WikiWorker) EnqueueFactLog ¶
func (w *WikiWorker) EnqueueFactLog(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
EnqueueFactLog submits a fact-log mutation to the shared wiki queue. The path must be wiki/facts/**/*.jsonl or team/entities/*.facts.jsonl. Used by lint ResolveContradiction to update supersedes/valid_until/contradicts_with. `content` is the FULL replacement body for the file (not a diff). Prefer EnqueueFactLogAppend for the extractor append path.
func (*WikiWorker) EnqueueFactLogAppend ¶
func (w *WikiWorker) EnqueueFactLogAppend(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
EnqueueFactLogAppend appends JSONL content to a fact-log file via the shared single-writer queue. `content` is only the new lines — the worker reads the existing file, concatenates, and commits. Used by the extractor to close the §7.4 substrate guarantee: every successfully-submitted fact lands in markdown so a wipe + reconcile rebuilds to a logically-identical index.
The read-modify-write happens inside the worker's repo mutex (AppendFactLog), so callers MUST NOT bypass the queue — that would race two appenders.
func (*WikiWorker) EnqueueHuman ¶
func (w *WikiWorker) EnqueueHuman(ctx context.Context, path, content, commitMsg, expectedSHA string) (string, int, error)
EnqueueHuman submits a human-authored wiki write to the shared single-writer queue with the legacy fallback identity. Retained for backward compatibility with tests and call sites that predate v1.5's per-human identity registry; prefer EnqueueHumanAs for new code.
func (*WikiWorker) EnqueueHumanAs ¶
func (w *WikiWorker) EnqueueHumanAs(ctx context.Context, id HumanIdentity, path, content, commitMsg, expectedSHA string) (string, int, error)
EnqueueHumanAs submits a human-authored wiki write to the shared single-writer queue, stamping the commit with the supplied identity. A zero-value identity falls back to the synthetic `human` author so single-user installs keep working.
The HTTP handler is already gated by the broker bearer token, so the worker trusts the identity it is given — this is belt-and-braces between two authenticated layers, not an anti-spoofing boundary.
Returns ErrWikiSHAMismatch wrapped with the current HEAD SHA (in the SHA return slot) when expected_sha does not match; callers pass that back to the client so the 409 prompt can reload the latest content.
func (*WikiWorker) EnqueueLintReport ¶
func (w *WikiWorker) EnqueueLintReport(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
EnqueueLintReport submits a lint report write to the shared wiki queue. The path must be wiki/.lint/report-YYYY-MM-DD.md and is routed to Repo.CommitLintReport (which does NOT regen the team-wiki index).
func (*WikiWorker) EnqueuePlaybookCompile ¶
func (w *WikiWorker) EnqueuePlaybookCompile(ctx context.Context, slug, authorSlug string) (string, int, error)
EnqueuePlaybookCompile runs CompilePlaybook against the current on-disk source and submits the output to the queue as a compiled-skill write. The commit is attributed to the archivist identity regardless of who authored the source edit — the compilation is a machine artifact.
authorSlug is the source-edit author on whose behalf compilation was triggered; currently unused by the worker, kept in the signature so callers (auto-recompile, /skill create, tests) can pass it along without branching. When compile observability grows past "did it run" (e.g. a per-trigger log line), this is where the value lands.
func (*WikiWorker) EnqueuePlaybookExecution ¶
func (w *WikiWorker) EnqueuePlaybookExecution(ctx context.Context, slug, path, content, commitMsg string) (string, int, error)
EnqueuePlaybookExecution submits an execution-log append to the shared queue. Used by ExecutionLog.Append; mirrors EnqueueEntityFact shape.
func (*WikiWorker) EnqueueTeamLearning ¶ added in v0.97.0
func (w *WikiWorker) EnqueueTeamLearning(ctx context.Context, slug, path, jsonlContent, markdownContent, commitMsg string) (string, int, error)
EnqueueTeamLearning submits the merged team learnings JSONL plus generated markdown page to the shared wiki queue.
func (*WikiWorker) EnsureNotebookDirs ¶ added in v0.105.1
EnsureNotebookDirs creates the on-disk notebook shelf for each roster slug. It commits only .gitkeep files, never sample content, so a fresh office has visible per-agent notebooks without pretending any agent has written notes.
func (*WikiWorker) Index ¶
func (w *WikiWorker) Index() *WikiIndex
Index returns the derived WikiIndex attached to this worker, or nil when none is wired. Read-only access is safe — the extractor uses this to consult the signal index through an adapter.
func (*WikiWorker) NotebookCommitCount ¶ added in v0.182.9
func (w *WikiWorker) NotebookCommitCount() int
NotebookCommitCount returns the number of successful notebook writes since this worker started. Monotonic; never decreases. Used by PromotionSweep's content-volume gate to skip iterations when no drafted notebook content has changed since the last sweep.
func (*WikiWorker) NotebookList ¶
func (w *WikiWorker) NotebookList(slug string) ([]NotebookEntry, error)
NotebookList returns the agent's notebook entries newest-first. Empty slice (never nil) when the agent has no entries — callers can marshal as JSON without null-guarding.
func (*WikiWorker) NotebookRead ¶
func (w *WikiWorker) NotebookRead(path string) ([]byte, error)
NotebookRead returns raw entry bytes for any agent's notebook. Cross-agent reads are intentional: notebooks are private-by-convention, not by access control.
func (*WikiWorker) NotebookSearch ¶
func (w *WikiWorker) NotebookSearch(slug, pattern string) ([]WikiSearchHit, error)
NotebookSearch runs a literal substring search scoped to a single agent's notebook subtree. Pattern is escaped by the caller path; this function only does substring matching — no regex — so there is no injection surface.
func (*WikiWorker) NotebookWrite ¶
func (w *WikiWorker) NotebookWrite(ctx context.Context, slug, path, content, mode, commitMsg string) (string, int, error)
NotebookWrite submits a notebook write to the shared wiki queue. The slug MUST match the agent slug embedded in the path — enforced here before the request is handed off to the worker.
func (*WikiWorker) NotifyArchived ¶ added in v0.101.0
func (w *WikiWorker) NotifyArchived(ctx context.Context, paths []string)
NotifyArchived fires the same post-commit hooks that process() fires for standard wiki writes, but for paths committed by WikiArchiver.Sweep(). Kept as a public bridge for tests and repair paths; normal sweeps should use EnqueueArchiveSweep so CommitArchive also runs on the single-writer queue.
func (*WikiWorker) QueueLength ¶
func (w *WikiWorker) QueueLength() int
QueueLength returns the current number of pending requests. Useful for diagnostics and tests.
func (*WikiWorker) ReadArticle ¶
func (w *WikiWorker) ReadArticle(relPath string) ([]byte, error)
ReadArticle returns the raw article bytes for a validated path. Exposed so callers in other packages (notably internal/pam after extraction) can read articles without importing internal/team to name *team.Repo.
func (*WikiWorker) Repo ¶
func (w *WikiWorker) Repo() *Repo
Repo returns the underlying wiki repo — used by read-side broker handlers which do not need the serialized write queue.
func (*WikiWorker) SetExtractor ¶
func (w *WikiWorker) SetExtractor(e ExtractorHook)
SetExtractor wires an ExtractorHook onto the worker. Safe to call before or after Start; the hook is only consulted inside process. Passing nil disables the hook (default behaviour).
func (*WikiWorker) Start ¶
func (w *WikiWorker) Start(ctx context.Context)
Start launches the drain goroutine. Returns immediately. The worker stops when ctx is cancelled.
func (*WikiWorker) Stop ¶
func (w *WikiWorker) Stop()
Stop is a test helper that closes the request channel so the drain loop returns. Production code should cancel the context passed to Start instead.
Ordering matters: mark as stopped → wait for any in-flight side goroutines (e.g. auto-recompile helpers that take the queue) → close the channel. Without the wait, a recompile goroutine can attempt to send on a closed channel and panic.
func (*WikiWorker) SubmitFacts ¶
func (w *WikiWorker) SubmitFacts(ctx context.Context, facts []TypedFact, entities []IndexEntity) error
SubmitFacts routes an index mutation — entities + facts — through the single-writer queue. This preserves the single-writer invariant for the extractor loop: agents, the resolver, and the extractor ask; only the worker writes.
Entities are upserted BEFORE facts so fact rows always resolve against a known entity row. Never mutates git — this is a cache-only job that keeps the index live between markdown-reconcile passes.
func (*WikiWorker) WaitForIdle ¶
func (w *WikiWorker) WaitForIdle()
WaitForIdle blocks until every detached side goroutine spawned by the worker (auto-recompile, backup mirror) has finished. Tests register this via t.Cleanup so t.TempDir() RemoveAll does not race in-flight background writes into wiki.bak/ — the symptom is "directory not empty" or "no such file or directory" cleanup errors on Linux CI.
Safe to call after ctx cancellation: the side-goroutine WaitGroup is independent of drain lifecycle.
type Win ¶ added in v0.193.0
Win is one entry on SessionReport.TopWins. Delta is a short labelled magnitude string the UI renders in the leftmost column ("+128 LOC", "x3.2 faster", "-10 deps") so a reader can scan deltas without parsing the description. Description is the prose. Both fields are agent-authored; the broker never synthesises them.
type Workspace ¶ added in v0.92.0
type Workspace struct {
Name string `json:"name"`
RuntimeHome string `json:"runtime_home"`
BrokerPort int `json:"broker_port"`
WebPort int `json:"web_port"`
State string `json:"state"` // running|paused|starting|stopping|never_started|error
Blueprint string `json:"blueprint,omitempty"`
CompanyName string `json:"company_name,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
LastUsedAt string `json:"last_used_at,omitempty"`
PausedAt *string `json:"paused_at,omitempty"`
IsActive bool `json:"is_active,omitempty"`
}
Workspace mirrors the registry shape returned to API consumers. Lane B's internal/workspaces package will define the canonical type; this is the shape Lane C's tests assert against. At merge, cmd/wuphf adapts Lane B's type into this shape (or both packages share a third package).
Source Files
¶
- agent_issue.go
- artifact_commit.go
- auto_notebook_writer.go
- autonomy_coach.go
- bridge_helpers.go
- broker.go
- broker_actor.go
- broker_auth.go
- broker_bridge.go
- broker_channel_access.go
- broker_commands.go
- broker_company_seed.go
- broker_cursors.go
- broker_decision_packet.go
- broker_decision_packet_events.go
- broker_decision_packet_types.go
- broker_defaults.go
- broker_dm.go
- broker_entity.go
- broker_gc.go
- broker_human.go
- broker_human_share.go
- broker_image_providers.go
- broker_image_root.go
- broker_inbox.go
- broker_inbox_handler.go
- broker_inbox_packet_types.go
- broker_intake.go
- broker_intake_types.go
- broker_learning.go
- broker_lifecycle.go
- broker_lifecycle_transition.go
- broker_lint.go
- broker_lint_cron.go
- broker_member_construction.go
- broker_memory_http.go
- broker_messages.go
- broker_middleware.go
- broker_misc_handlers.go
- broker_notebook.go
- broker_notebook_review.go
- broker_office_channels.go
- broker_office_members.go
- broker_onboarding.go
- broker_otlp_usage.go
- broker_outbound_dispatch.go
- broker_pam.go
- broker_pane.go
- broker_persistence.go
- broker_playbook.go
- broker_policies.go
- broker_presence.go
- broker_provider_binding.go
- broker_publish.go
- broker_queues.go
- broker_requests_interviews.go
- broker_review.go
- broker_reviewer_routing.go
- broker_reviewer_routing_types.go
- broker_route_contracts.go
- broker_scan.go
- broker_scheduler.go
- broker_skills.go
- broker_sse.go
- broker_streams.go
- broker_studio.go
- broker_task_queries.go
- broker_tasks.go
- broker_tasks_contracts.go
- broker_tasks_http.go
- broker_tasks_lifecycle.go
- broker_tasks_memory_workflow.go
- broker_tasks_mutation_service.go
- broker_tasks_notifications.go
- broker_tasks_plan.go
- broker_tasks_post.go
- broker_tasks_service.go
- broker_tasks_worktrees.go
- broker_telegram_connect.go
- broker_text.go
- broker_transport.go
- broker_types.go
- broker_upgrade.go
- broker_web_proxy.go
- broker_web_restart.go
- broker_web_share.go
- broker_web_tunnel.go
- broker_wiki_dlq.go
- broker_wiki_extract.go
- broker_wiki_lifecycle.go
- broker_wiki_maintenance.go
- broker_workspaces.go
- capabilities.go
- capability_registry.go
- channel_intent_classifier.go
- domains.go
- entity_commit.go
- entity_facts.go
- entity_frontmatter.go
- entity_graph.go
- entity_graph_commit.go
- entity_minimal_brief.go
- entity_resolver.go
- entity_synthesizer.go
- escalation.go
- headless_activity_classifier.go
- headless_claude.go
- headless_codex.go
- headless_codex_queue.go
- headless_codex_recovery.go
- headless_codex_runner.go
- headless_event.go
- headless_live_chat_relay.go
- headless_logging.go
- headless_openai_compat.go
- headless_openai_compat_mcp.go
- headless_opencode.go
- headless_opencode_mcp.go
- headless_process_unix.go
- headless_progress.go
- headless_task_runners.go
- human_commit.go
- human_identity.go
- human_wiki_intent.go
- jaro_winkler.go
- launcher.go
- launcher_boot.go
- launcher_drain.go
- launcher_loops.go
- launcher_manifest.go
- launcher_membership.go
- launcher_nex.go
- launcher_options.go
- launcher_preflight.go
- launcher_reconfigure.go
- launcher_session.go
- launcher_transports.go
- launcher_web.go
- launcher_wiring.go
- learning_commit.go
- learnings.go
- ledger.go
- local_providers_status.go
- mcp_config.go
- memory_backend.go
- memory_workflow.go
- memory_workflow_reconciler.go
- message_redaction.go
- notebook_signal_scanner.go
- notebook_signal_scanner_embeddings.go
- notebook_worker.go
- notification_context.go
- notifier_delivery.go
- notifier_loops.go
- notifier_targets.go
- office_targets.go
- openai_compat_loop.go
- openai_compat_turn_state.go
- openclaw.go
- openclaw_bootstrap.go
- openclaw_transport.go
- operation_bootstrap.go
- operation_bootstrap_automation.go
- operation_bootstrap_config.go
- operation_bootstrap_helpers.go
- operation_bootstrap_loaders.go
- operation_bootstrap_package.go
- operation_bootstrap_runtime.go
- operation_bootstrap_starter.go
- operation_bootstrap_types.go
- operation_bootstrap_value.go
- operation_bootstrap_workflows.go
- pam.go
- pam_actions.go
- pane_capture.go
- pane_dispatch.go
- pane_lifecycle.go
- pane_lifecycle_spawn.go
- playbook_clusters.go
- playbook_commit.go
- playbook_compiler.go
- playbook_events.go
- playbook_executions.go
- playbook_synthesizer.go
- playbook_synthesizer_v2.go
- policy.go
- promotion_commit.go
- promotion_demand.go
- promotion_log.go
- promotion_state.go
- promotion_sweep.go
- promotion_sweep_adapter.go
- prompt_builder.go
- prompt_escape.go
- prompts.go
- resume.go
- routing.go
- runtime_artifacts.go
- runtime_state.go
- safe_task_id.go
- scheduler.go
- scheduler_runtime.go
- scoped_memory.go
- self_heal_signal.go
- self_healing.go
- session_memory.go
- session_memory_snapshot.go
- session_mode.go
- share_transport.go
- skill_candidate.go
- skill_compile.go
- skill_compile_endpoints.go
- skill_consolidate.go
- skill_counter.go
- skill_crud_endpoints.go
- skill_dedup.go
- skill_frontmatter.go
- skill_guard.go
- skill_migration.go
- skill_proposal_helper.go
- skill_review_nudge.go
- skill_scanner.go
- skill_synth_provider.go
- skill_synthesizer.go
- skill_tombstone.go
- stage_b_signals.go
- task_pipeline.go
- telegram.go
- template.go
- test_support.go
- title.go
- tmux_runner.go
- wiki_archiver.go
- wiki_article.go
- wiki_classifier.go
- wiki_compressor.go
- wiki_dlq.go
- wiki_extractor.go
- wiki_git.go
- wiki_index.go
- wiki_index_bleve.go
- wiki_index_signal_adapter.go
- wiki_index_sqlite.go
- wiki_lint.go
- wiki_lookup.go
- wiki_maintenance.go
- wiki_query.go
- wiki_query_retrieve.go
- wiki_query_rewrite.go
- wiki_reads.go
- wiki_sections.go
- wiki_worker.go
- wiki_worker_archive.go
- worktree.go
Directories
¶
| Path | Synopsis |
|---|---|
| transport | Package transport defines the contract between the WUPHF broker and external message adapters (Telegram, OpenClaw, human-share, and future integrations). |