/**
 * @import {
 *   Construct,
 *   Resolver,
 *   State,
 *   TokenizeContext,
 *   Tokenizer,
 *   Token
 * } from 'micromark-util-types'
 */

import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
import { subtokenize } from 'micromark-util-subtokenize';

/**
 * No name because it must not be turned off.
 *
 * @type {Construct}
 */
export const content = {
  resolve: resolveContent,
  tokenize: tokenizeContent
};

/** @type {Construct} */
const continuationConstruct = {
  partial: true,
  tokenize: tokenizeContinuation
};

/**
 * Content is transparent: it’s parsed right now. That way, definitions are also
 * parsed right now: before text in paragraphs (specifically, media) are parsed.
 *
 * @type {Resolver}
 */
function resolveContent(events) {
  subtokenize(events);
  return events;
}
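
// Illustrative sketch of the effect (assumes the public `micromark` package,
// which is built on these internals): because content, and with it
// definitions, is resolved eagerly, a reference can point at a definition
// that only appears later in the document.
//
//   import { micromark } from 'micromark'
//
//   micromark('[x]\n\n[x]: https://example.com')
//   // -> '<p><a href="https://example.com">x</a></p>'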

/**
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeContent(effects, ok) {
  /** @type {Token | undefined} */
  let previous;
  return chunkStart;

  /**
   * Before a content chunk.
   *
   * ```markdown
   * > | abc
   *     ^
   * ```
   *
   * @type {State}
   */
  function chunkStart(code) {
    effects.enter("content");
    previous = effects.enter("chunkContent", {
      contentType: "content"
    });
    return chunkInside(code);
  }

  /**
   * In a content chunk.
   *
   * ```markdown
   * > | abc
   *     ^^^
   * ```
   *
   * @type {State}
   */
  function chunkInside(code) {
    if (code === null) {
      return contentEnd(code);
    }

    // To do: in `markdown-rs`, each line is parsed on its own, and everything
    // is stitched together resolving.
    if (markdownLineEnding(code)) {
      return effects.check(continuationConstruct, contentContinue, contentEnd)(code);
    }

    // Data.
    effects.consume(code);
    return chunkInside;
  }

  /**
   * At the end of the content.
   *
   * @type {State}
   */
  function contentEnd(code) {
    effects.exit("chunkContent");
    effects.exit("content");
    return ok(code);
  }

  /**
   * At a line ending that the lookahead determined continues the content.
   *
   * @type {State}
   */
  function contentContinue(code) {
    // Consume the line ending, close the current chunk, and open a new one
    // linked to it.
    effects.consume(code);
    effects.exit("chunkContent");
    previous.next = effects.enter("chunkContent", {
      contentType: "content",
      previous
    });
    previous = previous.next;
    return chunkInside;
  }
}
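
// Note: the chunks produced above form a linked list (`previous`/`next`),
// one `chunkContent` token per line. `resolveContent` hands that chain to
// `subtokenize`, which parses the concatenated text, which is how
// definitions become available before paragraph text is compiled.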

/**
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeContinuation(effects, ok, nok) {
  const self = this;
  return startLookahead;

  /**
   * At a line ending, before a potential continuation line.
   *
   * @type {State}
   */
  function startLookahead(code) {
    effects.exit("chunkContent");
    effects.enter("lineEnding");
    effects.consume(code);
    effects.exit("lineEnding");
    return factorySpace(effects, prefixed, "linePrefix");
  }

  /**
   * After the line prefix of the potential continuation line.
   *
   * @type {State}
   */
  function prefixed(code) {
    if (code === null || markdownLineEnding(code)) {
      return nok(code);
    }

    // Always populated by defaults.
    const tail = self.events[self.events.length - 1];

    // A prefix of four or more spaces looks like indented code, which cannot
    // interrupt content, so the content continues.
    if (
      !self.parser.constructs.disable.null.includes('codeIndented') &&
      tail &&
      tail[1].type === "linePrefix" &&
      tail[2].sliceSerialize(tail[1], true).length >= 4
    ) {
      return ok(code);
    }

    // If any flow construct can interrupt here, the content ends; otherwise
    // it continues.
    return effects.interrupt(self.parser.constructs.flow, nok, ok)(code);
  }
}
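
// Continuation sketch (illustrative): for `abc\ndef`, no flow construct can
// interrupt after the line ending, so the check succeeds and both lines stay
// in one content section. For `abc\n# d`, the ATX heading interrupts, the
// check fails, and the content ends after `abc`.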