Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
177 lines
3.9 KiB
JavaScript
177 lines
3.9 KiB
JavaScript
/**
|
||
* @import {
|
||
* Construct,
|
||
* State,
|
||
* TokenizeContext,
|
||
* Tokenizer
|
||
* } from 'micromark-util-types'
|
||
*/
|
||
|
||
import { factorySpace } from 'micromark-factory-space';
|
||
import { markdownLineEnding, markdownSpace } from 'micromark-util-character';
|
||
/** @type {Construct} */
export const codeIndented = {
  // Name by which this construct can be turned on/off in extensions.
  name: 'codeIndented',
  // Flow-level state machine for indented code blocks.
  tokenize: tokenizeCodeIndented
};
|
||
|
||
/** @type {Construct} */
const furtherStart = {
  // Partial: only ever used via `effects.attempt` from inside
  // `tokenizeCodeIndented`, never as a stand-alone construct.
  partial: true,
  tokenize: tokenizeFurtherStart
};
|
||
|
||
/**
 * Tokenize an indented code block: a prefix of four or more spaces of
 * indentation followed by content, possibly spanning several lines.
 *
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeCodeIndented(effects, ok, nok) {
  const context = this;
  return begin;

  /**
   * Start of code (indented).
   *
   * > **Parsing note**: there is no need to check that this first line is a
   * > filled line (has a non-whitespace character): blank lines are parsed
   * > already, so we never run into that here.
   *
   * ```markdown
   * > |     aaa
   *     ^
   * ```
   *
   * @type {State}
   */
  function begin(code) {
    // To do: manually check if interrupting like `markdown-rs`.
    effects.enter("codeIndented");
    // To do: use an improved `space_or_tab` function like `markdown-rs`,
    // so that we can drop the next state.
    return factorySpace(effects, prefixed, "linePrefix", 4 + 1)(code);
  }

  /**
   * After the indentation prefix: only proceed when it measured four or
   * more columns, otherwise this is not indented code.
   *
   * ```markdown
   * > |     aaa
   *         ^
   * ```
   *
   * @type {State}
   */
  function prefixed(code) {
    const last = context.events[context.events.length - 1];

    if (last && last[1].type === "linePrefix") {
      // `true`: measure with tabs expanded.
      const size = last[2].sliceSerialize(last[1], true).length;

      if (size >= 4) {
        return contentBreak(code);
      }
    }

    return nok(code);
  }

  /**
   * At a break: EOF, an eol, or the start of content on this line.
   *
   * ```markdown
   * > |     aaa
   *         ^  ^
   * ```
   *
   * @type {State}
   */
  function contentBreak(code) {
    if (code === null) {
      // EOF: the block ends here.
      return wrapUp(code);
    }

    if (markdownLineEnding(code)) {
      // Try to consume the eol plus enough indentation for another code
      // line; if that fails, the block is done.
      return effects.attempt(furtherStart, contentBreak, wrapUp)(code);
    }

    // Anything else is code content.
    effects.enter("codeFlowValue");
    return content(code);
  }

  /**
   * In code content, consuming until EOF or an eol.
   *
   * ```markdown
   * > |     aaa
   *         ^^^^
   * ```
   *
   * @type {State}
   */
  function content(code) {
    if (code === null || markdownLineEnding(code)) {
      effects.exit("codeFlowValue");
      return contentBreak(code);
    }

    effects.consume(code);
    return content;
  }

  /** @type {State} */
  function wrapUp(code) {
    effects.exit("codeIndented");
    // To do: allow interrupting like `markdown-rs`.
    // Feel free to interrupt.
    // tokenizer.interrupt = false
    return ok(code);
  }
}
|
||
|
||
/**
 * Tokenize the continuation of an indented code block: one or more eols
 * followed by enough indentation to keep the block going.
 *
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeFurtherStart(effects, ok, nok) {
  const context = this;
  // Note: named `atEol` (not `furtherStart`) so the module-level
  // `furtherStart` construct is not shadowed.
  return atEol;

  /**
   * At an eol, trying to parse another indent.
   *
   * ```markdown
   * > |     aaa
   *            ^
   *   |     bbb
   * ```
   *
   * @type {State}
   */
  function atEol(code) {
    // To do: improve `lazy` / `pierce` handling.
    // If this is a lazy line, it can’t be code.
    if (context.parser.lazy[context.now().line]) {
      return nok(code);
    }

    if (markdownLineEnding(code)) {
      effects.enter("lineEnding");
      effects.consume(code);
      effects.exit("lineEnding");
      return atEol;
    }

    // To do: the code here in `micromark-js` is a bit different from
    // `markdown-rs` because there it can attempt spaces.
    // We can’t yet.
    //
    // To do: use an improved `space_or_tab` function like `markdown-rs`,
    // so that we can drop the next state.
    return factorySpace(effects, afterPrefix, "linePrefix", 4 + 1)(code);
  }

  /**
   * After the indentation prefix of a possible continuation line.
   *
   * Succeed when the prefix measured four or more columns; otherwise, if
   * the line turned out blank (the current code is an eol), go back and
   * try the next line; otherwise fail.
   *
   * ```markdown
   * > |     aaa
   *         ^
   * ```
   *
   * @type {State}
   */
  function afterPrefix(code) {
    const last = context.events[context.events.length - 1];

    if (last && last[1].type === "linePrefix") {
      // `true`: measure with tabs expanded.
      const size = last[2].sliceSerialize(last[1], true).length;

      if (size >= 4) {
        return ok(code);
      }
    }

    return markdownLineEnding(code) ? atEol(code) : nok(code);
  }
}