/*
 * NOTE(review): the following commit-message text was pasted into this file as
 * bare prose (a syntax error in JavaScript) along with scrape artifacts
 * ("93 lines / 2.1 KiB / JavaScript" badges, now removed). It describes an
 * unrelated project and is preserved here only as a comment — confirm it can
 * be deleted outright.
 *
 * Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue-LogUpload-Triage-Resolution-RCA-Postmortem-History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
 */
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _types = require('../parser/tokenizer/types');
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
* Determine information about this named import or named export specifier.
|
|
*
|
|
* This syntax is the `a` from statements like these:
|
|
* import {A} from "./foo";
|
|
* export {A};
|
|
* export {A} from "./foo";
|
|
*
|
|
* As it turns out, we can exactly characterize the syntax meaning by simply
|
|
* counting the number of tokens, which can be from 1 to 4:
|
|
* {A}
|
|
* {type A}
|
|
* {A as B}
|
|
* {type A as B}
|
|
*
|
|
* In the type case, we never actually need the names in practice, so don't get
|
|
* them.
|
|
*
|
|
* TODO: There's some redundancy with the type detection here and the isType
|
|
* flag that's already present on tokens in TS mode. This function could
|
|
* potentially be simplified and/or pushed to the call sites to avoid the object
|
|
* allocation.
|
|
*/
|
|
function getImportExportSpecifierInfo(
|
|
tokens,
|
|
index = tokens.currentIndex(),
|
|
) {
|
|
let endIndex = index + 1;
|
|
if (isSpecifierEnd(tokens, endIndex)) {
|
|
// import {A}
|
|
const name = tokens.identifierNameAtIndex(index);
|
|
return {
|
|
isType: false,
|
|
leftName: name,
|
|
rightName: name,
|
|
endIndex,
|
|
};
|
|
}
|
|
endIndex++;
|
|
if (isSpecifierEnd(tokens, endIndex)) {
|
|
// import {type A}
|
|
return {
|
|
isType: true,
|
|
leftName: null,
|
|
rightName: null,
|
|
endIndex,
|
|
};
|
|
}
|
|
endIndex++;
|
|
if (isSpecifierEnd(tokens, endIndex)) {
|
|
// import {A as B}
|
|
return {
|
|
isType: false,
|
|
leftName: tokens.identifierNameAtIndex(index),
|
|
rightName: tokens.identifierNameAtIndex(index + 2),
|
|
endIndex,
|
|
};
|
|
}
|
|
endIndex++;
|
|
if (isSpecifierEnd(tokens, endIndex)) {
|
|
// import {type A as B}
|
|
return {
|
|
isType: true,
|
|
leftName: null,
|
|
rightName: null,
|
|
endIndex,
|
|
};
|
|
}
|
|
throw new Error(`Unexpected import/export specifier at ${index}`);
|
|
} exports.default = getImportExportSpecifierInfo;
|
|
|
|
function isSpecifierEnd(tokens, index) {
|
|
const token = tokens.tokens[index];
|
|
return token.type === _types.TokenType.braceR || token.type === _types.TokenType.comma;
|
|
}
|