Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
122 lines
3.8 KiB
JavaScript
122 lines
3.8 KiB
JavaScript
'use strict'
|
|
|
|
// Render a character's UTF-16 code unit as an even-length, upper-case hex
// literal, e.g. '{' -> '0x7B', '\n' -> '0x0A'.
const hexify = char => {
  const hex = char.charCodeAt(0).toString(16).toUpperCase()
  // Pad to an even number of digits so single-digit codes read as bytes.
  return hex.length % 2 === 1 ? `0x0${hex}` : `0x${hex}`
}
|
|
|
|
// Turn a raw JSON.parse error plus the offending text into a
// { message, position } pair with a readable excerpt of the bad spot.
// `context` is the number of characters shown on each side of the error.
const parseError = (e, txt, context) => {
  // Nothing to excerpt when there is no text at all.
  if (!txt) {
    return {
      message: `${e.message} while parsing empty string`,
      position: 0,
    }
  }

  // V8-style "Unexpected token X ... position N" messages pinpoint the
  // offending character and offset; "Unexpected end of JSON ..." means the
  // problem is at the very end of the input.
  const badToken = e.message.match(/^Unexpected token (.) .*position\s+(\d+)/i)
  let errIdx = null
  if (badToken) {
    errIdx = +badToken[2]
  } else if (/^Unexpected end of JSON.*/i.test(e.message)) {
    errIdx = txt.length - 1
  }

  // Make the bad character explicit: quote it and append its hex code.
  let msg = e.message
  if (badToken) {
    msg = msg.replace(/^Unexpected token ./,
      `Unexpected token ${JSON.stringify(badToken[1])} (${hexify(badToken[1])})`)
  }

  if (errIdx === null || errIdx === undefined) {
    // No position information available: show the start of the text.
    return {
      message: msg + ` while parsing '${txt.slice(0, context * 2)}'`,
      position: 0,
    }
  }

  // Excerpt up to `context` characters on either side of the error,
  // marking any truncation with ellipses.
  const start = errIdx <= context ? 0 : errIdx - context
  const end = errIdx + context >= txt.length ? txt.length : errIdx + context
  const excerpt = (start === 0 ? '' : '...') +
    txt.slice(start, end) +
    (end === txt.length ? '' : '...')
  const near = txt === excerpt ? '' : 'near '
  return {
    message: msg + ` while parsing ${near}${JSON.stringify(excerpt)}`,
    position: errIdx,
  }
}
|
|
|
|
class JSONParseError extends SyntaxError {
  /**
   * Wraps a native JSON.parse SyntaxError with a friendlier message, the
   * error position, and the original error preserved as `systemError`.
   *
   * @param {Error} er - the original error thrown by JSON.parse
   * @param {string} txt - the text that failed to parse
   * @param {number} [context=20] - excerpt radius used in the message
   * @param {Function} [caller] - stack-trace cutoff point
   */
  constructor (er, txt, context, caller) {
    const { message, position } = parseError(er, txt, context || 20)
    super(message)
    this.message = message
    this.position = position
    this.code = 'EJSONPARSE'
    this.systemError = er
    Error.captureStackTrace(this, caller || this.constructor)
  }

  // Always report the subclass name, and silently ignore attempts to
  // overwrite it, so console output stays stable.
  get name () { return this.constructor.name }
  set name (n) {}
  get [Symbol.toStringTag] () { return this.constructor.name }
}
|
|
|
|
// Formatting metadata is attached to parsed results under global-registry
// symbols, so any other code (even another copy of this module) can read
// it back with Symbol.for('indent') / Symbol.for('newline').
const kIndent = Symbol.for('indent')
const kNewline = Symbol.for('newline')
// only respect indentation if we got a line break, otherwise squash it
// things other than objects and arrays aren't indented, so ignore those
// Important: in both of these regexps, the $1 capture group is the newline
// or undefined, and the $2 capture group is the indent, or undefined.
// formatRE: optional leading whitespace, an opening '{' or '[', one or more
// line breaks ($1), then the next line's leading whitespace ($2 = indent).
const formatRE = /^\s*[{\[]((?:\r?\n)+)([\s\t]*)/
// emptyRE: exactly '{}' or '[]', optionally followed by trailing newlines.
const emptyRE = /^(?:\{\}|\[\])((?:\r?\n)+)?$/
|
|
|
|
/**
 * Parse JSON text with improved error reporting, remembering the input's
 * newline and indentation style on the result (under Symbol.for('newline')
 * and Symbol.for('indent')) so it can later be re-serialized faithfully.
 *
 * @param {string|Buffer} txt - JSON text; a leading BOM is stripped
 * @param {Function} [reviver] - passed through to JSON.parse
 * @param {number} [context=20] - excerpt radius for error messages
 * @throws {TypeError} when txt is neither a string nor a Buffer
 * @throws {JSONParseError} when the text is not valid JSON
 */
const parseJson = (txt, reviver, context) => {
  const parseText = stripBOM(txt)
  context = context || 20
  try {
    // Recover the original formatting so callers can write it back:
    //   emptyRE  ('{}' / '[]')  -> default '\n' newline, 2-space indent
    //   formatRE ('{\n  ...')   -> the actual newline run ($1) and the
    //                              next line's leading whitespace ($2)
    //   no match                -> single-line or primitive input: none
    const found = parseText.match(emptyRE) || parseText.match(formatRE)
    let newline = ''
    let indent = ''
    if (found) {
      newline = found[1] === undefined ? '\n' : found[1]
      indent = found[2] === undefined ? '  ' : found[2]
    }

    const result = JSON.parse(parseText, reviver)
    if (result && typeof result === 'object') {
      // Symbol keys are ignored by JSON.stringify, so this metadata never
      // leaks into ordinary serialization.
      result[kNewline] = newline
      result[kIndent] = indent
    }
    return result
  } catch (e) {
    if (typeof txt !== 'string' && !Buffer.isBuffer(txt)) {
      // Non-text input can never parse; report the type problem directly
      // instead of a syntax error about its stringified form.
      const isEmptyArray = Array.isArray(txt) && txt.length === 0
      throw Object.assign(new TypeError(
        `Cannot parse ${isEmptyArray ? 'an empty array' : String(txt)}`
      ), {
        code: 'EJSONPARSE',
        systemError: e,
      })
    }

    throw new JSONParseError(e, parseText, context, parseJson)
  }
}
|
|
|
|
// Remove a leading byte order marker. This catches EF BB BF (the UTF-8
// BOM) because the buffer-to-string conversion in `fs.readFileSync()`
// translates it to U+FEFF, the UTF-16 BOM.
const stripBOM = txt => {
  const str = String(txt)
  return str.charCodeAt(0) === 0xFEFF ? str.slice(1) : str
}
|
|
|
|
module.exports = parseJson
parseJson.JSONParseError = JSONParseError

/**
 * Best-effort parse: returns the parsed value, or undefined when the text
 * is not valid JSON. Unlike parseJson, no formatting metadata is attached
 * to the result and no error is ever thrown.
 */
parseJson.noExceptions = (txt, reviver) => {
  let result
  try {
    result = JSON.parse(stripBOM(txt), reviver)
  } catch (e) {
    // deliberately swallowed: "noExceptions" is the whole contract
  }
  return result
}
|