tftsr-devops_investigation/node_modules/execa/lib/io/iterate.js
Shaun Arman 8839075805 feat: initial implementation of TFTSR IT Triage & RCA application
Implements Phases 1-8 of the TFTSR implementation plan.

Rust backend (Tauri 2.x, src-tauri/):
- Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama
- PII detection engine: 11 regex patterns with overlap resolution
- SQLCipher AES-256 encrypted database with 10 versioned migrations
- 28 Tauri IPC commands for triage, analysis, document, and system ops
- Ollama: hardware probe, model recommendations, pull/delete with events
- RCA and blameless post-mortem Markdown document generators
- PDF export via printpdf
- Audit log: SHA-256 hash of every external data send
- Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2)

Frontend (React 18 + TypeScript + Vite, src/):
- 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings
- 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives
- 3 Zustand stores: session, settings (persisted), history
- Type-safe tauriCommands.ts matching Rust backend types exactly
- 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs)

DevOps:
- .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push
- .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload

Verified:
- cargo check: zero errors
- tsc --noEmit: zero errors
- vitest run: 13/13 unit tests passing

Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-14 22:36:25 -05:00

111 lines
3.6 KiB
JavaScript

import {on} from 'node:events';
import {getDefaultHighWaterMark} from 'node:stream';
import {getEncodingTransformGenerator} from '../transform/encoding-transform.js';
import {getSplitLinesGenerator} from '../transform/split.js';
import {transformChunkSync, finalChunksSync} from '../transform/run-sync.js';
// Iterate over lines of `subprocess.stdout`, used by `subprocess.readable|duplex|iterable()`.
// Encoding and line-splitting are skipped when the stream is in object mode,
// since object-mode chunks are not byte/text payloads.
export const iterateOnSubprocessStream = ({subprocessStdout, subprocess, binary, shouldEncode, encoding, preserveNewlines}) => {
	const controller = new AbortController();
	stopReadingOnExit(subprocess, controller);

	const isObjectMode = subprocessStdout.readableObjectMode;
	return iterateOnStream({
		stream: subprocessStdout,
		controller,
		binary,
		shouldEncode: !isObjectMode && shouldEncode,
		encoding,
		shouldSplit: !isObjectMode,
		preserveNewlines,
	});
};
// Abort the iteration once the subprocess has exited, regardless of outcome.
// A rejection of the subprocess promise is handled elsewhere, so it is
// deliberately swallowed here.
const stopReadingOnExit = async (subprocess, controller) => {
	try {
		await subprocess;
	} catch {}

	controller.abort();
};
// Iterate over lines of `subprocess.stdout`, used by `result.stdout` and the `verbose: 'full'` option.
// Applies the `lines` and `encoding` options.
// `allMixed` forces byte/text handling even for object-mode streams.
export const iterateForResult = ({stream, onStreamEnd, lines, encoding, stripFinalNewline, allMixed}) => {
	const controller = new AbortController();
	stopReadingOnStreamEnd(onStreamEnd, controller, stream);

	const isObjectMode = stream.readableObjectMode && !allMixed;
	const isBinary = encoding === 'buffer';
	return iterateOnStream({
		stream,
		controller,
		binary: isBinary,
		shouldEncode: !isObjectMode,
		encoding,
		shouldSplit: !isObjectMode && lines,
		preserveNewlines: !stripFinalNewline,
	});
};
// Abort the iteration once the stream has ended.
// If the stream errored, destroy it first so no further reads happen.
// The nested `try` guarantees the abort runs even if `stream.destroy()` throws,
// while still letting that error propagate.
const stopReadingOnStreamEnd = async (onStreamEnd, controller, stream) => {
	try {
		try {
			await onStreamEnd;
		} catch {
			stream.destroy();
		}
	} finally {
		controller.abort();
	}
};
// Subscribe to the stream's `data` events via `events.on()` and hand the
// resulting async iterable to `iterateOnData` for transformation.
const iterateOnStream = ({stream, controller, binary, shouldEncode, encoding, shouldSplit, preserveNewlines}) => {
	const eventOptions = {
		signal: controller.signal,
		highWaterMark: HIGH_WATER_MARK,
		// Backward compatibility with older name for this option
		// See https://github.com/nodejs/node/pull/52080#discussion_r1525227861
		// @todo Remove after removing support for Node 21
		highWatermark: HIGH_WATER_MARK,
	};
	return iterateOnData({
		onStdoutChunk: on(stream, 'data', eventOptions),
		controller,
		binary,
		shouldEncode,
		encoding,
		shouldSplit,
		preserveNewlines,
	});
};
// Default `highWaterMark` for object-mode streams, measured in number of items (not bytes).
// Exported so other modules can share the same buffering heuristic.
export const DEFAULT_OBJECT_HIGH_WATER_MARK = getDefaultHighWaterMark(true);
// The `highWaterMark` of `events.on()` is measured in number of events, not in bytes.
// Not knowing the average amount of bytes per `data` event, we use the same heuristic as streams in objectMode, since they have the same issue.
// Therefore, we use the value of `getDefaultHighWaterMark(true)`.
// Note: this option does not exist on Node 18, but this is ok since the logic works without it. It just consumes more memory.
const HIGH_WATER_MARK = DEFAULT_OBJECT_HIGH_WATER_MARK;
// Consume `data` events from the async iterable and yield the transformed chunks.
// Aborting the controller (on subprocess exit / stream end) makes `events.on()`
// throw; that expected error is swallowed, while any other error is rethrown.
const iterateOnData = async function * ({onStdoutChunk, controller, binary, shouldEncode, encoding, shouldSplit, preserveNewlines}) {
	const transformers = getGenerators({
		binary,
		shouldEncode,
		encoding,
		shouldSplit,
		preserveNewlines,
	});

	try {
		for await (const [chunk] of onStdoutChunk) {
			yield * transformChunkSync(chunk, transformers, 0);
		}
	} catch (error) {
		// Only rethrow errors that are not caused by our own abort.
		if (!controller.signal.aborted) {
			throw error;
		}
	} finally {
		// Flush any partial data still buffered inside the transforms.
		yield * finalChunksSync(transformers);
	}
};
// Build the ordered list of transform generators: encoding first, then line splitting.
// Each factory may return a falsy value when its transform is disabled, hence the filter.
const getGenerators = ({binary, shouldEncode, encoding, shouldSplit, preserveNewlines}) => {
	const encodingGenerator = getEncodingTransformGenerator(binary, encoding, !shouldEncode);
	const splitLinesGenerator = getSplitLinesGenerator(binary, preserveNewlines, !shouldSplit, {});
	return [encodingGenerator, splitLinesGenerator].filter(Boolean);
};