Implements Phases 1-8 of the TFTSR implementation plan.

Rust backend (Tauri 2.x, src-tauri/):
- Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama
- PII detection engine: 11 regex patterns with overlap resolution
- SQLCipher AES-256 encrypted database with 10 versioned migrations
- 28 Tauri IPC commands for triage, analysis, document, and system ops
- Ollama: hardware probe, model recommendations, pull/delete with events
- RCA and blameless post-mortem Markdown document generators
- PDF export via printpdf
- Audit log: SHA-256 hash of every external data send
- Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2)

Frontend (React 18 + TypeScript + Vite, src/):
- 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings
- 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives
- 3 Zustand stores: session, settings (persisted), history
- Type-safe tauriCommands.ts matching Rust backend types exactly
- 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs)

DevOps:
- .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push
- .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload

Verified:
- cargo check: zero errors
- tsc --noEmit: zero errors
- vitest run: 13/13 unit tests passing

Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
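The PII engine itself ships in Rust; purely as a hedged JavaScript sketch of what "regex patterns with overlap resolution" can mean (the patterns, names, and strategy below are hypothetical illustrations, not the shipped engine): collect every match from every pattern, then keep only non-overlapping spans, preferring the earlier and longer match.

'use strict';

// Hypothetical illustration only — not the Rust engine's actual patterns.
const PATTERNS = [
	{kind: 'email', re: /[\w.+-]+@[\w-]+\.[\w.]+/g},
	{kind: 'ipv4', re: /\b(?:\d{1,3}\.){3}\d{1,3}\b/g}
];

const detectPii = text => {
	const matches = [];
	for (const {kind, re} of PATTERNS) {
		for (const m of text.matchAll(re)) {
			matches.push({kind, start: m.index, end: m.index + m[0].length});
		}
	}

	// Overlap resolution: sort by start position, longer match first on
	// ties, then greedily keep spans that do not overlap an accepted one.
	matches.sort((a, b) => a.start - b.start || b.end - a.end);
	const kept = [];
	let lastEnd = -1;
	for (const m of matches) {
		if (m.start >= lastEnd) {
			kept.push(m);
			lastEnd = m.end;
		}
	}

	return kept;
};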
72 lines · 1.5 KiB · JavaScript
'use strict';
const Queue = require('yocto-queue');

const pLimit = concurrency => {
	if (!((Number.isInteger(concurrency) || concurrency === Infinity) && concurrency > 0)) {
		throw new TypeError('Expected `concurrency` to be a number from 1 and up');
	}

	const queue = new Queue();
	let activeCount = 0;

	// A task has settled: free its slot and start the next queued task, if any.
	const next = () => {
		activeCount--;

		if (queue.size > 0) {
			queue.dequeue()();
		}
	};

	// Execute one task: claim a slot, hand the result promise to the caller
	// immediately, then release the slot once the task settles. The empty
	// catch only avoids an unhandled rejection here; the caller still
	// observes any rejection through `result`.
	const run = async (fn, resolve, ...args) => {
		activeCount++;

		const result = (async () => fn(...args))();

		resolve(result);

		try {
			await result;
		} catch {}

		next();
	};

	const enqueue = (fn, resolve, ...args) => {
		queue.enqueue(run.bind(null, fn, resolve, ...args));

		(async () => {
			// This function needs to wait until the next microtask before comparing
			// `activeCount` to `concurrency`, because `activeCount` is updated asynchronously
			// when the run function is dequeued and called. The comparison in the if-statement
			// needs to happen asynchronously as well to get an up-to-date value for `activeCount`.
			await Promise.resolve();

			if (activeCount < concurrency && queue.size > 0) {
				queue.dequeue()();
			}
		})();
	};

	// The limiter function returned to the caller: the promise it returns is
	// resolved with the task's own promise once the task is allowed to start,
	// so it settles with the task's eventual outcome.
	const generator = (fn, ...args) => new Promise(resolve => {
		enqueue(fn, resolve, ...args);
	});

	Object.defineProperties(generator, {
		activeCount: {
			get: () => activeCount
		},
		pendingCount: {
			get: () => queue.size
		},
		clearQueue: {
			value: () => {
				queue.clear();
			}
		}
	});

	return generator;
};

module.exports = pLimit;
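For context, a minimal usage sketch of the limiter above; `fetchUser` and the concurrency of 2 are illustrative placeholders, not part of this file:

'use strict';
const pLimit = require('.'); // the module defined above

const limit = pLimit(2); // at most two tasks in flight at once

// Hypothetical async task, used only for illustration.
const fetchUser = async id => ({id});

(async () => {
	const users = await Promise.all([
		limit(fetchUser, 1),
		limit(fetchUser, 2),
		limit(fetchUser, 3) // waits in the queue until a slot frees up
	]);

	console.log(users); // [{id: 1}, {id: 2}, {id: 3}]
	console.log(limit.activeCount, limit.pendingCount); // 0 0 after everything settles
})();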