Implements Phases 1-8 of the TFTSR implementation plan.

Rust backend (Tauri 2.x, src-tauri/):
- Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama
- PII detection engine: 11 regex patterns with overlap resolution
- SQLCipher AES-256 encrypted database with 10 versioned migrations
- 28 Tauri IPC commands for triage, analysis, document, and system ops
- Ollama: hardware probe, model recommendations, pull/delete with events
- RCA and blameless post-mortem Markdown document generators
- PDF export via printpdf
- Audit log: SHA-256 hash of every external data send
- Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2)

Frontend (React 18 + TypeScript + Vite, src/):
- 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History, plus Settings
- 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives
- 3 Zustand stores: session, settings (persisted), history
- Type-safe tauriCommands.ts matching Rust backend types exactly
- 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs)

DevOps:
- .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push
- .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload

Verified:
- cargo check: zero errors
- tsc --noEmit: zero errors
- vitest run: 13/13 unit tests passing

Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
47 lines · 1.5 KiB · TypeScript
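Before the test file itself, a note on the ProviderConfig type it imports from src/lib/tauriCommands.ts. Below is a minimal sketch of what that interface could look like on the TypeScript side, assuming the fields mirror the Rust serde struct one-for-one; only the fields exercised by the tests are shown, so treat it as illustrative rather than the actual declaration.

// Sketch of the ProviderConfig shape assumed by the tests below (not the real file).
// Field names stay snake_case so they line up with the Rust struct's serde output
// crossing the Tauri IPC boundary.
export interface ProviderConfig {
  name: string;     // provider id, e.g. "openai", "anthropic", "ollama"
  api_url: string;  // base URL of the provider's HTTP API
  api_key: string;  // secret key supplied by the user in Settings
  model: string;    // default model requested from this provider
}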
import { describe, it, expect, beforeEach } from "vitest";
import { useSettingsStore } from "@/stores/settingsStore";
import type { ProviderConfig } from "@/lib/tauriCommands";

const mockProvider: ProviderConfig = {
  name: "openai",
  api_url: "https://api.openai.com/v1",
  api_key: "sk-test-key",
  model: "gpt-4o",
};

describe("Settings Store", () => {
  beforeEach(() => {
    useSettingsStore.setState({
      theme: "dark",
      ai_providers: [],
      active_provider: undefined,
      default_provider: "ollama",
      default_model: "llama3.2:3b",
      ollama_url: "http://localhost:11434",
    });
  });

  it("adds a provider", () => {
    useSettingsStore.getState().addProvider(mockProvider);
    expect(useSettingsStore.getState().ai_providers).toHaveLength(1);
    expect(useSettingsStore.getState().ai_providers[0].name).toBe("openai");
  });

  it("removes a provider", () => {
    useSettingsStore.getState().addProvider(mockProvider);
    useSettingsStore.getState().removeProvider(0);
    expect(useSettingsStore.getState().ai_providers).toHaveLength(0);
  });

  it("updates a provider", () => {
    useSettingsStore.getState().addProvider(mockProvider);
    useSettingsStore.getState().updateProvider(0, { ...mockProvider, model: "gpt-4o-mini" });
    expect(useSettingsStore.getState().ai_providers[0].model).toBe("gpt-4o-mini");
  });

  it("toggles theme", () => {
    useSettingsStore.getState().setTheme("light");
    expect(useSettingsStore.getState().theme).toBe("light");
  });
});
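The tests above pin down the store's public surface: addProvider, removeProvider, updateProvider, setTheme, plus the default fields reset in beforeEach. For orientation, here is one plausible shape for src/stores/settingsStore.ts, assuming Zustand's persist middleware as implied by "settings (persisted)" in the commit message; the storage key and the exact type of active_provider are assumptions, not taken from the repository.

// Illustrative sketch of the settings store the tests exercise -- not the actual implementation.
import { create } from "zustand";
import { persist } from "zustand/middleware";
import type { ProviderConfig } from "@/lib/tauriCommands";

interface SettingsState {
  theme: "dark" | "light";
  ai_providers: ProviderConfig[];
  active_provider?: string; // currently selected provider (type assumed)
  default_provider: string;
  default_model: string;
  ollama_url: string;
  addProvider: (p: ProviderConfig) => void;
  removeProvider: (index: number) => void;
  updateProvider: (index: number, p: ProviderConfig) => void;
  setTheme: (theme: "dark" | "light") => void;
}

export const useSettingsStore = create<SettingsState>()(
  persist(
    (set) => ({
      // Defaults matching the beforeEach reset in the test file
      theme: "dark",
      ai_providers: [],
      active_provider: undefined,
      default_provider: "ollama",
      default_model: "llama3.2:3b",
      ollama_url: "http://localhost:11434",
      // Provider list is edited immutably, addressed by index as in the tests
      addProvider: (p) =>
        set((s) => ({ ai_providers: [...s.ai_providers, p] })),
      removeProvider: (index) =>
        set((s) => ({ ai_providers: s.ai_providers.filter((_, i) => i !== index) })),
      updateProvider: (index, p) =>
        set((s) => ({ ai_providers: s.ai_providers.map((q, i) => (i === index ? p : q)) })),
      setTheme: (theme) => set({ theme }),
    }),
    { name: "tftsr-settings" } // persistence key is an assumption
  )
);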