feat: initial implementation of TFTSR IT Triage & RCA application
Implements Phases 1-8 of the TFTSR implementation plan.
Rust backend (Tauri 2.x, src-tauri/):
- Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama
- PII detection engine: 11 regex patterns with overlap resolution (sketched after this list)
- SQLCipher AES-256 encrypted database with 10 versioned migrations
- 28 Tauri IPC commands for triage, analysis, document, and system ops
- Ollama: hardware probe, model recommendations, pull/delete with events
- RCA and blameless post-mortem Markdown document generators
- PDF export via printpdf
- Audit log: SHA-256 hash recorded for every external data transmission
- Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2)
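A minimal sketch of the overlap-resolution idea mentioned above, assuming a keep-earliest (and, on ties, longest) strategy; the function name and tie-break rule are illustrative, not lifted from the codebase:

    fn resolve_overlaps(mut spans: Vec<(usize, usize)>) -> Vec<(usize, usize)> {
        // Sort by start ascending, then end descending, so the longest
        // match starting at a given offset comes first.
        spans.sort_by_key(|&(start, end)| (start, std::cmp::Reverse(end)));
        let mut kept: Vec<(usize, usize)> = Vec::new();
        for (start, end) in spans {
            // Keep a span only if it starts at or after the end of the
            // previously kept span.
            if kept.last().map_or(true, |&(_, prev_end)| start >= prev_end) {
                kept.push((start, end));
            }
        }
        kept
    }

Greedy selection over spans sorted this way guarantees the kept matches never overlap.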
Frontend (React 18 + TypeScript + Vite, src/):
- 9 pages: full triage workflow (NewIssue → LogUpload → Triage → Resolution → RCA → Postmortem → History) plus Settings
- 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives
- 3 Zustand stores: session, settings (persisted), history
- Type-safe tauriCommands.ts matching Rust backend types exactly (see the sketch after this list)
- 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs)
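As a sketch of the contract that tauriCommands.ts mirrors on the TypeScript side (the command and struct below are illustrative, not the app's actual IPC surface):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    pub struct TriageRequest {
        pub title: String,
        pub domain: String,
    }

    // Serde-derived types keep the Rust and TypeScript signatures in
    // lockstep; the TS side declares a matching TriageRequest interface.
    #[tauri::command]
    fn start_triage(req: TriageRequest) -> Result<String, String> {
        Ok(format!("triage started: {}", req.title))
    }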
DevOps:
- .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push
- .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload
Verified:
- cargo check: zero errors
- tsc --noEmit: zero errors
- vitest run: 13/13 unit tests passing
Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00

use crate::ollama::hardware::HardwareInfo;
use crate::ollama::ModelRecommendation;
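
/// Builds the Ollama model catalog, flags the best fit for the probed
/// hardware (RAM, plus GPU VRAM for the 70B model), then drops entries
/// that fit neither system RAM nor GPU VRAM.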
pub fn recommend_models(hw: &HardwareInfo) -> Vec<ModelRecommendation> {
    let ram = hw.total_ram_gb;
    let has_gpu = hw.gpu_vendor.is_some();

    let mut models = vec![
        ModelRecommendation {
            name: "llama3.2:1b".to_string(),
            size: "1.3 GB".to_string(),
            min_ram_gb: 4.0,
            description: "Smallest Llama 3.2 model. Fast, runs on minimal hardware.".to_string(),
            recommended: ram < 8.0,
        },
        ModelRecommendation {
            name: "llama3.2:3b".to_string(),
            size: "2.0 GB".to_string(),
            min_ram_gb: 6.0,
            description: "Balanced Llama 3.2 model. Good for most IT triage tasks.".to_string(),
            recommended: ram >= 8.0 && ram < 16.0,
        },
        ModelRecommendation {
            name: "phi3.5:3.8b".to_string(),
            size: "2.2 GB".to_string(),
            min_ram_gb: 6.0,
            description: "Microsoft Phi-3.5. Excellent reasoning for its size.".to_string(),
            recommended: false,
        },
        ModelRecommendation {
            name: "llama3.1:8b".to_string(),
            size: "4.7 GB".to_string(),
            min_ram_gb: 10.0,
            description: "Llama 3.1 8B. Strong performance for IT analysis.".to_string(),
            recommended: ram >= 16.0 && ram < 32.0,
        },
        ModelRecommendation {
            name: "qwen2.5:14b".to_string(),
            size: "9.0 GB".to_string(),
            min_ram_gb: 16.0,
            description: "Qwen 2.5 14B. Excellent for complex log analysis.".to_string(),
            recommended: ram >= 24.0 && ram < 40.0,
        },
        ModelRecommendation {
            name: "llama3.1:70b".to_string(),
            size: "40 GB".to_string(),
            min_ram_gb: 48.0,
            description: "Full Llama 3.1 70B. Best quality, requires significant RAM.".to_string(),
            recommended: ram >= 48.0 || (has_gpu && hw.gpu_vram_gb.unwrap_or(0.0) >= 40.0),
        },
    ];

    // Keep models that fit in system RAM or GPU VRAM (with a slight
    // overcommit allowance); never drop a model already flagged recommended,
    // so a GPU-qualified 70B recommendation survives the filter.
    let vram = hw.gpu_vram_gb.unwrap_or(0.0);
    models.retain(|m| m.min_ram_gb <= ram + 2.0 || m.min_ram_gb <= vram + 2.0 || m.recommended);
    models
}
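
// Illustrative result: on a 16 GB machine with no GPU, the function returns
// the 1b, 3b, phi3.5, 8b, and 14b entries, with llama3.1:8b flagged as
// recommended; the 70b entry is filtered out.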

#[cfg(test)]
mod tests {
    use super::*;

    fn hw(ram: f64, gpu: Option<(&str, f64)>) -> HardwareInfo {
        HardwareInfo {
            total_ram_gb: ram,
            cpu_arch: "x86_64".to_string(),
            gpu_vendor: gpu.map(|(name, _)| name.to_string()),
            gpu_vram_gb: gpu.map(|(_, vram)| vram),
        }
    }

    #[test]
    fn test_low_ram_only_small_models() {
        let models = recommend_models(&hw(4.0, None));
        assert!(models.iter().all(|m| m.min_ram_gb <= 6.0));
        assert!(models.iter().any(|m| m.name == "llama3.2:1b"));
    }

    #[test]
    fn test_low_ram_recommends_1b() {
        let models = recommend_models(&hw(6.0, None));
        let rec = models.iter().find(|m| m.recommended);
        assert!(rec.is_some());
        assert_eq!(rec.unwrap().name, "llama3.2:1b");
    }

    #[test]
    fn test_medium_ram_recommends_3b() {
        let models = recommend_models(&hw(12.0, None));
        let rec: Vec<_> = models.iter().filter(|m| m.recommended).collect();
        assert!(rec.iter().any(|m| m.name == "llama3.2:3b"));
    }

    #[test]
    fn test_high_ram_recommends_8b() {
        let models = recommend_models(&hw(20.0, None));
        let rec: Vec<_> = models.iter().filter(|m| m.recommended).collect();
        assert!(rec.iter().any(|m| m.name == "llama3.1:8b"));
    }

    #[test]
    fn test_very_high_ram_includes_large_models() {
        let models = recommend_models(&hw(50.0, None));
        assert!(models.iter().any(|m| m.name == "llama3.1:70b"));
        assert!(models.iter().any(|m| m.name == "qwen2.5:14b"));
    }

    #[test]
    fn test_gpu_with_high_vram_recommends_70b() {
        let models = recommend_models(&hw(32.0, Some(("NVIDIA RTX 4090", 48.0))));
        let rec: Vec<_> = models.iter().filter(|m| m.recommended).collect();
        assert!(rec.iter().any(|m| m.name == "llama3.1:70b"));
    }

    #[test]
    fn test_no_models_below_minimum() {
        let models = recommend_models(&hw(2.0, None));
        // Only the 1b model should survive (min_ram 4.0; the +2.0 tolerance allows it).
        assert!(models.len() <= 2);
    }
}