Compare commits

..

No commits in common. "master" and "v0.2.56" have entirely different histories.

39 changed files with 679 additions and 2274 deletions

View File

@ -2,7 +2,7 @@
# All system dependencies are installed once here; CI jobs skip apt-get entirely. # All system dependencies are installed once here; CI jobs skip apt-get entirely.
# Rebuild when: Rust toolchain version changes, webkit2gtk/gtk major version changes, # Rebuild when: Rust toolchain version changes, webkit2gtk/gtk major version changes,
# Node.js major version changes, OpenSSL major version changes (used via OPENSSL_STATIC=1), # Node.js major version changes, OpenSSL major version changes (used via OPENSSL_STATIC=1),
# or Tauri CLI version changes that affect bundler system deps. # Tauri CLI version changes that affect bundler system deps, or linuxdeploy is needed.
# Tag format: rust<VER>-node<VER> # Tag format: rust<VER>-node<VER>
FROM rust:1.88-slim FROM rust:1.88-slim
@ -20,9 +20,18 @@ RUN apt-get update -qq \
perl \ perl \
jq \ jq \
git \ git \
fuse \
&& curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \ && curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
&& apt-get install -y --no-install-recommends nodejs \ && apt-get install -y --no-install-recommends nodejs \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Install linuxdeploy for AppImage bundling (required for Tauri 2.x)
# Download linuxdeploy AppImage and extract to get the binary
RUN curl -Ls https://github.com/tauri-apps/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage -o /tmp/linuxdeploy.AppImage \
&& chmod +x /tmp/linuxdeploy.AppImage \
&& /tmp/linuxdeploy.AppImage --appimage-extract \
&& mv squashfs-root/usr/bin/linuxdeploy /usr/local/bin/ \
&& rm -rf /tmp/linuxdeploy.AppImage squashfs-root
RUN rustup target add x86_64-unknown-linux-gnu \ RUN rustup target add x86_64-unknown-linux-gnu \
&& rustup component add rustfmt clippy && rustup component add rustfmt clippy

View File

@ -322,7 +322,7 @@ jobs:
fi fi
echo "Release ID: $RELEASE_ID" echo "Release ID: $RELEASE_ID"
ARTIFACTS=$(find src-tauri/target/x86_64-unknown-linux-gnu/release/bundle -type f \ ARTIFACTS=$(find src-tauri/target/x86_64-unknown-linux-gnu/release/bundle -type f \
\( -name "*.deb" -o -name "*.rpm" \)) \( -name "*.deb" -o -name "*.rpm" -o -name "*.AppImage" \))
if [ -z "$ARTIFACTS" ]; then if [ -z "$ARTIFACTS" ]; then
echo "ERROR: No Linux amd64 artifacts were found to upload." echo "ERROR: No Linux amd64 artifacts were found to upload."
exit 1 exit 1

View File

@ -43,13 +43,13 @@ jobs:
git diff origin/${{ github.base_ref }}..HEAD > /tmp/pr_diff.txt git diff origin/${{ github.base_ref }}..HEAD > /tmp/pr_diff.txt
echo "diff_size=$(wc -l < /tmp/pr_diff.txt | tr -d ' ')" >> $GITHUB_OUTPUT echo "diff_size=$(wc -l < /tmp/pr_diff.txt | tr -d ' ')" >> $GITHUB_OUTPUT
- name: Analyze with LLM - name: Analyze with Ollama
id: analyze id: analyze
if: steps.diff.outputs.diff_size != '0' if: steps.diff.outputs.diff_size != '0'
shell: bash shell: bash
env: env:
LITELLM_URL: http://172.0.0.29:11434/v1 OLLAMA_URL: https://ollama-ui.tftsr.com/ollama/v1
LITELLM_API_KEY: ${{ secrets.OLLAMA_API_KEY }} OLLAMA_API_KEY: ${{ secrets.OLLAMA_API_KEY }}
PR_TITLE: ${{ github.event.pull_request.title }} PR_TITLE: ${{ github.event.pull_request.title }}
PR_NUMBER: ${{ github.event.pull_request.number }} PR_NUMBER: ${{ github.event.pull_request.number }}
run: | run: |
@ -62,32 +62,32 @@ jobs:
| grep -v -E '^[+-].*[A-Za-z0-9+/]{40,}={0,2}([^A-Za-z0-9+/=]|$)') | grep -v -E '^[+-].*[A-Za-z0-9+/]{40,}={0,2}([^A-Za-z0-9+/=]|$)')
PROMPT="Analyze the following code changes for correctness, security issues, and best practices. PR Title: ${PR_TITLE}\n\nDiff:\n${DIFF_CONTENT}\n\nProvide a review with: 1) Summary, 2) Bugs/errors, 3) Security issues, 4) Best practices. Give specific comments with suggested fixes." PROMPT="Analyze the following code changes for correctness, security issues, and best practices. PR Title: ${PR_TITLE}\n\nDiff:\n${DIFF_CONTENT}\n\nProvide a review with: 1) Summary, 2) Bugs/errors, 3) Security issues, 4) Best practices. Give specific comments with suggested fixes."
BODY=$(jq -cn \ BODY=$(jq -cn \
--arg model "qwen2.5-72b" \ --arg model "qwen3-coder-next:latest" \
--arg content "$PROMPT" \ --arg content "$PROMPT" \
'{model: $model, messages: [{role: "user", content: $content}], stream: false}') '{model: $model, messages: [{role: "user", content: $content}], stream: false}')
echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] PR #${PR_NUMBER} - Calling liteLLM API (${#BODY} bytes)..." echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] PR #${PR_NUMBER} - Calling Ollama API (${#BODY} bytes)..."
HTTP_CODE=$(curl -s --max-time 300 --connect-timeout 30 \ HTTP_CODE=$(curl -s --max-time 120 --connect-timeout 30 \
--retry 3 --retry-delay 10 --retry-connrefused --retry-max-time 300 \ --retry 3 --retry-delay 5 --retry-connrefused --retry-max-time 120 \
-o /tmp/llm_response.json -w "%{http_code}" \ -o /tmp/ollama_response.json -w "%{http_code}" \
-X POST "$LITELLM_URL/chat/completions" \ -X POST "$OLLAMA_URL/chat/completions" \
-H "Authorization: Bearer $LITELLM_API_KEY" \ -H "Authorization: Bearer $OLLAMA_API_KEY" \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d "$BODY") -d "$BODY")
echo "HTTP status: $HTTP_CODE" echo "HTTP status: $HTTP_CODE"
echo "Response file size: $(wc -c < /tmp/llm_response.json) bytes" echo "Response file size: $(wc -c < /tmp/ollama_response.json) bytes"
if [ "$HTTP_CODE" != "200" ]; then if [ "$HTTP_CODE" != "200" ]; then
echo "ERROR: liteLLM returned HTTP $HTTP_CODE" echo "ERROR: Ollama returned HTTP $HTTP_CODE"
cat /tmp/llm_response.json cat /tmp/ollama_response.json
exit 1 exit 1
fi fi
if ! jq empty /tmp/llm_response.json 2>/dev/null; then if ! jq empty /tmp/ollama_response.json 2>/dev/null; then
echo "ERROR: Invalid JSON response from liteLLM" echo "ERROR: Invalid JSON response from Ollama"
cat /tmp/llm_response.json cat /tmp/ollama_response.json
exit 1 exit 1
fi fi
REVIEW=$(jq -r '.choices[0].message.content // empty' /tmp/llm_response.json) REVIEW=$(jq -r '.choices[0].message.content // empty' /tmp/ollama_response.json)
if [ -z "$REVIEW" ]; then if [ -z "$REVIEW" ]; then
echo "ERROR: No content in liteLLM response" echo "ERROR: No content in Ollama response"
exit 1 exit 1
fi fi
echo "Review length: ${#REVIEW} chars" echo "Review length: ${#REVIEW} chars"
@ -109,11 +109,11 @@ jobs:
if [ -f "/tmp/pr_review.txt" ] && [ -s "/tmp/pr_review.txt" ]; then if [ -f "/tmp/pr_review.txt" ] && [ -s "/tmp/pr_review.txt" ]; then
REVIEW_BODY=$(head -c 65536 /tmp/pr_review.txt) REVIEW_BODY=$(head -c 65536 /tmp/pr_review.txt)
BODY=$(jq -n \ BODY=$(jq -n \
--arg body "Automated PR Review (qwen2.5-72b via liteLLM):\n\n${REVIEW_BODY}\n\n---\n*automated code review*" \ --arg body "🤖 Automated PR Review:\n\n${REVIEW_BODY}\n\n---\n*this is an automated review from Ollama*" \
'{body: $body, event: "COMMENT"}') '{body: $body, event: "COMMENT"}')
else else
BODY=$(jq -n \ BODY=$(jq -n \
'{body: "Automated PR Review could not be completed - LLM analysis failed or produced no output.", event: "COMMENT"}') '{body: "⚠️ Automated PR Review could not be completed — Ollama analysis failed or produced no output.", event: "COMMENT"}')
fi fi
HTTP_CODE=$(curl -s --max-time 30 --connect-timeout 10 \ HTTP_CODE=$(curl -s --max-time 30 --connect-timeout 10 \
-o /tmp/review_post_response.json -w "%{http_code}" \ -o /tmp/review_post_response.json -w "%{http_code}" \
@ -131,4 +131,4 @@ jobs:
- name: Cleanup - name: Cleanup
if: always() if: always()
shell: bash shell: bash
run: rm -f /tmp/pr_diff.txt /tmp/llm_response.json /tmp/pr_review.txt /tmp/review_post_response.json run: rm -f /tmp/pr_diff.txt /tmp/ollama_response.json /tmp/pr_review.txt /tmp/review_post_response.json

View File

@ -1,9 +1,6 @@
name: Test name: Test
on: on:
push:
branches:
- master
pull_request: pull_request:
jobs: jobs:
@ -40,11 +37,6 @@ jobs:
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }} key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }}
restore-keys: | restore-keys: |
${{ runner.os }}-cargo-linux-amd64- ${{ runner.os }}-cargo-linux-amd64-
- name: Install dependencies
run: npm install --legacy-peer-deps
- name: Update version from Git
run: node scripts/update-version.mjs
- run: cargo generate-lockfile --manifest-path src-tauri/Cargo.toml
- run: cargo fmt --manifest-path src-tauri/Cargo.toml --check - run: cargo fmt --manifest-path src-tauri/Cargo.toml --check
rust-clippy: rust-clippy:
@ -80,7 +72,7 @@ jobs:
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }} key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }}
restore-keys: | restore-keys: |
${{ runner.os }}-cargo-linux-amd64- ${{ runner.os }}-cargo-linux-amd64-
- run: cargo clippy --manifest-path src-tauri/Cargo.toml -- -D warnings - run: cargo clippy --locked --manifest-path src-tauri/Cargo.toml -- -D warnings
rust-tests: rust-tests:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -115,7 +107,7 @@ jobs:
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }} key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }}
restore-keys: | restore-keys: |
${{ runner.os }}-cargo-linux-amd64- ${{ runner.os }}-cargo-linux-amd64-
- run: cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1 - run: cargo test --locked --manifest-path src-tauri/Cargo.toml -- --test-threads=1
frontend-typecheck: frontend-typecheck:
runs-on: ubuntu-latest runs-on: ubuntu-latest

View File

@ -6,72 +6,6 @@ CI, chore, and build changes are excluded.
## [Unreleased] ## [Unreleased]
### Bug Fixes
- Harden timeline event input validation and atomic writes
### Documentation
- Update wiki for timeline events and incident response methodology
### Features
- Add timeline_events table, model, and CRUD commands
- Populate RCA and postmortem docs with real timeline data
- Wire incident response methodology into AI and record triage events
## [0.2.65] — 2026-04-15
### Bug Fixes
- Add --locked to cargo commands and improve version update script
- Remove invalid --locked flag from cargo commands and fix format string
- **integrations**: Security and correctness improvements
- Correct WIQL syntax and escape_wiql implementation
### Features
- Implement dynamic versioning from Git tags
- **integrations**: Implement query expansion for semantic search
### Security
- Fix query expansion issues from PR review
- Address all issues from automated PR review
## [0.2.63] — 2026-04-13
### Bug Fixes
- Add Windows nsis target and update CHANGELOG to v0.2.61
## [0.2.61] — 2026-04-13
### Bug Fixes
- Remove AppImage from upload artifact patterns
## [0.2.59] — 2026-04-13
### Bug Fixes
- Remove AppImage bundling to fix linux-amd64 build
## [0.2.57] — 2026-04-13
### Bug Fixes
- Add fuse dependency for AppImage support
### Refactoring
- Remove custom linuxdeploy install per CI — CI uses tauri-downloaded version
- Revert to original Dockerfile without manual linuxdeploy installation
## [0.2.56] — 2026-04-13
### Bug Fixes
- Add missing ai_providers columns and fix linux-amd64 build
- Address AI review findings
- Address critical AI review issues
## [0.2.55] — 2026-04-13
### Bug Fixes
- **ci**: Use Gitea file API to push CHANGELOG.md — eliminates non-fast-forward rejection
- **ci**: Harden CHANGELOG.md API push step per review
## [0.2.54] — 2026-04-13
### Bug Fixes ### Bug Fixes
- **ci**: Correct git-cliff archive path in tar extraction - **ci**: Correct git-cliff archive path in tar extraction

View File

@ -50,7 +50,7 @@ All command handlers receive `State<'_, AppState>` as a Tauri-injected parameter
| `commands/integrations.rs` | Confluence / ServiceNow / ADO — v0.2 stubs | | `commands/integrations.rs` | Confluence / ServiceNow / ADO — v0.2 stubs |
| `ai/provider.rs` | `Provider` trait + `create_provider()` factory | | `ai/provider.rs` | `Provider` trait + `create_provider()` factory |
| `pii/detector.rs` | Multi-pattern PII scanner with overlap resolution | | `pii/detector.rs` | Multi-pattern PII scanner with overlap resolution |
| `db/migrations.rs` | Versioned schema (17 migrations in `_migrations` table) | | `db/migrations.rs` | Versioned schema (12 migrations in `_migrations` table) |
| `db/models.rs` | All DB types — see `IssueDetail` note below | | `db/models.rs` | All DB types — see `IssueDetail` note below |
| `docs/rca.rs` + `docs/postmortem.rs` | Markdown template builders | | `docs/rca.rs` + `docs/postmortem.rs` | Markdown template builders |
| `audit/log.rs` | `write_audit_event()` — called before every external send | | `audit/log.rs` | `write_audit_event()` — called before every external send |
@ -176,55 +176,6 @@ pub struct IssueDetail {
Use `detail.issue.title`, **not** `detail.title`. Use `detail.issue.title`, **not** `detail.title`.
## Incident Response Methodology
The application integrates a comprehensive incident response framework via system prompt injection. The `INCIDENT_RESPONSE_FRAMEWORK` constant in `src/lib/domainPrompts.ts` is appended to all 17 domain-specific system prompts (Linux, Windows, Network, Kubernetes, Databases, Virtualization, Hardware, Observability, and others).
**5-Phase Framework:**
1. **Detection & Evidence Gathering** — Initial issue assessment, log collection, PII redaction
2. **Diagnosis & Hypothesis Testing** — AI-assisted analysis, pattern matching against known incidents
3. **Root Cause Analysis with 5-Whys** — Iterative questioning to identify underlying cause (steps 1–5)
4. **Resolution & Prevention** — Remediation planning and implementation
5. **Post-Incident Review** — Timeline-based blameless post-mortem and lessons learned
**System Prompt Injection:**
The `chat_message` command accepts an optional `system_prompt` parameter. If provided, it prepends domain expertise before the conversation history. If omitted, the framework selects the appropriate domain prompt based on the issue category. This allows:
- **Specialized expertise**: Different frameworks for Linux vs. Kubernetes vs. Network incidents
- **Flexible override**: Users can inject custom system prompts for cross-domain problems
- **Consistent methodology**: All 17 domain prompts follow the same 5-phase incident response structure
**Timeline Event Recording:**
Timeline events are recorded non-blockingly at key triage moments:
```
Issue Creation → triage_started
Log Upload → log_uploaded (metadata: file_name, file_size)
Why-Level Progression → why_level_advanced (metadata: from_level → to_level)
Root Cause Identified → root_cause_identified (metadata: root_cause, confidence)
RCA Generated → rca_generated (metadata: doc_id, section_count)
Postmortem Generated → postmortem_generated (metadata: doc_id, timeline_events_count)
Document Exported → document_exported (metadata: format, file_path)
```
**Document Generation:**
RCA and Postmortem generators now use real timeline event data instead of placeholders:
- **RCA**: Incorporates timeline to show detection-to-root-cause progression
- **Postmortem**: Uses full timeline to demonstrate the complete incident lifecycle and response effectiveness
Timeline events are stored in the `timeline_events` table (indexed by issue_id and created_at for fast retrieval) and dual-written to `audit_log` for security/compliance purposes.
## Application Startup Sequence ## Application Startup Sequence
``` ```

View File

@ -2,7 +2,7 @@
## Overview ## Overview
TFTSR uses **SQLite** via `rusqlite` with the `bundled-sqlcipher` feature for AES-256 encryption in production. 17 versioned migrations are tracked in the `_migrations` table. TFTSR uses **SQLite** via `rusqlite` with the `bundled-sqlcipher` feature for AES-256 encryption in production. 12 versioned migrations are tracked in the `_migrations` table.
**DB file location:** `{app_data_dir}/tftsr.db` **DB file location:** `{app_data_dir}/tftsr.db`
@ -38,7 +38,7 @@ pub fn init_db(data_dir: &Path) -> anyhow::Result<Connection> {
--- ---
## Schema (17 Migrations) ## Schema (12 Migrations)
### 001 — issues ### 001 — issues
@ -245,51 +245,6 @@ CREATE TABLE image_attachments (
- Basic auth (ServiceNow): Store encrypted password - Basic auth (ServiceNow): Store encrypted password
- One credential per service (enforced by UNIQUE constraint) - One credential per service (enforced by UNIQUE constraint)
### 017 — timeline_events (Incident Response Timeline)
```sql
CREATE TABLE timeline_events (
id TEXT PRIMARY KEY,
issue_id TEXT NOT NULL REFERENCES issues(id) ON DELETE CASCADE,
event_type TEXT NOT NULL,
description TEXT NOT NULL,
metadata TEXT, -- JSON object with event-specific data
created_at TEXT NOT NULL
);
CREATE INDEX idx_timeline_events_issue ON timeline_events(issue_id);
CREATE INDEX idx_timeline_events_time ON timeline_events(created_at);
```
**Event Types:**
- `triage_started` — Incident response begins, initial issue properties recorded
- `log_uploaded` — Log file uploaded and analyzed
- `why_level_advanced` — 5-Whys entry completed, progression to next level
- `root_cause_identified` — Root cause determined from analysis
- `rca_generated` — Root Cause Analysis document created
- `postmortem_generated` — Post-mortem document created
- `document_exported` — Document exported to file (MD or PDF)
**Metadata Structure (JSON):**
```json
{
"triage_started": {"severity": "high", "category": "network"},
"log_uploaded": {"file_name": "app.log", "file_size": 2048576},
"why_level_advanced": {"from_level": 2, "to_level": 3, "question": "Why did the service timeout?"},
"root_cause_identified": {"root_cause": "DNS resolution failure", "confidence": 0.95},
"rca_generated": {"doc_id": "doc_abc123", "section_count": 7},
"postmortem_generated": {"doc_id": "doc_def456", "timeline_events_count": 12},
"document_exported": {"format": "pdf", "file_path": "/home/user/docs/rca.pdf"}
}
```
**Design Notes:**
- Timeline events are **queryable** (indexed by issue_id and created_at) for document generation
- Dual-write: Events recorded to both `timeline_events` and `audit_log` — timeline for chronological reporting, audit_log for security/compliance
- `created_at`: TEXT UTC timestamp (`YYYY-MM-DD HH:MM:SS`)
- Non-blocking writes: Timeline events recorded asynchronously at key triage moments
- Cascade delete from issues ensures cleanup
--- ---
## Key Design Notes ## Key Design Notes
@ -334,13 +289,4 @@ pub struct AuditEntry {
pub user_id: String, pub user_id: String,
pub details: Option<String>, pub details: Option<String>,
} }
pub struct TimelineEvent {
pub id: String,
pub issue_id: String,
pub event_type: String,
pub description: String,
pub metadata: Option<String>, // JSON
pub created_at: String,
}
``` ```

View File

@ -62,27 +62,11 @@ updateFiveWhyCmd(entryId: string, answer: string) → void
``` ```
Sets or updates the answer for an existing 5-Whys entry. Sets or updates the answer for an existing 5-Whys entry.
### `get_timeline_events`
```typescript
getTimelineEventsCmd(issueId: string) → TimelineEvent[]
```
Retrieves all timeline events for an issue, ordered by created_at ascending.
```typescript
interface TimelineEvent {
id: string;
issue_id: string;
event_type: string; // One of: triage_started, log_uploaded, why_level_advanced, etc.
description: string;
metadata?: Record<string, any>; // Event-specific JSON data
created_at: string; // UTC timestamp
}
```
### `add_timeline_event` ### `add_timeline_event`
```typescript ```typescript
addTimelineEventCmd(issueId: string, eventType: string, description: string, metadata?: Record<string, any>) → TimelineEvent addTimelineEventCmd(issueId: string, eventType: string, description: string) → TimelineEvent
``` ```
Records a timestamped event in the issue timeline. Dual-writes to both `timeline_events` (for document generation) and `audit_log` (for security audit trail). Records a timestamped event in the issue timeline.
--- ---
@ -153,9 +137,9 @@ Sends selected (redacted) log files to the AI provider with an analysis prompt.
### `chat_message` ### `chat_message`
```typescript ```typescript
chatMessageCmd(issueId: string, message: string, providerConfig: ProviderConfig, systemPrompt?: string) → ChatResponse chatMessageCmd(issueId: string, message: string, providerConfig: ProviderConfig) → ChatResponse
``` ```
Sends a message in the ongoing triage conversation. Optional `systemPrompt` parameter allows prepending domain expertise before conversation history. If not provided, the domain-specific system prompt for the issue category is injected automatically on first message. AI response is parsed for why-level indicators (1–5). Sends a message in the ongoing triage conversation. Domain system prompt is injected automatically on first message. AI response is parsed for why-level indicators (1–5).
### `list_providers` ### `list_providers`
```typescript ```typescript
@ -171,13 +155,13 @@ Returns the list of supported providers with their available models and configur
```typescript ```typescript
generateRcaCmd(issueId: string) → Document generateRcaCmd(issueId: string) → Document
``` ```
Builds an RCA Markdown document from the issue data, 5-Whys answers, and timeline events. Uses real incident response timeline (log uploads, why-level progression, root cause identification) instead of placeholders. Builds an RCA Markdown document from the issue data, 5-Whys answers, and timeline.
### `generate_postmortem` ### `generate_postmortem`
```typescript ```typescript
generatePostmortemCmd(issueId: string) → Document generatePostmortemCmd(issueId: string) → Document
``` ```
Builds a blameless post-mortem Markdown document. Incorporates timeline events to show the full incident lifecycle: detection, diagnosis, resolution, and post-incident review phases. Builds a blameless post-mortem Markdown document.
### `update_document` ### `update_document`
```typescript ```typescript

View File

@ -1,12 +1,11 @@
{ {
"name": "tftsr", "name": "tftsr",
"private": true, "private": true,
"version": "0.2.62", "version": "0.2.50",
"type": "module", "type": "module",
"scripts": { "scripts": {
"dev": "vite", "dev": "vite",
"build": "tsc && vite build", "build": "tsc && vite build",
"version:update": "node scripts/update-version.mjs",
"preview": "vite preview", "preview": "vite preview",
"tauri": "tauri", "tauri": "tauri",
"test": "vitest", "test": "vitest",

View File

@ -1,111 +0,0 @@
#!/usr/bin/env node
import { execSync } from 'child_process';
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
import { resolve, dirname } from 'path';
import { fileURLToPath } from 'url';
// Absolute path of this script file (ESM modules have no __filename builtin).
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Repository root: one level above the scripts/ directory this file lives in.
const projectRoot = resolve(__dirname, '..');
/**
 * Check whether a version string is strict semver of the form X.Y.Z
 * (numeric components only — no "v" prefix, pre-release, or build metadata).
 * @param {string} version candidate version string
 * @returns {boolean} true when the string matches X.Y.Z exactly
 */
function isValidSemver(version) {
  return /^\d+\.\d+\.\d+$/.test(version);
}
/**
 * Ensure the given directory is the top level of a Git checkout.
 * @param {string} root directory expected to contain a .git entry
 * @throws {Error} when no .git entry exists directly under root
 */
function validateGitRepo(root) {
  const gitPath = resolve(root, '.git');
  if (existsSync(gitPath)) {
    return;
  }
  throw new Error(`Not a Git repository: ${root}`);
}
/**
 * Resolve the application version from the most recent Git tag.
 * Falls back to the package.json version when the repo has no tags,
 * git is unavailable, or the nearest tag is not valid X.Y.Z semver.
 * @returns {string} a validated semver version string
 * @throws {Error} when projectRoot is not a Git repository
 */
function getVersionFromGit() {
  validateGitRepo(projectRoot);
  try {
    // execFileSync with an argv array never involves a shell. The previous
    // code passed `shell: false` to execSync, but exec/execSync coerce any
    // non-string `shell` value to the default shell, so that option was
    // silently ignored; execFileSync achieves the intended shell-free call.
    const output = execFileSync('git', ['describe', '--tags', '--abbrev=0'], {
      encoding: 'utf-8',
      cwd: projectRoot
    });
    // Strip the conventional "v" tag prefix (e.g. v0.2.56 -> 0.2.56).
    const version = output.trim().replace(/^v/, '');
    if (!isValidSemver(version)) {
      const pkgJsonVersion = getFallbackVersion();
      console.warn(`Invalid version format "${version}" from git describe, using package.json fallback: ${pkgJsonVersion}`);
      return pkgJsonVersion;
    }
    return version;
  } catch (e) {
    // git missing, no tags yet, or describe failed — fall back quietly.
    const pkgJsonVersion = getFallbackVersion();
    console.warn(`Failed to get version from Git tags, using package.json fallback: ${pkgJsonVersion}`);
    return pkgJsonVersion;
  }
}
/**
 * Read the fallback version from package.json, defaulting to "0.2.50"
 * when the file is missing, unreadable, unparsable, or has no version field.
 * @returns {string} version string to use when Git tags are unusable
 */
function getFallbackVersion() {
  const DEFAULT_VERSION = '0.2.50';
  const pkgPath = resolve(projectRoot, 'package.json');
  if (!existsSync(pkgPath)) {
    return DEFAULT_VERSION;
  }
  try {
    const parsed = JSON.parse(readFileSync(pkgPath, 'utf-8'));
    return parsed.version || DEFAULT_VERSION;
  } catch {
    return DEFAULT_VERSION;
  }
}
/**
 * Overwrite the "version" field of the project's package.json in place.
 * Serializes with 2-space indentation and a trailing newline.
 * @param {string} version semver string to write
 * @throws {Error} when package.json does not exist
 */
function updatePackageJson(version) {
  const fullPath = resolve(projectRoot, 'package.json');
  if (!existsSync(fullPath)) {
    throw new Error(`File not found: ${fullPath}`);
  }
  const pkg = JSON.parse(readFileSync(fullPath, 'utf-8'));
  pkg.version = version;
  writeFileSync(fullPath, `${JSON.stringify(pkg, null, 2)}\n`, 'utf-8');
  console.log(`✓ Updated package.json to ${version}`);
}
/**
 * Rewrite the version line of a TOML or JSON manifest in place.
 * Handles both `version = "x.y.z"` (TOML, e.g. Cargo.toml) and
 * `"version": "x.y.z"` (JSON, e.g. tauri.conf.json). The previous
 * implementation only matched the TOML form, so the call site that
 * passes src-tauri/tauri.conf.json silently changed nothing while
 * still logging success; it also dropped the line's indentation.
 * @param {string} path manifest path relative to the project root
 * @param {string} version semver string to write
 * @throws {Error} when the file does not exist
 */
function updateTOML(path, version) {
  const fullPath = resolve(projectRoot, path);
  if (!existsSync(fullPath)) {
    throw new Error(`File not found: ${fullPath}`);
  }
  const content = readFileSync(fullPath, 'utf-8');
  const lines = content.split('\n');
  const output = [];
  for (const line of lines) {
    const toml = line.match(/^(\s*)version\s*=\s*"/);
    const json = line.match(/^(\s*)"version"\s*:\s*"[^"]*"(,?)/);
    if (toml) {
      // Preserve the matched line's leading indentation.
      output.push(`${toml[1]}version = "${version}"`);
    } else if (json) {
      // Preserve indentation and any trailing comma of the JSON field.
      output.push(`${json[1]}"version": "${version}"${json[2]}`);
    } else {
      output.push(line);
    }
  }
  writeFileSync(fullPath, output.join('\n') + '\n', 'utf-8');
  console.log(`✓ Updated ${path} to ${version}`);
}
// Entry point: resolve the version once, then propagate it to every
// manifest that carries a version field.
const resolvedVersion = getVersionFromGit();
console.log(`Setting version to: ${resolvedVersion}`);
updatePackageJson(resolvedVersion);
for (const manifest of ['src-tauri/Cargo.toml', 'src-tauri/tauri.conf.json']) {
  updateTOML(manifest, resolvedVersion);
}
console.log(`✓ All version fields updated to ${resolvedVersion}`);

3
src-tauri/Cargo.lock generated
View File

@ -6139,7 +6139,7 @@ dependencies = [
[[package]] [[package]]
name = "trcaa" name = "trcaa"
version = "0.2.62" version = "0.2.50"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"aho-corasick", "aho-corasick",
@ -6174,7 +6174,6 @@ dependencies = [
"tokio-test", "tokio-test",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"url",
"urlencoding", "urlencoding",
"uuid", "uuid",
"warp", "warp",

View File

@ -1,6 +1,6 @@
[package] [package]
name = "trcaa" name = "trcaa"
version = "0.2.62" version = "0.2.50"
edition = "2021" edition = "2021"
[lib] [lib]
@ -44,7 +44,6 @@ lazy_static = "1.4"
warp = "0.3" warp = "0.3"
urlencoding = "2" urlencoding = "2"
infer = "0.15" infer = "0.15"
url = "2.5.8"
[dev-dependencies] [dev-dependencies]
tokio-test = "0.4" tokio-test = "0.4"
@ -53,7 +52,3 @@ mockito = "1.2"
[profile.release] [profile.release]
opt-level = "s" opt-level = "s"
strip = true strip = true

View File

@ -1,30 +1,3 @@
fn main() { fn main() {
let version = get_version_from_git();
println!("cargo:rustc-env=APP_VERSION={version}");
println!("cargo:rerun-if-changed=.git/refs/heads/master");
println!("cargo:rerun-if-changed=.git/refs/tags");
tauri_build::build() tauri_build::build()
} }
fn get_version_from_git() -> String {
if let Ok(output) = std::process::Command::new("git")
.arg("describe")
.arg("--tags")
.arg("--abbrev=0")
.output()
{
if output.status.success() {
let version = String::from_utf8_lossy(&output.stdout)
.trim()
.trim_start_matches('v')
.to_string();
if !version.is_empty() {
return version;
}
}
}
"0.2.50".to_string()
}

View File

@ -165,7 +165,6 @@ pub async fn chat_message(
issue_id: String, issue_id: String,
message: String, message: String,
provider_config: ProviderConfig, provider_config: ProviderConfig,
system_prompt: Option<String>,
app_handle: tauri::AppHandle, app_handle: tauri::AppHandle,
state: State<'_, AppState>, state: State<'_, AppState>,
) -> Result<ChatResponse, String> { ) -> Result<ChatResponse, String> {
@ -233,21 +232,7 @@ pub async fn chat_message(
// Search integration sources for relevant context // Search integration sources for relevant context
let integration_context = search_integration_sources(&message, &app_handle, &state).await; let integration_context = search_integration_sources(&message, &app_handle, &state).await;
let mut messages = Vec::new(); let mut messages = history;
// Inject domain system prompt if provided
if let Some(ref prompt) = system_prompt {
if !prompt.is_empty() {
messages.push(Message {
role: "system".into(),
content: prompt.clone(),
tool_call_id: None,
tool_calls: None,
});
}
}
messages.extend(history);
// If we found integration content, add it to the conversation context // If we found integration content, add it to the conversation context
if !integration_context.is_empty() { if !integration_context.is_empty() {

View File

@ -2,7 +2,7 @@ use tauri::State;
use crate::db::models::{ use crate::db::models::{
AiConversation, AiMessage, ImageAttachment, Issue, IssueDetail, IssueFilter, IssueSummary, AiConversation, AiMessage, ImageAttachment, Issue, IssueDetail, IssueFilter, IssueSummary,
IssueUpdate, LogFile, ResolutionStep, TimelineEvent, IssueUpdate, LogFile, ResolutionStep,
}; };
use crate::state::AppState; use crate::state::AppState;
@ -171,35 +171,12 @@ pub async fn get_issue(
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.collect(); .collect();
// Load timeline events
let mut te_stmt = db
.prepare(
"SELECT id, issue_id, event_type, description, metadata, created_at \
FROM timeline_events WHERE issue_id = ?1 ORDER BY created_at ASC",
)
.map_err(|e| e.to_string())?;
let timeline_events: Vec<TimelineEvent> = te_stmt
.query_map([&issue_id], |row| {
Ok(TimelineEvent {
id: row.get(0)?,
issue_id: row.get(1)?,
event_type: row.get(2)?,
description: row.get(3)?,
metadata: row.get(4)?,
created_at: row.get(5)?,
})
})
.map_err(|e| e.to_string())?
.filter_map(|r| r.ok())
.collect();
Ok(IssueDetail { Ok(IssueDetail {
issue, issue,
log_files, log_files,
image_attachments, image_attachments,
resolution_steps, resolution_steps,
conversations, conversations,
timeline_events,
}) })
} }
@ -325,11 +302,6 @@ pub async fn delete_issue(issue_id: String, state: State<'_, AppState>) -> Resul
[&issue_id], [&issue_id],
) )
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
db.execute(
"DELETE FROM timeline_events WHERE issue_id = ?1",
[&issue_id],
)
.map_err(|e| e.to_string())?;
db.execute("DELETE FROM issues WHERE id = ?1", [&issue_id]) db.execute("DELETE FROM issues WHERE id = ?1", [&issue_id])
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
@ -533,105 +505,37 @@ pub async fn update_five_why(
Ok(()) Ok(())
} }
const VALID_EVENT_TYPES: &[&str] = &[
"triage_started",
"log_uploaded",
"why_level_advanced",
"root_cause_identified",
"rca_generated",
"postmortem_generated",
"document_exported",
];
#[tauri::command] #[tauri::command]
pub async fn add_timeline_event( pub async fn add_timeline_event(
issue_id: String, issue_id: String,
event_type: String, event_type: String,
description: String, description: String,
metadata: Option<String>,
state: State<'_, AppState>, state: State<'_, AppState>,
) -> Result<TimelineEvent, String> { ) -> Result<(), String> {
if !VALID_EVENT_TYPES.contains(&event_type.as_str()) { // Use audit_log for timeline tracking
return Err(format!("Invalid event_type: {event_type}")); let db = state.db.lock().map_err(|e| e.to_string())?;
} let entry = crate::db::models::AuditEntry::new(
event_type,
let meta = metadata.unwrap_or_else(|| "{}".to_string()); "issue".to_string(),
if meta.len() > 10240 {
return Err("metadata exceeds maximum size of 10KB".to_string());
}
serde_json::from_str::<serde_json::Value>(&meta)
.map_err(|_| "metadata must be valid JSON".to_string())?;
let event = TimelineEvent::new(
issue_id.clone(), issue_id.clone(),
event_type.clone(), serde_json::json!({ "description": description }).to_string(),
description.clone(),
meta,
); );
let mut db = state.db.lock().map_err(|e| e.to_string())?;
let tx = db.transaction().map_err(|e| e.to_string())?;
tx.execute(
"INSERT INTO timeline_events (id, issue_id, event_type, description, metadata, created_at) \
VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params![
event.id,
event.issue_id,
event.event_type,
event.description,
event.metadata,
event.created_at,
],
)
.map_err(|e| e.to_string())?;
crate::audit::log::write_audit_event( crate::audit::log::write_audit_event(
&tx, &db,
&event_type, &entry.action,
"issue", &entry.entity_type,
&issue_id, &entry.entity_id,
&serde_json::json!({ "description": description, "metadata": event.metadata }).to_string(), &entry.details,
) )
.map_err(|_| "Failed to write security audit entry".to_string())?; .map_err(|_| "Failed to write security audit entry".to_string())?;
// Update issue timestamp
let now = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(); let now = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string();
tx.execute( db.execute(
"UPDATE issues SET updated_at = ?1 WHERE id = ?2", "UPDATE issues SET updated_at = ?1 WHERE id = ?2",
rusqlite::params![now, issue_id], rusqlite::params![now, issue_id],
) )
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
tx.commit().map_err(|e| e.to_string())?; Ok(())
Ok(event)
}
#[tauri::command]
pub async fn get_timeline_events(
issue_id: String,
state: State<'_, AppState>,
) -> Result<Vec<TimelineEvent>, String> {
let db = state.db.lock().map_err(|e| e.to_string())?;
let mut stmt = db
.prepare(
"SELECT id, issue_id, event_type, description, metadata, created_at \
FROM timeline_events WHERE issue_id = ?1 ORDER BY created_at ASC",
)
.map_err(|e| e.to_string())?;
let events = stmt
.query_map([&issue_id], |row| {
Ok(TimelineEvent {
id: row.get(0)?,
issue_id: row.get(1)?,
event_type: row.get(2)?,
description: row.get(3)?,
metadata: row.get(4)?,
created_at: row.get(5)?,
})
})
.map_err(|e| e.to_string())?
.filter_map(|r| r.ok())
.collect();
Ok(events)
} }

View File

@ -4,7 +4,6 @@ use crate::ollama::{
OllamaStatus, OllamaStatus,
}; };
use crate::state::{AppSettings, AppState, ProviderConfig}; use crate::state::{AppSettings, AppState, ProviderConfig};
use std::env;
// --- Ollama commands --- // --- Ollama commands ---
@ -276,11 +275,3 @@ pub async fn delete_ai_provider(
Ok(()) Ok(())
} }
/// Get the application version from build-time environment
#[tauri::command]
pub async fn get_app_version() -> Result<String, String> {
env::var("APP_VERSION")
.or_else(|_| env::var("CARGO_PKG_VERSION"))
.map_err(|e| format!("Failed to get version: {e}"))
}

View File

@ -199,20 +199,6 @@ pub fn run_migrations(conn: &Connection) -> anyhow::Result<()> {
"016_add_created_at", "016_add_created_at",
"ALTER TABLE ai_providers ADD COLUMN created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%d %H:%M:%S', 'now'))", "ALTER TABLE ai_providers ADD COLUMN created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%d %H:%M:%S', 'now'))",
), ),
(
"017_create_timeline_events",
"CREATE TABLE IF NOT EXISTS timeline_events (
id TEXT PRIMARY KEY,
issue_id TEXT NOT NULL,
event_type TEXT NOT NULL,
description TEXT NOT NULL DEFAULT '',
metadata TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX idx_timeline_events_issue ON timeline_events(issue_id);
CREATE INDEX idx_timeline_events_time ON timeline_events(created_at);",
),
]; ];
for (name, sql) in migrations { for (name, sql) in migrations {
@ -712,82 +698,4 @@ mod tests {
// Should not fail even though columns already exist // Should not fail even though columns already exist
run_migrations(&conn).unwrap(); run_migrations(&conn).unwrap();
} }
#[test]
fn test_timeline_events_table_exists() {
let conn = setup_test_db();
let count: i64 = conn
.query_row(
"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='timeline_events'",
[],
|r| r.get(0),
)
.unwrap();
assert_eq!(count, 1);
let mut stmt = conn.prepare("PRAGMA table_info(timeline_events)").unwrap();
let columns: Vec<String> = stmt
.query_map([], |row| row.get::<_, String>(1))
.unwrap()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert!(columns.contains(&"id".to_string()));
assert!(columns.contains(&"issue_id".to_string()));
assert!(columns.contains(&"event_type".to_string()));
assert!(columns.contains(&"description".to_string()));
assert!(columns.contains(&"metadata".to_string()));
assert!(columns.contains(&"created_at".to_string()));
}
#[test]
fn test_timeline_events_cascade_delete() {
let conn = setup_test_db();
conn.execute("PRAGMA foreign_keys = ON", []).unwrap();
let now = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string();
conn.execute(
"INSERT INTO issues (id, title, created_at, updated_at) VALUES (?1, ?2, ?3, ?4)",
rusqlite::params!["issue-1", "Test Issue", now, now],
)
.unwrap();
conn.execute(
"INSERT INTO timeline_events (id, issue_id, event_type, description, metadata, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params!["te-1", "issue-1", "triage_started", "Started triage", "{}", "2025-01-15 10:00:00 UTC"],
)
.unwrap();
// Verify event exists
let count: i64 = conn
.query_row("SELECT COUNT(*) FROM timeline_events", [], |r| r.get(0))
.unwrap();
assert_eq!(count, 1);
// Delete issue — cascade should remove timeline event
conn.execute("DELETE FROM issues WHERE id = 'issue-1'", [])
.unwrap();
let count: i64 = conn
.query_row("SELECT COUNT(*) FROM timeline_events", [], |r| r.get(0))
.unwrap();
assert_eq!(count, 0);
}
#[test]
fn test_timeline_events_indexes() {
let conn = setup_test_db();
let mut stmt = conn
.prepare(
"SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='timeline_events'",
)
.unwrap();
let indexes: Vec<String> = stmt
.query_map([], |row| row.get(0))
.unwrap()
.filter_map(|r| r.ok())
.collect();
assert!(indexes.contains(&"idx_timeline_events_issue".to_string()));
assert!(indexes.contains(&"idx_timeline_events_time".to_string()));
}
} }

View File

@ -47,7 +47,6 @@ pub struct IssueDetail {
pub image_attachments: Vec<ImageAttachment>, pub image_attachments: Vec<ImageAttachment>,
pub resolution_steps: Vec<ResolutionStep>, pub resolution_steps: Vec<ResolutionStep>,
pub conversations: Vec<AiConversation>, pub conversations: Vec<AiConversation>,
pub timeline_events: Vec<TimelineEvent>,
} }
/// Lightweight row returned by list/search commands. /// Lightweight row returned by list/search commands.
@ -122,31 +121,9 @@ pub struct FiveWhyEntry {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimelineEvent { pub struct TimelineEvent {
pub id: String, pub id: String,
pub issue_id: String,
pub event_type: String, pub event_type: String,
pub description: String, pub description: String,
pub metadata: String, pub created_at: i64,
pub created_at: String,
}
impl TimelineEvent {
pub fn new(
issue_id: String,
event_type: String,
description: String,
metadata: String,
) -> Self {
TimelineEvent {
id: Uuid::now_v7().to_string(),
issue_id,
event_type,
description,
metadata,
created_at: chrono::Utc::now()
.format("%Y-%m-%d %H:%M:%S UTC")
.to_string(),
}
}
} }
// ─── Log File ─────────────────────────────────────────────────────────────── // ─── Log File ───────────────────────────────────────────────────────────────

View File

@ -1,5 +1,4 @@
use crate::db::models::IssueDetail; use crate::db::models::IssueDetail;
use crate::docs::rca::{calculate_duration, format_event_type};
pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String { pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
let issue = &detail.issue; let issue = &detail.issue;
@ -52,16 +51,7 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
// Impact // Impact
md.push_str("## Impact\n\n"); md.push_str("## Impact\n\n");
if detail.timeline_events.len() >= 2 { md.push_str("- **Duration:** _[How long did the incident last?]_\n");
let first = &detail.timeline_events[0].created_at;
let last = &detail.timeline_events[detail.timeline_events.len() - 1].created_at;
md.push_str(&format!(
"- **Duration:** {}\n",
calculate_duration(first, last)
));
} else {
md.push_str("- **Duration:** _[How long did the incident last?]_\n");
}
md.push_str("- **Users Affected:** _[Number/percentage of affected users]_\n"); md.push_str("- **Users Affected:** _[Number/percentage of affected users]_\n");
md.push_str("- **Revenue Impact:** _[Financial impact, if applicable]_\n"); md.push_str("- **Revenue Impact:** _[Financial impact, if applicable]_\n");
md.push_str("- **SLA Impact:** _[Were any SLAs breached?]_\n\n"); md.push_str("- **SLA Impact:** _[Were any SLAs breached?]_\n\n");
@ -77,19 +67,7 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
if let Some(ref resolved) = issue.resolved_at { if let Some(ref resolved) = issue.resolved_at {
md.push_str(&format!("| {resolved} | Issue resolved |\n")); md.push_str(&format!("| {resolved} | Issue resolved |\n"));
} }
if detail.timeline_events.is_empty() { md.push_str("| _HH:MM_ | _[Add additional timeline events]_ |\n\n");
md.push_str("| _HH:MM_ | _[Add additional timeline events]_ |\n");
} else {
for event in &detail.timeline_events {
md.push_str(&format!(
"| {} | {} - {} |\n",
event.created_at,
format_event_type(&event.event_type),
event.description
));
}
}
md.push('\n');
// Root Cause Analysis // Root Cause Analysis
md.push_str("## Root Cause Analysis\n\n"); md.push_str("## Root Cause Analysis\n\n");
@ -136,19 +114,6 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
// What Went Well // What Went Well
md.push_str("## What Went Well\n\n"); md.push_str("## What Went Well\n\n");
if !detail.resolution_steps.is_empty() {
md.push_str(&format!(
"- Systematic 5-whys analysis conducted ({} steps completed)\n",
detail.resolution_steps.len()
));
}
if detail
.timeline_events
.iter()
.any(|e| e.event_type == "root_cause_identified")
{
md.push_str("- Root cause was identified during triage\n");
}
md.push_str("- _[e.g., Quick detection through existing alerts]_\n"); md.push_str("- _[e.g., Quick detection through existing alerts]_\n");
md.push_str("- _[e.g., Effective cross-team collaboration]_\n"); md.push_str("- _[e.g., Effective cross-team collaboration]_\n");
md.push_str("- _[e.g., Smooth communication with stakeholders]_\n\n"); md.push_str("- _[e.g., Smooth communication with stakeholders]_\n\n");
@ -193,7 +158,7 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::db::models::{Issue, IssueDetail, ResolutionStep, TimelineEvent}; use crate::db::models::{Issue, IssueDetail, ResolutionStep};
fn make_test_detail() -> IssueDetail { fn make_test_detail() -> IssueDetail {
IssueDetail { IssueDetail {
@ -223,7 +188,6 @@ mod tests {
created_at: "2025-02-10 09:00:00".to_string(), created_at: "2025-02-10 09:00:00".to_string(),
}], }],
conversations: vec![], conversations: vec![],
timeline_events: vec![],
} }
} }
@ -282,76 +246,4 @@ mod tests {
assert!(md.contains("| Priority | Action | Owner | Due Date | Status |")); assert!(md.contains("| Priority | Action | Owner | Due Date | Status |"));
assert!(md.contains("| P0 |")); assert!(md.contains("| P0 |"));
} }
#[test]
fn test_postmortem_timeline_with_real_events() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "pm-456".to_string(),
event_type: "triage_started".to_string(),
description: "Triage initiated".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 08:05:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "pm-456".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Certificate expiry confirmed".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 10:30:00 UTC".to_string(),
},
];
let md = generate_postmortem_markdown(&detail);
assert!(md.contains("## Timeline"));
assert!(md.contains("| 2025-02-10 08:05:00 UTC | Triage Started - Triage initiated |"));
assert!(md.contains(
"| 2025-02-10 10:30:00 UTC | Root Cause Identified - Certificate expiry confirmed |"
));
assert!(!md.contains("_[Add additional timeline events]_"));
}
#[test]
fn test_postmortem_impact_with_duration() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "pm-456".to_string(),
event_type: "triage_started".to_string(),
description: "Triage initiated".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 08:00:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "pm-456".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Found it".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 10:30:00 UTC".to_string(),
},
];
let md = generate_postmortem_markdown(&detail);
assert!(md.contains("**Duration:** 2h 30m"));
assert!(!md.contains("_[How long did the incident last?]_"));
}
#[test]
fn test_postmortem_what_went_well_with_steps() {
let mut detail = make_test_detail();
detail.timeline_events = vec![TimelineEvent {
id: "te-1".to_string(),
issue_id: "pm-456".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Root cause found".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 10:00:00 UTC".to_string(),
}];
let md = generate_postmortem_markdown(&detail);
assert!(md.contains("Systematic 5-whys analysis conducted (1 steps completed)"));
assert!(md.contains("Root cause was identified during triage"));
}
} }

View File

@ -1,48 +1,5 @@
use crate::db::models::IssueDetail; use crate::db::models::IssueDetail;
pub fn format_event_type(event_type: &str) -> &str {
match event_type {
"triage_started" => "Triage Started",
"log_uploaded" => "Log File Uploaded",
"why_level_advanced" => "Why Level Advanced",
"root_cause_identified" => "Root Cause Identified",
"rca_generated" => "RCA Document Generated",
"postmortem_generated" => "Post-Mortem Generated",
"document_exported" => "Document Exported",
other => other,
}
}
pub fn calculate_duration(start: &str, end: &str) -> String {
let fmt = "%Y-%m-%d %H:%M:%S UTC";
let start_dt = match chrono::NaiveDateTime::parse_from_str(start, fmt) {
Ok(dt) => dt,
Err(_) => return "N/A".to_string(),
};
let end_dt = match chrono::NaiveDateTime::parse_from_str(end, fmt) {
Ok(dt) => dt,
Err(_) => return "N/A".to_string(),
};
let duration = end_dt.signed_duration_since(start_dt);
let total_minutes = duration.num_minutes();
if total_minutes < 0 {
return "N/A".to_string();
}
let days = total_minutes / (24 * 60);
let hours = (total_minutes % (24 * 60)) / 60;
let minutes = total_minutes % 60;
if days > 0 {
format!("{days}d {hours}h")
} else if hours > 0 {
format!("{hours}h {minutes}m")
} else {
format!("{minutes}m")
}
}
pub fn generate_rca_markdown(detail: &IssueDetail) -> String { pub fn generate_rca_markdown(detail: &IssueDetail) -> String {
let issue = &detail.issue; let issue = &detail.issue;
@ -100,52 +57,6 @@ pub fn generate_rca_markdown(detail: &IssueDetail) -> String {
md.push_str("\n\n"); md.push_str("\n\n");
} }
// Incident Timeline
md.push_str("## Incident Timeline\n\n");
if detail.timeline_events.is_empty() {
md.push_str("_No timeline events recorded._\n\n");
} else {
md.push_str("| Time (UTC) | Event | Description |\n");
md.push_str("|------------|-------|-------------|\n");
for event in &detail.timeline_events {
md.push_str(&format!(
"| {} | {} | {} |\n",
event.created_at,
format_event_type(&event.event_type),
event.description
));
}
md.push('\n');
}
// Incident Metrics
md.push_str("## Incident Metrics\n\n");
md.push_str(&format!(
"- **Total Events:** {}\n",
detail.timeline_events.len()
));
if detail.timeline_events.len() >= 2 {
let first = &detail.timeline_events[0].created_at;
let last = &detail.timeline_events[detail.timeline_events.len() - 1].created_at;
md.push_str(&format!(
"- **Incident Duration:** {}\n",
calculate_duration(first, last)
));
} else {
md.push_str("- **Incident Duration:** N/A\n");
}
let root_cause_event = detail
.timeline_events
.iter()
.find(|e| e.event_type == "root_cause_identified");
if let (Some(first), Some(rc)) = (detail.timeline_events.first(), root_cause_event) {
md.push_str(&format!(
"- **Time to Root Cause:** {}\n",
calculate_duration(&first.created_at, &rc.created_at)
));
}
md.push('\n');
// 5 Whys Analysis // 5 Whys Analysis
md.push_str("## 5 Whys Analysis\n\n"); md.push_str("## 5 Whys Analysis\n\n");
if detail.resolution_steps.is_empty() { if detail.resolution_steps.is_empty() {
@ -232,7 +143,7 @@ pub fn generate_rca_markdown(detail: &IssueDetail) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::db::models::{Issue, IssueDetail, LogFile, ResolutionStep, TimelineEvent}; use crate::db::models::{Issue, IssueDetail, LogFile, ResolutionStep};
fn make_test_detail() -> IssueDetail { fn make_test_detail() -> IssueDetail {
IssueDetail { IssueDetail {
@ -283,7 +194,6 @@ mod tests {
}, },
], ],
conversations: vec![], conversations: vec![],
timeline_events: vec![],
} }
} }
@ -337,135 +247,4 @@ mod tests {
let md = generate_rca_markdown(&detail); let md = generate_rca_markdown(&detail);
assert!(md.contains("Unassigned")); assert!(md.contains("Unassigned"));
} }
#[test]
fn test_rca_timeline_section_with_events() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "test-123".to_string(),
event_type: "triage_started".to_string(),
description: "Triage initiated by oncall".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 10:00:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "test-123".to_string(),
event_type: "log_uploaded".to_string(),
description: "app.log uploaded".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 10:30:00 UTC".to_string(),
},
TimelineEvent {
id: "te-3".to_string(),
issue_id: "test-123".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Connection pool leak found".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 12:15:00 UTC".to_string(),
},
];
let md = generate_rca_markdown(&detail);
assert!(md.contains("## Incident Timeline"));
assert!(md.contains("| Time (UTC) | Event | Description |"));
assert!(md
.contains("| 2025-01-15 10:00:00 UTC | Triage Started | Triage initiated by oncall |"));
assert!(md.contains("| 2025-01-15 10:30:00 UTC | Log File Uploaded | app.log uploaded |"));
assert!(md.contains(
"| 2025-01-15 12:15:00 UTC | Root Cause Identified | Connection pool leak found |"
));
}
#[test]
fn test_rca_timeline_section_empty() {
let detail = make_test_detail();
let md = generate_rca_markdown(&detail);
assert!(md.contains("## Incident Timeline"));
assert!(md.contains("_No timeline events recorded._"));
}
#[test]
fn test_rca_metrics_section() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "test-123".to_string(),
event_type: "triage_started".to_string(),
description: "Triage started".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 10:00:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "test-123".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Root cause found".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 12:15:00 UTC".to_string(),
},
];
let md = generate_rca_markdown(&detail);
assert!(md.contains("## Incident Metrics"));
assert!(md.contains("**Total Events:** 2"));
assert!(md.contains("**Incident Duration:** 2h 15m"));
assert!(md.contains("**Time to Root Cause:** 2h 15m"));
}
#[test]
fn test_calculate_duration_hours_minutes() {
assert_eq!(
calculate_duration("2025-01-15 10:00:00 UTC", "2025-01-15 12:15:00 UTC"),
"2h 15m"
);
}
#[test]
fn test_calculate_duration_days() {
assert_eq!(
calculate_duration("2025-01-15 10:00:00 UTC", "2025-01-18 11:00:00 UTC"),
"3d 1h"
);
}
#[test]
fn test_calculate_duration_minutes_only() {
assert_eq!(
calculate_duration("2025-01-15 10:00:00 UTC", "2025-01-15 10:45:00 UTC"),
"45m"
);
}
#[test]
fn test_calculate_duration_invalid() {
assert_eq!(calculate_duration("bad-date", "also-bad"), "N/A");
}
#[test]
fn test_format_event_type_known() {
assert_eq!(format_event_type("triage_started"), "Triage Started");
assert_eq!(format_event_type("log_uploaded"), "Log File Uploaded");
assert_eq!(
format_event_type("why_level_advanced"),
"Why Level Advanced"
);
assert_eq!(
format_event_type("root_cause_identified"),
"Root Cause Identified"
);
assert_eq!(format_event_type("rca_generated"), "RCA Document Generated");
assert_eq!(
format_event_type("postmortem_generated"),
"Post-Mortem Generated"
);
assert_eq!(format_event_type("document_exported"), "Document Exported");
}
#[test]
fn test_format_event_type_unknown() {
assert_eq!(format_event_type("custom_event"), "custom_event");
assert_eq!(format_event_type(""), "");
}
} }

View File

@ -629,10 +629,11 @@ mod tests {
#[test] #[test]
fn test_derive_aes_key_is_stable_for_same_input() { fn test_derive_aes_key_is_stable_for_same_input() {
// Use deterministic helper to avoid env var race conditions in parallel tests std::env::set_var("TFTSR_ENCRYPTION_KEY", "stable-test-key");
let k1 = derive_aes_key_from_str("stable-test-key").unwrap(); let k1 = derive_aes_key().unwrap();
let k2 = derive_aes_key_from_str("stable-test-key").unwrap(); let k2 = derive_aes_key().unwrap();
assert_eq!(k1, k2); assert_eq!(k1, k2);
std::env::remove_var("TFTSR_ENCRYPTION_KEY");
} }
// Test helper functions that accept key directly (bypass env var) // Test helper functions that accept key directly (bypass env var)

View File

@ -1,40 +1,4 @@
use super::confluence_search::SearchResult; use super::confluence_search::SearchResult;
use crate::integrations::query_expansion::expand_query;
const MAX_EXPANDED_QUERIES: usize = 3;
fn escape_wiql(s: &str) -> String {
s.replace('\'', "''")
.replace('"', "\\\"")
.replace('\\', "\\\\")
.replace('(', "\\(")
.replace(')', "\\)")
.replace(';', "\\;")
.replace('=', "\\=")
}
/// Basic HTML tag stripping to prevent XSS in excerpts
fn strip_html_tags(html: &str) -> String {
let mut result = String::new();
let mut in_tag = false;
for ch in html.chars() {
match ch {
'<' => in_tag = true,
'>' => in_tag = false,
_ if !in_tag => result.push(ch),
_ => {}
}
}
// Clean up whitespace
result
.split_whitespace()
.collect::<Vec<_>>()
.join(" ")
.trim()
.to_string()
}
/// Search Azure DevOps Wiki for content matching the query /// Search Azure DevOps Wiki for content matching the query
pub async fn search_wiki( pub async fn search_wiki(
@ -46,94 +10,90 @@ pub async fn search_wiki(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Use Azure DevOps Search API
let search_url = format!(
"{}/_apis/search/wikisearchresults?api-version=7.0",
org_url.trim_end_matches('/')
);
let mut all_results = Vec::new(); let search_body = serde_json::json!({
"searchText": query,
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { "$top": 5,
// Use Azure DevOps Search API "filters": {
let search_url = format!( "ProjectFilters": [project]
"{}/_apis/search/wikisearchresults?api-version=7.0",
org_url.trim_end_matches('/')
);
let search_body = serde_json::json!({
"searchText": expanded_query,
"$top": 5,
"filters": {
"ProjectFilters": [project]
}
});
tracing::info!("Searching Azure DevOps Wiki with query: {}", expanded_query);
let resp = client
.post(&search_url)
.header("Cookie", &cookie_header)
.header("Accept", "application/json")
.header("Content-Type", "application/json")
.json(&search_body)
.send()
.await
.map_err(|e| format!("Azure DevOps wiki search failed: {e}"))?;
if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
tracing::warn!("Azure DevOps wiki search failed with status {status}: {text}");
continue;
} }
});
let json: serde_json::Value = resp tracing::info!("Searching Azure DevOps Wiki: {}", search_url);
.json()
.await
.map_err(|e| format!("Failed to parse ADO wiki search response: {e}"))?;
if let Some(results_array) = json["results"].as_array() { let resp = client
for item in results_array.iter().take(MAX_EXPANDED_QUERIES) { .post(&search_url)
let title = item["fileName"].as_str().unwrap_or("Untitled").to_string(); .header("Cookie", &cookie_header)
.header("Accept", "application/json")
.header("Content-Type", "application/json")
.json(&search_body)
.send()
.await
.map_err(|e| format!("Azure DevOps wiki search failed: {e}"))?;
let path = item["path"].as_str().unwrap_or(""); if !resp.status().is_success() {
let url = format!( let status = resp.status();
"{}/_wiki/wikis/{}/{}", let text = resp.text().await.unwrap_or_default();
org_url.trim_end_matches('/'), return Err(format!(
project, "Azure DevOps wiki search failed with status {status}: {text}"
path ));
); }
let excerpt = strip_html_tags(item["content"].as_str().unwrap_or("")) let json: serde_json::Value = resp
.chars() .json()
.take(300) .await
.collect::<String>(); .map_err(|e| format!("Failed to parse ADO wiki search response: {e}"))?;
// Fetch full wiki page content let mut results = Vec::new();
let content = if let Some(wiki_id) = item["wiki"]["id"].as_str() {
if let Some(page_path) = item["path"].as_str() { if let Some(results_array) = json["results"].as_array() {
fetch_wiki_page(org_url, wiki_id, page_path, &cookie_header) for item in results_array.iter().take(3) {
.await let title = item["fileName"].as_str().unwrap_or("Untitled").to_string();
.ok()
} else { let path = item["path"].as_str().unwrap_or("");
None let url = format!(
} "{}/_wiki/wikis/{}/{}",
org_url.trim_end_matches('/'),
project,
path
);
let excerpt = item["content"]
.as_str()
.unwrap_or("")
.chars()
.take(300)
.collect::<String>();
// Fetch full wiki page content
let content = if let Some(wiki_id) = item["wiki"]["id"].as_str() {
if let Some(page_path) = item["path"].as_str() {
fetch_wiki_page(org_url, wiki_id, page_path, &cookie_header)
.await
.ok()
} else { } else {
None None
}; }
} else {
None
};
all_results.push(SearchResult { results.push(SearchResult {
title, title,
url, url,
excerpt, excerpt,
content, content,
source: "Azure DevOps".to_string(), source: "Azure DevOps".to_string(),
}); });
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }
/// Fetch full wiki page content /// Fetch full wiki page content
@ -191,68 +151,55 @@ pub async fn search_work_items(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Use WIQL (Work Item Query Language)
let wiql_url = format!(
"{}/_apis/wit/wiql?api-version=7.0",
org_url.trim_end_matches('/')
);
let mut all_results = Vec::new(); let wiql_query = format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.State] FROM WorkItems WHERE [System.TeamProject] = '{project}' AND ([System.Title] CONTAINS '{query}' OR [System.Description] CONTAINS '{query}') ORDER BY [System.ChangedDate] DESC"
);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let wiql_body = serde_json::json!({
// Use WIQL (Work Item Query Language) "query": wiql_query
let wiql_url = format!( });
"{}/_apis/wit/wiql?api-version=7.0",
org_url.trim_end_matches('/')
);
let safe_query = escape_wiql(expanded_query); tracing::info!("Searching Azure DevOps work items");
let wiql_query = format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.State] FROM WorkItems WHERE [System.TeamProject] = '{project}' AND ([System.Title] ~ '{safe_query}' OR [System.Description] ~ '{safe_query}') ORDER BY [System.ChangedDate] DESC"
);
let wiql_body = serde_json::json!({ let resp = client
"query": wiql_query .post(&wiql_url)
}); .header("Cookie", &cookie_header)
.header("Accept", "application/json")
.header("Content-Type", "application/json")
.json(&wiql_body)
.send()
.await
.map_err(|e| format!("ADO work item search failed: {e}"))?;
tracing::info!( if !resp.status().is_success() {
"Searching Azure DevOps work items with query: {}", return Ok(Vec::new()); // Don't fail if work item search fails
expanded_query }
);
let resp = client let json: serde_json::Value = resp
.post(&wiql_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|_| "Failed to parse work item response".to_string())?;
.header("Content-Type", "application/json")
.json(&wiql_body)
.send()
.await
.map_err(|e| format!("ADO work item search failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
continue; // Don't fail if work item search fails
}
let json: serde_json::Value = resp if let Some(work_items) = json["workItems"].as_array() {
.json() // Fetch details for top 3 work items
.await for item in work_items.iter().take(3) {
.map_err(|_| "Failed to parse work item response".to_string())?; if let Some(id) = item["id"].as_i64() {
if let Ok(work_item) = fetch_work_item_details(org_url, id, &cookie_header).await {
if let Some(work_items) = json["workItems"].as_array() { results.push(work_item);
// Fetch details for top 3 work items
for item in work_items.iter().take(MAX_EXPANDED_QUERIES) {
if let Some(id) = item["id"].as_i64() {
if let Ok(work_item) =
fetch_work_item_details(org_url, id, &cookie_header).await
{
all_results.push(work_item);
}
} }
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }
/// Fetch work item details /// Fetch work item details
@ -316,53 +263,3 @@ async fn fetch_work_item_details(
source: "Azure DevOps".to_string(), source: "Azure DevOps".to_string(),
}) })
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_escape_wiql_escapes_single_quotes() {
assert_eq!(escape_wiql("test'single"), "test''single");
}
#[test]
fn test_escape_wiql_escapes_double_quotes() {
assert_eq!(escape_wiql("test\"double"), "test\\\\\"double");
}
#[test]
fn test_escape_wiql_escapes_backslash() {
assert_eq!(escape_wiql("test\\backslash"), r#"test\\backslash"#);
}
#[test]
fn test_escape_wiql_escapes_parens() {
assert_eq!(escape_wiql("test(paren"), r#"test\(paren"#);
assert_eq!(escape_wiql("test)paren"), r#"test\)paren"#);
}
#[test]
fn test_escape_wiql_escapes_semicolon() {
assert_eq!(escape_wiql("test;semi"), r#"test\;semi"#);
}
#[test]
fn test_escape_wiql_escapes_equals() {
assert_eq!(escape_wiql("test=equal"), r#"test\=equal"#);
}
#[test]
fn test_escape_wiql_no_special_chars() {
assert_eq!(escape_wiql("simple query"), "simple query");
}
#[test]
fn test_strip_html_tags() {
let html = "<p>Hello <strong>world</strong>!</p>";
assert_eq!(strip_html_tags(html), "Hello world!");
let html2 = "<div><h1>Title</h1><p>Content</p></div>";
assert_eq!(strip_html_tags(html2), "TitleContent");
}
}

View File

@ -1,9 +1,4 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use url::Url;
use super::query_expansion::expand_query;
const MAX_EXPANDED_QUERIES: usize = 3;
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchResult { pub struct SearchResult {
@ -11,36 +6,10 @@ pub struct SearchResult {
pub url: String, pub url: String,
pub excerpt: String, pub excerpt: String,
pub content: Option<String>, pub content: Option<String>,
pub source: String, pub source: String, // "confluence", "servicenow", "azuredevops"
}
fn canonicalize_url(url: &str) -> String {
Url::parse(url)
.ok()
.map(|u| {
let mut u = u.clone();
u.set_fragment(None);
u.set_query(None);
u.to_string()
})
.unwrap_or_else(|| url.to_string())
}
fn escape_cql(s: &str) -> String {
s.replace('"', "\\\"")
.replace(')', "\\)")
.replace('(', "\\(")
.replace('~', "\\~")
.replace('&', "\\&")
.replace('|', "\\|")
.replace('+', "\\+")
.replace('-', "\\-")
} }
/// Search Confluence for content matching the query /// Search Confluence for content matching the query
///
/// This function expands the user query with related terms, synonyms, and variations
/// to improve search coverage across Confluence spaces.
pub async fn search_confluence( pub async fn search_confluence(
base_url: &str, base_url: &str,
query: &str, query: &str,
@ -49,89 +18,86 @@ pub async fn search_confluence(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Use Confluence CQL search
let search_url = format!(
"{}/rest/api/search?cql=text~\"{}\"&limit=5",
base_url.trim_end_matches('/'),
urlencoding::encode(query)
);
let mut all_results = Vec::new(); tracing::info!("Searching Confluence: {}", search_url);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let resp = client
let safe_query = escape_cql(expanded_query); .get(&search_url)
let search_url = format!( .header("Cookie", &cookie_header)
"{}/rest/api/search?cql=text~\"{}\"&limit=5", .header("Accept", "application/json")
base_url.trim_end_matches('/'), .send()
urlencoding::encode(&safe_query) .await
); .map_err(|e| format!("Confluence search request failed: {e}"))?;
tracing::info!( if !resp.status().is_success() {
"Searching Confluence with expanded query: {}", let status = resp.status();
expanded_query let text = resp.text().await.unwrap_or_default();
); return Err(format!(
"Confluence search failed with status {status}: {text}"
));
}
let resp = client let json: serde_json::Value = resp
.get(&search_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|e| format!("Failed to parse Confluence search response: {e}"))?;
.send()
.await
.map_err(|e| format!("Confluence search request failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
tracing::warn!("Confluence search failed with status {status}: {text}");
continue;
}
let json: serde_json::Value = resp if let Some(results_array) = json["results"].as_array() {
.json() for item in results_array.iter().take(3) {
.await // Take top 3 results
.map_err(|e| format!("Failed to parse Confluence search response: {e}"))?; let title = item["title"].as_str().unwrap_or("Untitled").to_string();
if let Some(results_array) = json["results"].as_array() { let id = item["content"]["id"].as_str();
for item in results_array.iter().take(MAX_EXPANDED_QUERIES) { let space_key = item["content"]["space"]["key"].as_str();
let title = item["title"].as_str().unwrap_or("Untitled").to_string();
let id = item["content"]["id"].as_str(); // Build URL
let space_key = item["content"]["space"]["key"].as_str(); let url = if let (Some(id_str), Some(space)) = (id, space_key) {
format!(
"{}/display/{}/{}",
base_url.trim_end_matches('/'),
space,
id_str
)
} else {
base_url.to_string()
};
let url = if let (Some(id_str), Some(space)) = (id, space_key) { // Get excerpt from search result
format!( let excerpt = item["excerpt"]
"{}/display/{}/{}", .as_str()
base_url.trim_end_matches('/'), .unwrap_or("")
space, .to_string()
id_str .replace("<span class=\"highlight\">", "")
) .replace("</span>", "");
} else {
base_url.to_string()
};
let excerpt = strip_html_tags(item["excerpt"].as_str().unwrap_or("")) // Fetch full page content
.chars() let content = if let Some(content_id) = id {
.take(300) fetch_page_content(base_url, content_id, &cookie_header)
.collect::<String>(); .await
.ok()
} else {
None
};
let content = if let Some(content_id) = id { results.push(SearchResult {
fetch_page_content(base_url, content_id, &cookie_header) title,
.await url,
.ok() excerpt,
} else { content,
None source: "Confluence".to_string(),
}; });
all_results.push(SearchResult {
title,
url,
excerpt,
content,
source: "Confluence".to_string(),
});
}
} }
} }
all_results.sort_by(|a, b| canonicalize_url(&a.url).cmp(&canonicalize_url(&b.url))); Ok(results)
all_results.dedup_by(|a, b| canonicalize_url(&a.url) == canonicalize_url(&b.url));
Ok(all_results)
} }
/// Fetch full content of a Confluence page /// Fetch full content of a Confluence page
@ -219,43 +185,4 @@ mod tests {
let html2 = "<div><h1>Title</h1><p>Content</p></div>"; let html2 = "<div><h1>Title</h1><p>Content</p></div>";
assert_eq!(strip_html_tags(html2), "TitleContent"); assert_eq!(strip_html_tags(html2), "TitleContent");
} }
#[test]
fn test_escape_cql_escapes_special_chars() {
assert_eq!(escape_cql("test\"quote"), r#"test\"quote"#);
assert_eq!(escape_cql("test(paren"), r#"test\(paren"#);
assert_eq!(escape_cql("test)paren"), r#"test\)paren"#);
assert_eq!(escape_cql("test~tilde"), r#"test\~tilde"#);
assert_eq!(escape_cql("test&and"), r#"test\&and"#);
assert_eq!(escape_cql("test|or"), r#"test\|or"#);
assert_eq!(escape_cql("test+plus"), r#"test\+plus"#);
assert_eq!(escape_cql("test-minus"), r#"test\-minus"#);
}
#[test]
fn test_escape_cql_no_special_chars() {
assert_eq!(escape_cql("simple query"), "simple query");
}
#[test]
fn test_canonicalize_url_removes_fragment() {
assert_eq!(
canonicalize_url("https://example.com/page#section"),
"https://example.com/page"
);
}
#[test]
fn test_canonicalize_url_removes_query() {
assert_eq!(
canonicalize_url("https://example.com/page?param=value"),
"https://example.com/page"
);
}
#[test]
fn test_canonicalize_url_handles_malformed() {
// Malformed URLs fall back to original
assert_eq!(canonicalize_url("not a url"), "not a url");
}
} }

View File

@ -4,7 +4,6 @@ pub mod azuredevops_search;
pub mod callback_server; pub mod callback_server;
pub mod confluence; pub mod confluence;
pub mod confluence_search; pub mod confluence_search;
pub mod query_expansion;
pub mod servicenow; pub mod servicenow;
pub mod servicenow_search; pub mod servicenow_search;
pub mod webview_auth; pub mod webview_auth;

View File

@ -1,290 +0,0 @@
/// Query expansion module for integration search
///
/// This module provides functionality to expand user queries with related terms,
/// synonyms, and variations to improve search results across integrations like
/// Confluence, ServiceNow, and Azure DevOps.
use std::collections::HashSet;
/// Product name synonyms for common product variations
///
/// Maps common abbreviations/variants to their full names for search
/// expansion. Returns a sorted, deduplicated list of synonym strings;
/// empty when the query matches no known product, version, or
/// upgrade-related pattern.
fn get_product_synonyms(query: &str) -> Vec<String> {
    let mut synonyms = Vec::new();
    // Hoisted: the lowercase form is needed by several checks below
    // (previously recomputed on every `contains` test).
    let query_lower = query.to_lowercase();

    // VESTA NXT related synonyms
    if query_lower.contains("vesta") || query_lower.contains("vnxt") {
        synonyms.extend(
            [
                "VESTA NXT",
                "Vesta NXT",
                "VNXT",
                "vnxt",
                "Vesta",
                "vesta",
                "VNX",
                "vnx",
            ]
            .map(String::from),
        );
    }

    // Version number patterns (e.g., 1.0.12, 1.1.9)
    if query.contains('.') {
        let version_parts: Vec<&str> = query.split('.').collect();
        if version_parts.len() >= 2 {
            // Variation without dots (e.g., "1.0.12" -> "1012").
            synonyms.push(version_parts.join(""));
            // Partial versions: major.minor always (we already know there are
            // at least two parts — the old code re-checked `len() >= 2` here),
            // plus major.minor.patch when present.
            synonyms.push(version_parts[0..2].join("."));
            if version_parts.len() >= 3 {
                synonyms.push(version_parts[0..3].join("."));
            }
        }
    }

    // Common upgrade-related terms
    if query_lower.contains("upgrade") || query_lower.contains("update") {
        synonyms.extend(
            [
                "upgrade",
                "update",
                "migration",
                "patch",
                "version",
                "install",
                "installation",
            ]
            .map(String::from),
        );
    }

    // Sorted + deduplicated output; drop any empty strings.
    synonyms.sort();
    synonyms.dedup();
    synonyms.retain(|s| !s.is_empty());
    synonyms
}
/// Expand a search query with related terms for better search coverage
///
/// This function takes a user query and expands it with:
/// - Product name synonyms (e.g., "VNXT" -> "VESTA NXT", "Vesta NXT")
/// - Version number variations
/// - Related terms based on query content
///
/// # Arguments
/// * `query` - The original user query
///
/// # Returns
/// A vector of query strings to search, with the original query first
/// followed by expanded variations. Returns empty only if input is empty or
/// whitespace-only. Otherwise, always returns at least the original query.
pub fn expand_query(query: &str) -> Vec<String> {
if query.trim().is_empty() {
return Vec::new();
}
let mut expanded = vec![query.to_string()];
// Get product synonyms
let product_synonyms = get_product_synonyms(query);
expanded.extend(product_synonyms);
// Extract keywords from query for additional expansion
let keywords = extract_keywords(query);
// Add keyword variations
for keyword in keywords.iter().take(5) {
if !expanded.contains(keyword) {
expanded.push(keyword.clone());
}
}
// Add common related terms based on query content
let query_lower = query.to_lowercase();
if query_lower.contains("confluence") || query_lower.contains("documentation") {
expanded.push("docs".to_string());
expanded.push("manual".to_string());
expanded.push("guide".to_string());
}
if query_lower.contains("deploy") || query_lower.contains("deployment") {
expanded.push("deploy".to_string());
expanded.push("deployment".to_string());
expanded.push("release".to_string());
expanded.push("build".to_string());
}
if query_lower.contains("kubernetes") || query_lower.contains("k8s") {
expanded.push("kubernetes".to_string());
expanded.push("k8s".to_string());
expanded.push("pod".to_string());
expanded.push("container".to_string());
}
// Remove duplicates and empty strings
expanded.sort();
expanded.dedup();
expanded.retain(|s| !s.is_empty());
expanded
}
/// Extract important keywords from a search query
///
/// Removes stop words and extracts meaningful terms — including dotted
/// version numbers such as "1.0.12" — for search expansion.
///
/// # Arguments
/// * `query` - The original user query
///
/// # Returns
/// A sorted, deduplicated vector of lowercase keywords
fn extract_keywords(query: &str) -> Vec<String> {
    let stop_words: HashSet<&str> = [
        "how", "do", "i", "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
        "have", "has", "had", "having", "do", "does", "did", "doing", "will", "would", "should",
        "could", "can", "may", "might", "must", "to", "from", "in", "on", "at", "by", "for",
        "with", "about", "as", "of", "or", "and", "but", "not", "what", "when", "where", "which",
        "who", "this", "that", "these", "those", "if", "then", "else", "for", "while", "until",
        "against", "between", "into", "through", "during", "before", "after", "above", "below",
        "up", "down", "out", "off", "over", "under", "again", "further", "then", "once", "here",
        "there", "why", "where", "all", "any", "both", "each", "few", "more", "most", "other",
        "some", "such", "no", "nor", "only", "own", "same", "so", "than", "too", "very", "can",
        "just", "should", "now",
    ]
    .into_iter()
    .collect();

    let mut keywords = Vec::new();
    let mut remaining = query.to_string();

    while !remaining.is_empty() {
        // Skip leading whitespace.
        if remaining.starts_with(char::is_whitespace) {
            remaining = remaining.trim_start().to_string();
            continue;
        }

        // Try to extract a version number (e.g., 1.0.12, 1.1.9).
        //
        // NOTE: all offsets below are BYTE offsets from `char_indices`.
        // The previous implementation sliced with `chars().enumerate()`
        // indices (char counts), which panics on non-ASCII input because
        // `&str` slicing requires byte indices on char boundaries.
        if remaining.starts_with(|c: char| c.is_ascii_digit()) {
            let mut end_pos = 0;
            let mut dot_count = 0;
            for (i, c) in remaining.char_indices() {
                if c.is_ascii_digit() {
                    end_pos = i + c.len_utf8();
                } else if c == '.' {
                    end_pos = i + c.len_utf8();
                    dot_count += 1;
                } else {
                    break;
                }
            }
            // Only extract if we have at least 2 dots (e.g., 1.0.12).
            if dot_count >= 2 && end_pos > 0 {
                keywords.push(remaining[..end_pos].to_string());
                remaining = remaining[end_pos..].to_string();
                continue;
            }
        }

        // Find the word boundary: the first whitespace or non-alphanumeric
        // character, as a byte offset.
        let split_pos = remaining
            .char_indices()
            .find(|&(_, c)| c.is_whitespace() || !c.is_alphanumeric())
            .map(|(i, _)| i)
            .unwrap_or(remaining.len());

        // split_pos == 0 means the string starts with a non-alphanumeric
        // character: skip exactly one char. (Byte-safe — the old
        // `remaining[1..]` panicked on a multi-byte leading char.)
        if split_pos == 0 {
            let ch_len = remaining.chars().next().map_or(1, char::len_utf8);
            remaining = remaining[ch_len..].to_string();
            continue;
        }

        let word = remaining[..split_pos].to_lowercase();
        remaining = remaining[split_pos..].to_string();

        // Skip single chars and stop words (len() < 2 also covers empty).
        if word.len() < 2 || stop_words.contains(word.as_str()) {
            continue;
        }

        // Keep purely numeric words only when they have 3+ digits.
        if word.chars().all(|c| c.is_ascii_digit()) && word.len() >= 3 {
            keywords.push(word);
            continue;
        }

        // Keep words with at least one alphabetic character.
        if word.chars().any(char::is_alphabetic) {
            keywords.push(word);
        }
    }

    keywords.sort();
    keywords.dedup();
    keywords
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_expand_query_with_product_synonyms() {
        let query = "upgrade vesta nxt to 1.1.9";
        let expanded = expand_query(query);
        // Should contain the original query
        assert!(expanded.contains(&query.to_string()));
        // Should contain product synonyms in either casing.
        // (The previous assertion checked `contains("vnxt")` twice; the
        // second arm was clearly meant to cover the uppercase variant.)
        assert!(expanded
            .iter()
            .any(|s| s.contains("VNXT") || s.contains("vnxt")));
    }

    #[test]
    fn test_expand_query_with_version_numbers() {
        let query = "version 1.0.12";
        let expanded = expand_query(query);
        // Should contain the original query
        assert!(expanded.contains(&query.to_string()));
    }

    #[test]
    fn test_extract_keywords() {
        let query = "How do I upgrade VESTA NXT from 1.0.12 to 1.1.9?";
        let keywords = extract_keywords(query);
        assert!(keywords.contains(&"upgrade".to_string()));
        assert!(keywords.contains(&"vesta".to_string()));
        assert!(keywords.contains(&"nxt".to_string()));
        assert!(keywords.contains(&"1.0.12".to_string()));
        assert!(keywords.contains(&"1.1.9".to_string()));
    }

    #[test]
    fn test_product_synonyms() {
        let synonyms = get_product_synonyms("vesta nxt upgrade");
        // Should contain the VNXT synonym in either casing
        assert!(synonyms
            .iter()
            .any(|s| s.contains("VNXT") || s.contains("vnxt")));
    }

    #[test]
    fn test_empty_query() {
        // expand_query documents that empty/whitespace input yields an empty
        // Vec; assert that directly instead of the old near-tautology that
        // also accepted a Vec containing "".
        assert!(expand_query("").is_empty());
        assert!(expand_query("   ").is_empty());
    }
}

View File

@ -1,7 +1,4 @@
use super::confluence_search::SearchResult; use super::confluence_search::SearchResult;
use crate::integrations::query_expansion::expand_query;
const MAX_EXPANDED_QUERIES: usize = 3;
/// Search ServiceNow Knowledge Base for content matching the query /// Search ServiceNow Knowledge Base for content matching the query
pub async fn search_servicenow( pub async fn search_servicenow(
@ -12,88 +9,82 @@ pub async fn search_servicenow(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Search Knowledge Base articles
let search_url = format!(
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=5",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
let mut all_results = Vec::new(); tracing::info!("Searching ServiceNow: {}", search_url);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let resp = client
// Search Knowledge Base articles .get(&search_url)
let search_url = format!( .header("Cookie", &cookie_header)
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=5", .header("Accept", "application/json")
instance_url.trim_end_matches('/'), .send()
urlencoding::encode(expanded_query), .await
urlencoding::encode(expanded_query) .map_err(|e| format!("ServiceNow search request failed: {e}"))?;
);
tracing::info!("Searching ServiceNow with query: {}", expanded_query); if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
return Err(format!(
"ServiceNow search failed with status {status}: {text}"
));
}
let resp = client let json: serde_json::Value = resp
.get(&search_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|e| format!("Failed to parse ServiceNow search response: {e}"))?;
.send()
.await
.map_err(|e| format!("ServiceNow search request failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
tracing::warn!("ServiceNow search failed with status {status}: {text}");
continue;
}
let json: serde_json::Value = resp if let Some(result_array) = json["result"].as_array() {
.json() for item in result_array.iter().take(3) {
.await // Take top 3 results
.map_err(|e| format!("Failed to parse ServiceNow search response: {e}"))?; let title = item["short_description"]
.as_str()
.unwrap_or("Untitled")
.to_string();
if let Some(result_array) = json["result"].as_array() { let sys_id = item["sys_id"].as_str().unwrap_or("").to_string();
for item in result_array.iter().take(MAX_EXPANDED_QUERIES) {
// Take top 3 results
let title = item["short_description"]
.as_str()
.unwrap_or("Untitled")
.to_string();
let sys_id = item["sys_id"].as_str().unwrap_or("").to_string(); let url = format!(
"{}/kb_view.do?sysparm_article={}",
instance_url.trim_end_matches('/'),
sys_id
);
let url = format!( let excerpt = item["text"]
"{}/kb_view.do?sysparm_article={}", .as_str()
instance_url.trim_end_matches('/'), .unwrap_or("")
sys_id .chars()
); .take(300)
.collect::<String>();
let excerpt = item["text"] // Get full article content
.as_str() let content = item["text"].as_str().map(|text| {
.unwrap_or("") if text.len() > 3000 {
.chars() format!("{}...", &text[..3000])
.take(300) } else {
.collect::<String>(); text.to_string()
}
});
// Get full article content results.push(SearchResult {
let content = item["text"].as_str().map(|text| { title,
if text.len() > 3000 { url,
format!("{}...", &text[..3000]) excerpt,
} else { content,
text.to_string() source: "ServiceNow".to_string(),
} });
});
all_results.push(SearchResult {
title,
url,
excerpt,
content,
source: "ServiceNow".to_string(),
});
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }
/// Search ServiceNow Incidents for related issues /// Search ServiceNow Incidents for related issues
@ -105,78 +96,68 @@ pub async fn search_incidents(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Search incidents
let search_url = format!(
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
let mut all_results = Vec::new(); tracing::info!("Searching ServiceNow incidents: {}", search_url);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let resp = client
// Search incidents .get(&search_url)
let search_url = format!( .header("Cookie", &cookie_header)
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true", .header("Accept", "application/json")
instance_url.trim_end_matches('/'), .send()
urlencoding::encode(expanded_query), .await
urlencoding::encode(expanded_query) .map_err(|e| format!("ServiceNow incident search failed: {e}"))?;
);
tracing::info!( if !resp.status().is_success() {
"Searching ServiceNow incidents with query: {}", return Ok(Vec::new()); // Don't fail if incident search fails
expanded_query }
);
let resp = client let json: serde_json::Value = resp
.get(&search_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|_| "Failed to parse incident response".to_string())?;
.send()
.await
.map_err(|e| format!("ServiceNow incident search failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
continue; // Don't fail if incident search fails
}
let json: serde_json::Value = resp if let Some(result_array) = json["result"].as_array() {
.json() for item in result_array.iter() {
.await let number = item["number"].as_str().unwrap_or("Unknown");
.map_err(|_| "Failed to parse incident response".to_string())?; let title = format!(
"Incident {}: {}",
number,
item["short_description"].as_str().unwrap_or("No title")
);
if let Some(result_array) = json["result"].as_array() { let sys_id = item["sys_id"].as_str().unwrap_or("");
for item in result_array.iter() { let url = format!(
let number = item["number"].as_str().unwrap_or("Unknown"); "{}/incident.do?sys_id={}",
let title = format!( instance_url.trim_end_matches('/'),
"Incident {}: {}", sys_id
number, );
item["short_description"].as_str().unwrap_or("No title")
);
let sys_id = item["sys_id"].as_str().unwrap_or(""); let description = item["description"].as_str().unwrap_or("").to_string();
let url = format!(
"{}/incident.do?sys_id={}",
instance_url.trim_end_matches('/'),
sys_id
);
let description = item["description"].as_str().unwrap_or("").to_string(); let resolution = item["close_notes"].as_str().unwrap_or("").to_string();
let resolution = item["close_notes"].as_str().unwrap_or("").to_string(); let content = format!("Description: {description}\nResolution: {resolution}");
let content = format!("Description: {description}\nResolution: {resolution}"); let excerpt = content.chars().take(200).collect::<String>();
let excerpt = content.chars().take(200).collect::<String>(); results.push(SearchResult {
title,
all_results.push(SearchResult { url,
title, excerpt,
url, content: Some(content),
excerpt, source: "ServiceNow".to_string(),
content: Some(content), });
source: "ServiceNow".to_string(),
});
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }

View File

@ -6,7 +6,6 @@ use serde_json::Value;
use tauri::WebviewWindow; use tauri::WebviewWindow;
use super::confluence_search::SearchResult; use super::confluence_search::SearchResult;
use crate::integrations::query_expansion::expand_query;
/// Execute an HTTP request from within the webview context /// Execute an HTTP request from within the webview context
/// This automatically includes all cookies (including HttpOnly) from the authenticated session /// This automatically includes all cookies (including HttpOnly) from the authenticated session
@ -124,113 +123,106 @@ pub async fn search_confluence_webview<R: tauri::Runtime>(
base_url: &str, base_url: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); // Extract keywords from the query for better search
// Remove common words and extract important terms
let keywords = extract_keywords(query);
let mut all_results = Vec::new(); // Build CQL query with OR logic for keywords
let cql = if keywords.len() > 1 {
// Multiple keywords - search for any of them
let keyword_conditions: Vec<String> =
keywords.iter().map(|k| format!("text ~ \"{k}\"")).collect();
keyword_conditions.join(" OR ")
} else if !keywords.is_empty() {
// Single keyword
let keyword = &keywords[0];
format!("text ~ \"{keyword}\"")
} else {
// Fallback to original query
format!("text ~ \"{query}\"")
};
for expanded_query in expanded_queries.iter().take(3) { let search_url = format!(
// Extract keywords from the query for better search "{}/rest/api/search?cql={}&limit=10",
// Remove common words and extract important terms base_url.trim_end_matches('/'),
let keywords = extract_keywords(expanded_query); urlencoding::encode(&cql)
);
// Build CQL query with OR logic for keywords tracing::info!("Executing Confluence search via webview with CQL: {}", cql);
let cql = if keywords.len() > 1 {
// Multiple keywords - search for any of them
let keyword_conditions: Vec<String> =
keywords.iter().map(|k| format!("text ~ \"{k}\"")).collect();
keyword_conditions.join(" OR ")
} else if !keywords.is_empty() {
// Single keyword
let keyword = &keywords[0];
format!("text ~ \"{keyword}\"")
} else {
// Fallback to expanded query
format!("text ~ \"{expanded_query}\"")
};
let search_url = format!( let response = fetch_from_webview(webview_window, &search_url, "GET", None).await?;
"{}/rest/api/search?cql={}&limit=10",
base_url.trim_end_matches('/'),
urlencoding::encode(&cql)
);
tracing::info!("Executing Confluence search via webview with CQL: {}", cql); let mut results = Vec::new();
let response = fetch_from_webview(webview_window, &search_url, "GET", None).await?; if let Some(results_array) = response.get("results").and_then(|v| v.as_array()) {
for item in results_array.iter().take(5) {
let title = item["title"].as_str().unwrap_or("Untitled").to_string();
let content_id = item["content"]["id"].as_str();
let space_key = item["content"]["space"]["key"].as_str();
if let Some(results_array) = response.get("results").and_then(|v| v.as_array()) { let url = if let (Some(id), Some(space)) = (content_id, space_key) {
for item in results_array.iter().take(5) { format!(
let title = item["title"].as_str().unwrap_or("Untitled").to_string(); "{}/display/{}/{}",
let content_id = item["content"]["id"].as_str(); base_url.trim_end_matches('/'),
let space_key = item["content"]["space"]["key"].as_str(); space,
id
)
} else {
base_url.to_string()
};
let url = if let (Some(id), Some(space)) = (content_id, space_key) { let excerpt = item["excerpt"]
format!( .as_str()
"{}/display/{}/{}", .unwrap_or("")
base_url.trim_end_matches('/'), .replace("<span class=\"highlight\">", "")
space, .replace("</span>", "");
id
)
} else {
base_url.to_string()
};
let excerpt = item["excerpt"] // Fetch full page content
.as_str() let content = if let Some(id) = content_id {
.unwrap_or("") let content_url = format!(
.replace("<span class=\"highlight\">", "") "{}/rest/api/content/{id}?expand=body.storage",
.replace("</span>", ""); base_url.trim_end_matches('/')
);
// Fetch full page content if let Ok(content_resp) =
let content = if let Some(id) = content_id { fetch_from_webview(webview_window, &content_url, "GET", None).await
let content_url = format!( {
"{}/rest/api/content/{id}?expand=body.storage", if let Some(body) = content_resp
base_url.trim_end_matches('/') .get("body")
); .and_then(|b| b.get("storage"))
if let Ok(content_resp) = .and_then(|s| s.get("value"))
fetch_from_webview(webview_window, &content_url, "GET", None).await .and_then(|v| v.as_str())
{ {
if let Some(body) = content_resp let text = strip_html_simple(body);
.get("body") Some(if text.len() > 3000 {
.and_then(|b| b.get("storage")) format!("{}...", &text[..3000])
.and_then(|s| s.get("value"))
.and_then(|v| v.as_str())
{
let text = strip_html_simple(body);
Some(if text.len() > 3000 {
format!("{}...", &text[..3000])
} else {
text
})
} else { } else {
None text
} })
} else { } else {
None None
} }
} else { } else {
None None
}; }
} else {
None
};
all_results.push(SearchResult { results.push(SearchResult {
title, title,
url, url,
excerpt: excerpt.chars().take(300).collect(), excerpt: excerpt.chars().take(300).collect(),
content, content,
source: "Confluence".to_string(), source: "Confluence".to_string(),
}); });
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url));
all_results.dedup_by(|a, b| a.url == b.url);
tracing::info!( tracing::info!(
"Confluence webview search returned {} results", "Confluence webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Extract keywords from a search query /// Extract keywords from a search query
@ -304,99 +296,92 @@ pub async fn search_servicenow_webview<R: tauri::Runtime>(
instance_url: &str, instance_url: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); let mut results = Vec::new();
let mut all_results = Vec::new(); // Search knowledge base
let kb_url = format!(
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=3",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
for expanded_query in expanded_queries.iter().take(3) { tracing::info!("Executing ServiceNow KB search via webview");
// Search knowledge base
let kb_url = format!(
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=3",
instance_url.trim_end_matches('/'),
urlencoding::encode(expanded_query),
urlencoding::encode(expanded_query)
);
tracing::info!("Executing ServiceNow KB search via webview with expanded query"); if let Ok(kb_response) = fetch_from_webview(webview_window, &kb_url, "GET", None).await {
if let Some(kb_array) = kb_response.get("result").and_then(|v| v.as_array()) {
for item in kb_array {
let title = item["short_description"]
.as_str()
.unwrap_or("Untitled")
.to_string();
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/kb_view.do?sysparm_article={sys_id}",
instance_url.trim_end_matches('/')
);
let text = item["text"].as_str().unwrap_or("");
let excerpt = text.chars().take(300).collect();
let content = Some(if text.len() > 3000 {
format!("{}...", &text[..3000])
} else {
text.to_string()
});
if let Ok(kb_response) = fetch_from_webview(webview_window, &kb_url, "GET", None).await { results.push(SearchResult {
if let Some(kb_array) = kb_response.get("result").and_then(|v| v.as_array()) { title,
for item in kb_array { url,
let title = item["short_description"] excerpt,
.as_str() content,
.unwrap_or("Untitled") source: "ServiceNow".to_string(),
.to_string(); });
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/kb_view.do?sysparm_article={sys_id}",
instance_url.trim_end_matches('/')
);
let text = item["text"].as_str().unwrap_or("");
let excerpt = text.chars().take(300).collect();
let content = Some(if text.len() > 3000 {
format!("{}...", &text[..3000])
} else {
text.to_string()
});
all_results.push(SearchResult {
title,
url,
excerpt,
content,
source: "ServiceNow".to_string(),
});
}
}
}
// Search incidents
let inc_url = format!(
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true",
instance_url.trim_end_matches('/'),
urlencoding::encode(expanded_query),
urlencoding::encode(expanded_query)
);
if let Ok(inc_response) = fetch_from_webview(webview_window, &inc_url, "GET", None).await {
if let Some(inc_array) = inc_response.get("result").and_then(|v| v.as_array()) {
for item in inc_array {
let number = item["number"].as_str().unwrap_or("Unknown");
let title = format!(
"Incident {}: {}",
number,
item["short_description"].as_str().unwrap_or("No title")
);
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/incident.do?sys_id={sys_id}",
instance_url.trim_end_matches('/')
);
let description = item["description"].as_str().unwrap_or("");
let resolution = item["close_notes"].as_str().unwrap_or("");
let content = format!("Description: {description}\nResolution: {resolution}");
let excerpt = content.chars().take(200).collect();
all_results.push(SearchResult {
title,
url,
excerpt,
content: Some(content),
source: "ServiceNow".to_string(),
});
}
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); // Search incidents
all_results.dedup_by(|a, b| a.url == b.url); let inc_url = format!(
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
if let Ok(inc_response) = fetch_from_webview(webview_window, &inc_url, "GET", None).await {
if let Some(inc_array) = inc_response.get("result").and_then(|v| v.as_array()) {
for item in inc_array {
let number = item["number"].as_str().unwrap_or("Unknown");
let title = format!(
"Incident {}: {}",
number,
item["short_description"].as_str().unwrap_or("No title")
);
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/incident.do?sys_id={sys_id}",
instance_url.trim_end_matches('/')
);
let description = item["description"].as_str().unwrap_or("");
let resolution = item["close_notes"].as_str().unwrap_or("");
let content = format!("Description: {description}\nResolution: {resolution}");
let excerpt = content.chars().take(200).collect();
results.push(SearchResult {
title,
url,
excerpt,
content: Some(content),
source: "ServiceNow".to_string(),
});
}
}
}
tracing::info!( tracing::info!(
"ServiceNow webview search returned {} results", "ServiceNow webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Search Azure DevOps wiki using webview fetch /// Search Azure DevOps wiki using webview fetch
@ -406,89 +391,82 @@ pub async fn search_azuredevops_wiki_webview<R: tauri::Runtime>(
project: &str, project: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); // Extract keywords for better search
let keywords = extract_keywords(query);
let mut all_results = Vec::new(); let search_text = if !keywords.is_empty() {
keywords.join(" ")
} else {
query.to_string()
};
for expanded_query in expanded_queries.iter().take(3) { // Azure DevOps wiki search API
// Extract keywords for better search let search_url = format!(
let keywords = extract_keywords(expanded_query); "{}/{}/_apis/wiki/wikis?api-version=7.0",
org_url.trim_end_matches('/'),
urlencoding::encode(project)
);
let search_text = if !keywords.is_empty() { tracing::info!(
keywords.join(" ") "Executing Azure DevOps wiki search via webview for: {}",
} else { search_text
expanded_query.clone() );
};
// Azure DevOps wiki search API // First, get list of wikis
let search_url = format!( let wikis_response = fetch_from_webview(webview_window, &search_url, "GET", None).await?;
"{}/{}/_apis/wiki/wikis?api-version=7.0",
org_url.trim_end_matches('/'),
urlencoding::encode(project)
);
tracing::info!( let mut results = Vec::new();
"Executing Azure DevOps wiki search via webview for: {}",
search_text
);
// First, get list of wikis if let Some(wikis_array) = wikis_response.get("value").and_then(|v| v.as_array()) {
let wikis_response = fetch_from_webview(webview_window, &search_url, "GET", None).await?; // Search each wiki
for wiki in wikis_array.iter().take(3) {
let wiki_id = wiki["id"].as_str().unwrap_or("");
if let Some(wikis_array) = wikis_response.get("value").and_then(|v| v.as_array()) { if wiki_id.is_empty() {
// Search each wiki continue;
for wiki in wikis_array.iter().take(3) { }
let wiki_id = wiki["id"].as_str().unwrap_or("");
if wiki_id.is_empty() { // Search wiki pages
continue; let pages_url = format!(
} "{}/{}/_apis/wiki/wikis/{}/pages?recursionLevel=Full&includeContent=true&api-version=7.0",
org_url.trim_end_matches('/'),
urlencoding::encode(project),
urlencoding::encode(wiki_id)
);
// Search wiki pages if let Ok(pages_response) =
let pages_url = format!( fetch_from_webview(webview_window, &pages_url, "GET", None).await
"{}/{}/_apis/wiki/wikis/{}/pages?recursionLevel=Full&includeContent=true&api-version=7.0", {
org_url.trim_end_matches('/'), // Try to get "page" field, or use the response itself if it's the page object
urlencoding::encode(project), if let Some(page) = pages_response.get("page") {
urlencoding::encode(wiki_id) search_page_recursive(
); page,
&search_text,
if let Ok(pages_response) = org_url,
fetch_from_webview(webview_window, &pages_url, "GET", None).await project,
{ wiki_id,
// Try to get "page" field, or use the response itself if it's the page object &mut results,
if let Some(page) = pages_response.get("page") { );
search_page_recursive( } else {
page, // Response might be the page object itself
&search_text, search_page_recursive(
org_url, &pages_response,
project, &search_text,
wiki_id, org_url,
&mut all_results, project,
); wiki_id,
} else { &mut results,
// Response might be the page object itself );
search_page_recursive(
&pages_response,
&search_text,
org_url,
project,
wiki_id,
&mut all_results,
);
}
} }
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url));
all_results.dedup_by(|a, b| a.url == b.url);
tracing::info!( tracing::info!(
"Azure DevOps wiki webview search returned {} results", "Azure DevOps wiki webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Recursively search through wiki pages for matching content /// Recursively search through wiki pages for matching content
@ -566,124 +544,115 @@ pub async fn search_azuredevops_workitems_webview<R: tauri::Runtime>(
project: &str, project: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); // Extract keywords
let keywords = extract_keywords(query);
let mut all_results = Vec::new(); // Check if query contains a work item ID (pure number)
let work_item_id: Option<i64> = keywords
.iter()
.filter(|k| k.chars().all(|c| c.is_numeric()))
.filter_map(|k| k.parse::<i64>().ok())
.next();
for expanded_query in expanded_queries.iter().take(3) { // Build WIQL query
// Extract keywords let wiql_query = if let Some(id) = work_item_id {
let keywords = extract_keywords(expanded_query); // Search by specific ID
format!(
// Check if query contains a work item ID (pure number) "SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
let work_item_id: Option<i64> = keywords FROM WorkItems WHERE [System.Id] = {id}"
.iter() )
.filter(|k| k.chars().all(|c| c.is_numeric())) } else {
.filter_map(|k| k.parse::<i64>().ok()) // Search by text in title/description
.next(); let search_terms = if !keywords.is_empty() {
keywords.join(" ")
// Build WIQL query
let wiql_query = if let Some(id) = work_item_id {
// Search by specific ID
format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
FROM WorkItems WHERE [System.Id] = {id}"
)
} else { } else {
// Search by text in title/description query.to_string()
let search_terms = if !keywords.is_empty() {
keywords.join(" ")
} else {
expanded_query.clone()
};
// Use CONTAINS for text search (case-insensitive)
format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
FROM WorkItems WHERE [System.TeamProject] = '{project}' \
AND ([System.Title] CONTAINS '{search_terms}' OR [System.Description] CONTAINS '{search_terms}') \
ORDER BY [System.ChangedDate] DESC"
)
}; };
let wiql_url = format!( // Use CONTAINS for text search (case-insensitive)
"{}/{}/_apis/wit/wiql?api-version=7.0", format!(
org_url.trim_end_matches('/'), "SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
urlencoding::encode(project) FROM WorkItems WHERE [System.TeamProject] = '{project}' \
); AND ([System.Title] CONTAINS '{search_terms}' OR [System.Description] CONTAINS '{search_terms}') \
ORDER BY [System.ChangedDate] DESC"
)
};
let body = serde_json::json!({ let wiql_url = format!(
"query": wiql_query "{}/{}/_apis/wit/wiql?api-version=7.0",
}) org_url.trim_end_matches('/'),
.to_string(); urlencoding::encode(project)
);
tracing::info!("Executing Azure DevOps work item search via webview"); let body = serde_json::json!({
tracing::debug!("WIQL query: {}", wiql_query); "query": wiql_query
tracing::debug!("Request URL: {}", wiql_url); })
.to_string();
let wiql_response = tracing::info!("Executing Azure DevOps work item search via webview");
fetch_from_webview(webview_window, &wiql_url, "POST", Some(&body)).await?; tracing::debug!("WIQL query: {}", wiql_query);
tracing::debug!("Request URL: {}", wiql_url);
if let Some(work_items) = wiql_response.get("workItems").and_then(|v| v.as_array()) { let wiql_response = fetch_from_webview(webview_window, &wiql_url, "POST", Some(&body)).await?;
// Fetch details for first 5 work items
for item in work_items.iter().take(5) {
if let Some(id) = item.get("id").and_then(|i| i.as_i64()) {
let details_url = format!(
"{}/_apis/wit/workitems/{}?api-version=7.0",
org_url.trim_end_matches('/'),
id
);
if let Ok(details) = let mut results = Vec::new();
fetch_from_webview(webview_window, &details_url, "GET", None).await
{
if let Some(fields) = details.get("fields") {
let title = fields
.get("System.Title")
.and_then(|t| t.as_str())
.unwrap_or("Untitled");
let work_item_type = fields
.get("System.WorkItemType")
.and_then(|t| t.as_str())
.unwrap_or("Item");
let description = fields
.get("System.Description")
.and_then(|d| d.as_str())
.unwrap_or("");
let clean_description = strip_html_simple(description); if let Some(work_items) = wiql_response.get("workItems").and_then(|v| v.as_array()) {
let excerpt = clean_description.chars().take(200).collect(); // Fetch details for first 5 work items
for item in work_items.iter().take(5) {
if let Some(id) = item.get("id").and_then(|i| i.as_i64()) {
let details_url = format!(
"{}/_apis/wit/workitems/{}?api-version=7.0",
org_url.trim_end_matches('/'),
id
);
let url = if let Ok(details) =
format!("{}/_workitems/edit/{id}", org_url.trim_end_matches('/')); fetch_from_webview(webview_window, &details_url, "GET", None).await
{
if let Some(fields) = details.get("fields") {
let title = fields
.get("System.Title")
.and_then(|t| t.as_str())
.unwrap_or("Untitled");
let work_item_type = fields
.get("System.WorkItemType")
.and_then(|t| t.as_str())
.unwrap_or("Item");
let description = fields
.get("System.Description")
.and_then(|d| d.as_str())
.unwrap_or("");
let full_content = if clean_description.len() > 3000 { let clean_description = strip_html_simple(description);
format!("{}...", &clean_description[..3000]) let excerpt = clean_description.chars().take(200).collect();
} else {
clean_description.clone()
};
all_results.push(SearchResult { let url = format!("{}/_workitems/edit/{id}", org_url.trim_end_matches('/'));
title: format!("{work_item_type} #{id}: {title}"),
url, let full_content = if clean_description.len() > 3000 {
excerpt, format!("{}...", &clean_description[..3000])
content: Some(full_content), } else {
source: "Azure DevOps".to_string(), clean_description.clone()
}); };
}
results.push(SearchResult {
title: format!("{work_item_type} #{id}: {title}"),
url,
excerpt,
content: Some(full_content),
source: "Azure DevOps".to_string(),
});
} }
} }
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url));
all_results.dedup_by(|a, b| a.url == b.url);
tracing::info!( tracing::info!(
"Azure DevOps work items webview search returned {} results", "Azure DevOps work items webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Add a comment to an Azure DevOps work item /// Add a comment to an Azure DevOps work item

View File

@ -69,7 +69,6 @@ pub fn run() {
commands::db::add_five_why, commands::db::add_five_why,
commands::db::update_five_why, commands::db::update_five_why,
commands::db::add_timeline_event, commands::db::add_timeline_event,
commands::db::get_timeline_events,
// Analysis / PII // Analysis / PII
commands::analysis::upload_log_file, commands::analysis::upload_log_file,
commands::analysis::upload_log_file_by_content, commands::analysis::upload_log_file_by_content,
@ -121,7 +120,6 @@ pub fn run() {
commands::system::get_settings, commands::system::get_settings,
commands::system::update_settings, commands::system::update_settings,
commands::system::get_audit_log, commands::system::get_audit_log,
commands::system::get_app_version,
]) ])
.run(tauri::generate_context!()) .run(tauri::generate_context!())
.expect("Error running Troubleshooting and RCA Assistant application"); .expect("Error running Troubleshooting and RCA Assistant application");

View File

@ -6,7 +6,7 @@
"frontendDist": "../dist", "frontendDist": "../dist",
"devUrl": "http://localhost:1420", "devUrl": "http://localhost:1420",
"beforeDevCommand": "npm run dev", "beforeDevCommand": "npm run dev",
"beforeBuildCommand": "npm run version:update && npm run build" "beforeBuildCommand": "npm run build"
}, },
"app": { "app": {
"security": { "security": {
@ -26,7 +26,7 @@
}, },
"bundle": { "bundle": {
"active": true, "active": true,
"targets": ["deb", "rpm", "nsis"], "targets": "all",
"icon": [ "icon": [
"icons/32x32.png", "icons/32x32.png",
"icons/128x128.png", "icons/128x128.png",
@ -42,6 +42,3 @@
"longDescription": "Structured AI-backed assistant for IT troubleshooting, 5-whys root cause analysis, and post-mortem documentation with offline Ollama support." "longDescription": "Structured AI-backed assistant for IT troubleshooting, 5-whys root cause analysis, and post-mortem documentation with offline Ollama support."
} }
} }

View File

@ -1,4 +1,5 @@
import React, { useState, useEffect } from "react"; import React, { useState, useEffect } from "react";
import { getVersion } from "@tauri-apps/api/app";
import { Routes, Route, NavLink, useLocation } from "react-router-dom"; import { Routes, Route, NavLink, useLocation } from "react-router-dom";
import { import {
Home, Home,
@ -14,7 +15,7 @@ import {
Moon, Moon,
} from "lucide-react"; } from "lucide-react";
import { useSettingsStore } from "@/stores/settingsStore"; import { useSettingsStore } from "@/stores/settingsStore";
import { getAppVersionCmd, loadAiProvidersCmd, testProviderConnectionCmd } from "@/lib/tauriCommands"; import { loadAiProvidersCmd, testProviderConnectionCmd } from "@/lib/tauriCommands";
import Dashboard from "@/pages/Dashboard"; import Dashboard from "@/pages/Dashboard";
import NewIssue from "@/pages/NewIssue"; import NewIssue from "@/pages/NewIssue";
@ -49,7 +50,7 @@ export default function App() {
void useLocation(); void useLocation();
useEffect(() => { useEffect(() => {
getAppVersionCmd().then(setAppVersion).catch(() => {}); getVersion().then(setAppVersion).catch(() => {});
}, []); }, []);
// Load providers and auto-test active provider on startup // Load providers and auto-test active provider on startup

View File

@ -331,58 +331,6 @@ When analyzing identity and access issues, focus on these key areas:
Always ask about the Keycloak version, realm configuration (external IdP vs local users vs LDAP), SSSD version and configured domains, and whether this is a first-time setup or a regression.`, Always ask about the Keycloak version, realm configuration (external IdP vs local users vs LDAP), SSSD version and configured domains, and whether this is a first-time setup or a regression.`,
}; };
export const INCIDENT_RESPONSE_FRAMEWORK = `
---
## INCIDENT RESPONSE METHODOLOGY
Follow this structured framework for every triage conversation. Each phase must be completed with evidence before advancing.
### Phase 1: Detection & Evidence Gathering
- **Do NOT propose fixes** until the problem is fully understood
- Gather: error messages, timestamps, affected systems, scope of impact, recent changes
- Ask: "What changed? When did it start? Who/what is affected? What has been tried?"
- Record all evidence with UTC timestamps
- Establish a clear problem statement before proceeding
### Phase 2: Diagnosis & Hypothesis Testing
- Apply the scientific method: form hypotheses, test them with evidence
- **The 3-Fix Rule**: If you cannot confidently identify the root cause after 3 hypotheses, STOP and reassess your assumptions you may be looking at the wrong system or the wrong layer
- Check the most common causes first (Occam's Razor): DNS, certificates, disk space, permissions, recent deployments
- Differentiate between symptoms and causes treat causes, not symptoms
- Use binary search to narrow scope: which component, which layer, which change
### Phase 3: Root Cause Analysis with 5-Whys
- Each "Why" must be backed by evidence, not speculation
- If you cannot provide evidence for a "Why", state what investigation is needed to confirm
- Look for systemic issues, not just proximate causes
- The root cause should explain ALL observed symptoms, not just some
- Common root cause categories: configuration drift, capacity exhaustion, dependency failure, race condition, human error in process
### Phase 4: Resolution & Prevention
- **Immediate fix**: What stops the bleeding right now? (rollback, restart, failover)
- **Permanent fix**: What prevents recurrence? (code fix, config change, automation)
- **Runbook update**: Document the fix for future oncall engineers
- Verify the fix resolves ALL symptoms, not just the primary one
- Monitor for regression after applying the fix
### Phase 5: Post-Incident Review
- Calculate incident metrics: MTTD (detect), MTTA (acknowledge), MTTR (resolve)
- Conduct blameless post-mortem focused on systems and processes
- Identify action items with owners and due dates
- Categories: monitoring gaps, process improvements, technical debt, training needs
- Ask: "What would have prevented this? What would have detected it faster? What would have resolved it faster?"
### Communication Practices
- State your current phase explicitly (e.g., "We are in Phase 2: Diagnosis")
- Summarize findings at each phase transition
- Flag assumptions clearly: "ASSUMPTION: ..." vs "CONFIRMED: ..."
- When advancing the Why level, explicitly state the evidence chain
`;
export function getDomainPrompt(domainId: string): string { export function getDomainPrompt(domainId: string): string {
const domainSpecific = domainPrompts[domainId] ?? ""; return domainPrompts[domainId] ?? "";
if (!domainSpecific) return "";
return domainSpecific + INCIDENT_RESPONSE_FRAMEWORK;
} }

View File

@ -74,11 +74,9 @@ export interface FiveWhyEntry {
export interface TimelineEvent { export interface TimelineEvent {
id: string; id: string;
issue_id: string;
event_type: string; event_type: string;
description: string; description: string;
metadata: string; created_at: number;
created_at: string;
} }
export interface AiConversation { export interface AiConversation {
@ -106,7 +104,6 @@ export interface IssueDetail {
image_attachments: ImageAttachment[]; image_attachments: ImageAttachment[];
resolution_steps: ResolutionStep[]; resolution_steps: ResolutionStep[];
conversations: AiConversation[]; conversations: AiConversation[];
timeline_events: TimelineEvent[];
} }
export interface IssueSummary { export interface IssueSummary {
@ -271,8 +268,8 @@ export interface TriageMessage {
export const analyzeLogsCmd = (issueId: string, logFileIds: string[], providerConfig: ProviderConfig) => export const analyzeLogsCmd = (issueId: string, logFileIds: string[], providerConfig: ProviderConfig) =>
invoke<AnalysisResult>("analyze_logs", { issueId, logFileIds, providerConfig }); invoke<AnalysisResult>("analyze_logs", { issueId, logFileIds, providerConfig });
export const chatMessageCmd = (issueId: string, message: string, providerConfig: ProviderConfig, systemPrompt?: string) => export const chatMessageCmd = (issueId: string, message: string, providerConfig: ProviderConfig) =>
invoke<ChatResponse>("chat_message", { issueId, message, providerConfig, systemPrompt: systemPrompt ?? null }); invoke<ChatResponse>("chat_message", { issueId, message, providerConfig });
export const listProvidersCmd = () => invoke<ProviderInfo[]>("list_providers"); export const listProvidersCmd = () => invoke<ProviderInfo[]>("list_providers");
@ -364,11 +361,8 @@ export const addFiveWhyCmd = (
export const updateFiveWhyCmd = (entryId: string, answer: string) => export const updateFiveWhyCmd = (entryId: string, answer: string) =>
invoke<void>("update_five_why", { entryId, answer }); invoke<void>("update_five_why", { entryId, answer });
export const addTimelineEventCmd = (issueId: string, eventType: string, description: string, metadata?: string) => export const addTimelineEventCmd = (issueId: string, eventType: string, description: string) =>
invoke<TimelineEvent>("add_timeline_event", { issueId, eventType, description, metadata: metadata ?? null }); invoke<TimelineEvent>("add_timeline_event", { issueId, eventType, description });
export const getTimelineEventsCmd = (issueId: string) =>
invoke<TimelineEvent[]>("get_timeline_events", { issueId });
// ─── Document commands ──────────────────────────────────────────────────────── // ─── Document commands ────────────────────────────────────────────────────────
@ -492,8 +486,3 @@ export const loadAiProvidersCmd = () =>
export const deleteAiProviderCmd = (name: string) => export const deleteAiProviderCmd = (name: string) =>
invoke<void>("delete_ai_provider", { name }); invoke<void>("delete_ai_provider", { name });
// ─── System / Version ─────────────────────────────────────────────────────────
export const getAppVersionCmd = () =>
invoke<string>("get_app_version");

View File

@ -5,7 +5,7 @@ import { DocEditor } from "@/components/DocEditor";
import { useSettingsStore } from "@/stores/settingsStore"; import { useSettingsStore } from "@/stores/settingsStore";
import { import {
generatePostmortemCmd, generatePostmortemCmd,
addTimelineEventCmd,
updateDocumentCmd, updateDocumentCmd,
exportDocumentCmd, exportDocumentCmd,
type Document_, type Document_,
@ -28,7 +28,6 @@ export default function Postmortem() {
const generated = await generatePostmortemCmd(id); const generated = await generatePostmortemCmd(id);
setDoc(generated); setDoc(generated);
setContent(generated.content_md); setContent(generated.content_md);
addTimelineEventCmd(id, "postmortem_generated", "Post-mortem document generated").catch(() => {});
} catch (err) { } catch (err) {
setError(String(err)); setError(String(err));
} finally { } finally {
@ -55,7 +54,6 @@ export default function Postmortem() {
try { try {
const path = await exportDocumentCmd(doc.id, doc.title, content, format, ""); const path = await exportDocumentCmd(doc.id, doc.title, content, format, "");
setError(`Document exported to: ${path}`); setError(`Document exported to: ${path}`);
addTimelineEventCmd(id!, "document_exported", `Post-mortem exported as ${format}`).catch(() => {});
setTimeout(() => setError(null), 5000); setTimeout(() => setError(null), 5000);
} catch (err) { } catch (err) {
setError(`Export failed: ${String(err)}`); setError(`Export failed: ${String(err)}`);

View File

@ -8,7 +8,6 @@ import {
generateRcaCmd, generateRcaCmd,
updateDocumentCmd, updateDocumentCmd,
exportDocumentCmd, exportDocumentCmd,
addTimelineEventCmd,
type Document_, type Document_,
} from "@/lib/tauriCommands"; } from "@/lib/tauriCommands";
@ -30,7 +29,6 @@ export default function RCA() {
const generated = await generateRcaCmd(id); const generated = await generateRcaCmd(id);
setDoc(generated); setDoc(generated);
setContent(generated.content_md); setContent(generated.content_md);
addTimelineEventCmd(id, "rca_generated", "RCA document generated").catch(() => {});
} catch (err) { } catch (err) {
setError(String(err)); setError(String(err));
} finally { } finally {
@ -57,7 +55,6 @@ export default function RCA() {
try { try {
const path = await exportDocumentCmd(doc.id, doc.title, content, format, ""); const path = await exportDocumentCmd(doc.id, doc.title, content, format, "");
setError(`Document exported to: ${path}`); setError(`Document exported to: ${path}`);
addTimelineEventCmd(id!, "document_exported", `RCA exported as ${format}`).catch(() => {});
setTimeout(() => setError(null), 5000); setTimeout(() => setError(null), 5000);
} catch (err) { } catch (err) {
setError(`Export failed: ${String(err)}`); setError(`Export failed: ${String(err)}`);

View File

@ -15,7 +15,6 @@ import {
updateIssueCmd, updateIssueCmd,
addFiveWhyCmd, addFiveWhyCmd,
} from "@/lib/tauriCommands"; } from "@/lib/tauriCommands";
import { getDomainPrompt } from "@/lib/domainPrompts";
import type { TriageMessage } from "@/lib/tauriCommands"; import type { TriageMessage } from "@/lib/tauriCommands";
const CLOSE_PATTERNS = [ const CLOSE_PATTERNS = [
@ -168,8 +167,7 @@ export default function Triage() {
setPendingFiles([]); setPendingFiles([]);
try { try {
const systemPrompt = currentIssue ? getDomainPrompt(currentIssue.category) : undefined; const response = await chatMessageCmd(id, aiMessage, provider);
const response = await chatMessageCmd(id, aiMessage, provider, systemPrompt);
const assistantMsg: TriageMessage = { const assistantMsg: TriageMessage = {
id: `asst-${Date.now()}`, id: `asst-${Date.now()}`,
issue_id: id, issue_id: id,

View File

@ -42,8 +42,11 @@ describe("Audit Log", () => {
it("displays audit entries", async () => { it("displays audit entries", async () => {
render(<Security />); render(<Security />);
// Wait for table to appear after async audit data loads // Wait for audit log to load
const table = await screen.findByRole("table"); await screen.findByText("Audit Log");
// Check that the table has rows (header + data rows)
const table = screen.getByRole("table");
expect(table).toBeInTheDocument(); expect(table).toBeInTheDocument();
const rows = screen.getAllByRole("row"); const rows = screen.getAllByRole("row");
@ -53,7 +56,9 @@ describe("Audit Log", () => {
it("provides way to view transmitted data details", async () => { it("provides way to view transmitted data details", async () => {
render(<Security />); render(<Security />);
// Wait for async data to load and render the table await screen.findByText("Audit Log");
// Should have View/Hide buttons for expanding details
const viewButtons = await screen.findAllByRole("button", { name: /View/i }); const viewButtons = await screen.findAllByRole("button", { name: /View/i });
expect(viewButtons.length).toBeGreaterThan(0); expect(viewButtons.length).toBeGreaterThan(0);
}); });
@ -61,13 +66,14 @@ describe("Audit Log", () => {
it("details column or button exists for viewing data", async () => { it("details column or button exists for viewing data", async () => {
render(<Security />); render(<Security />);
// Wait for async data to load and render the table await screen.findByText("Audit Log");
await screen.findByRole("table");
// The audit log should have a Details column header
const detailsHeader = screen.getByText("Details"); const detailsHeader = screen.getByText("Details");
expect(detailsHeader).toBeInTheDocument(); expect(detailsHeader).toBeInTheDocument();
const viewButtons = screen.getAllByRole("button", { name: /View/i }); // Should have view buttons
const viewButtons = await screen.findAllByRole("button", { name: /View/i });
expect(viewButtons.length).toBe(2); // One for each mock entry expect(viewButtons.length).toBe(2); // One for each mock entry
}); });
}); });

View File

@ -1,63 +0,0 @@
import { describe, it, expect } from "vitest";
import { getDomainPrompt, DOMAINS, INCIDENT_RESPONSE_FRAMEWORK } from "@/lib/domainPrompts";
describe("Domain Prompts with Incident Response Framework", () => {
it("exports INCIDENT_RESPONSE_FRAMEWORK constant", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toBeDefined();
expect(typeof INCIDENT_RESPONSE_FRAMEWORK).toBe("string");
expect(INCIDENT_RESPONSE_FRAMEWORK.length).toBeGreaterThan(100);
});
it("framework contains all 5 phases", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 1: Detection & Evidence Gathering");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 2: Diagnosis & Hypothesis Testing");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 3: Root Cause Analysis with 5-Whys");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 4: Resolution & Prevention");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 5: Post-Incident Review");
});
it("framework contains the 3-Fix Rule", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("3-Fix Rule");
});
it("framework contains communication practices", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Communication Practices");
});
it("all defined domains include incident response methodology", () => {
for (const domain of DOMAINS) {
const prompt = getDomainPrompt(domain.id);
if (prompt) {
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
expect(prompt).toContain("Phase 1:");
expect(prompt).toContain("Phase 5:");
}
}
});
it("returns empty string for unknown domain", () => {
expect(getDomainPrompt("nonexistent_domain")).toBe("");
expect(getDomainPrompt("")).toBe("");
});
it("preserves existing Linux domain content", () => {
const prompt = getDomainPrompt("linux");
expect(prompt).toContain("senior Linux systems engineer");
expect(prompt).toContain("RHEL");
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
});
it("preserves existing Kubernetes domain content", () => {
const prompt = getDomainPrompt("kubernetes");
expect(prompt).toContain("Kubernetes platform engineer");
expect(prompt).toContain("k3s");
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
});
it("preserves existing Network domain content", () => {
const prompt = getDomainPrompt("network");
expect(prompt).toContain("network engineer");
expect(prompt).toContain("Fortigate");
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
});
});

View File

@ -35,7 +35,6 @@ const mockIssueDetail = {
}, },
], ],
conversations: [], conversations: [],
timeline_events: [],
}; };
describe("Resolution Page", () => { describe("Resolution Page", () => {

View File

@ -1,54 +0,0 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
import { invoke } from "@tauri-apps/api/core";
const mockInvoke = vi.mocked(invoke);
describe("Timeline Event Commands", () => {
beforeEach(() => {
mockInvoke.mockReset();
});
it("addTimelineEventCmd calls invoke with correct params", async () => {
const mockEvent = {
id: "te-1",
issue_id: "issue-1",
event_type: "triage_started",
description: "Started",
metadata: "{}",
created_at: "2025-01-15 10:00:00 UTC",
};
mockInvoke.mockResolvedValueOnce(mockEvent as never);
const { addTimelineEventCmd } = await import("@/lib/tauriCommands");
const result = await addTimelineEventCmd("issue-1", "triage_started", "Started");
expect(mockInvoke).toHaveBeenCalledWith("add_timeline_event", {
issueId: "issue-1",
eventType: "triage_started",
description: "Started",
metadata: null,
});
expect(result).toEqual(mockEvent);
});
it("addTimelineEventCmd passes metadata when provided", async () => {
mockInvoke.mockResolvedValueOnce({} as never);
const { addTimelineEventCmd } = await import("@/lib/tauriCommands");
await addTimelineEventCmd("issue-1", "log_uploaded", "File uploaded", '{"file":"app.log"}');
expect(mockInvoke).toHaveBeenCalledWith("add_timeline_event", {
issueId: "issue-1",
eventType: "log_uploaded",
description: "File uploaded",
metadata: '{"file":"app.log"}',
});
});
it("getTimelineEventsCmd calls invoke with correct params", async () => {
mockInvoke.mockResolvedValueOnce([] as never);
const { getTimelineEventsCmd } = await import("@/lib/tauriCommands");
const result = await getTimelineEventsCmd("issue-1");
expect(mockInvoke).toHaveBeenCalledWith("get_timeline_events", { issueId: "issue-1" });
expect(result).toEqual([]);
});
});