Compare commits

..

No commits in common. "master" and "v0.2.45" have entirely different histories.

65 changed files with 845 additions and 7435 deletions

View File

@ -1,14 +1,11 @@
# Pre-baked builder for Linux amd64 Tauri releases. # Pre-baked builder for Linux amd64 Tauri releases.
# All system dependencies are installed once here; CI jobs skip apt-get entirely. # All system dependencies are installed once here; CI jobs skip apt-get entirely.
# Rebuild when: Rust toolchain version changes, webkit2gtk/gtk major version changes, # Rebuild when: Rust toolchain version changes, webkit2gtk/gtk major version changes,
# Node.js major version changes, OpenSSL major version changes (used via OPENSSL_STATIC=1), # or Node.js major version changes. Tag format: rust<VER>-node<VER>
# or Tauri CLI version changes that affect bundler system deps.
# Tag format: rust<VER>-node<VER>
FROM rust:1.88-slim FROM rust:1.88-slim
RUN apt-get update -qq \ RUN apt-get update -qq \
&& apt-get install -y -qq --no-install-recommends \ && apt-get install -y -qq --no-install-recommends \
ca-certificates \
libwebkit2gtk-4.1-dev \ libwebkit2gtk-4.1-dev \
libssl-dev \ libssl-dev \
libgtk-3-dev \ libgtk-3-dev \
@ -24,5 +21,4 @@ RUN apt-get update -qq \
&& apt-get install -y --no-install-recommends nodejs \ && apt-get install -y --no-install-recommends nodejs \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
RUN rustup target add x86_64-unknown-linux-gnu \ RUN rustup target add x86_64-unknown-linux-gnu
&& rustup component add rustfmt clippy

View File

@ -1,9 +1,7 @@
# Pre-baked cross-compiler for Linux arm64 Tauri releases (runs on Linux amd64). # Pre-baked cross-compiler for Linux arm64 Tauri releases (runs on Linux amd64).
# Bakes in: amd64 cross-toolchain, arm64 multiarch dev libs, Node.js, and Rust. # Bakes in: amd64 cross-toolchain, arm64 multiarch dev libs, Node.js, and Rust.
# This image takes ~15 min to build but is only rebuilt when deps change. # This image takes ~15 min to build but is only rebuilt when deps change.
# Rebuild when: Rust toolchain version, webkit2gtk/gtk major version, Node.js major version, # Rebuild when: Rust toolchain version, webkit2gtk/gtk major version, or Node.js changes.
# OpenSSL major version (used via OPENSSL_STATIC=1), or Tauri CLI changes that affect
# bundler system deps.
# Tag format: rust<VER>-node<VER> # Tag format: rust<VER>-node<VER>
FROM ubuntu:22.04 FROM ubuntu:22.04
@ -12,7 +10,7 @@ ARG DEBIAN_FRONTEND=noninteractive
# Step 1: amd64 host tools and cross-compiler # Step 1: amd64 host tools and cross-compiler
RUN apt-get update -qq \ RUN apt-get update -qq \
&& apt-get install -y -qq --no-install-recommends \ && apt-get install -y -qq --no-install-recommends \
ca-certificates curl git gcc g++ make patchelf pkg-config perl jq \ curl git gcc g++ make patchelf pkg-config perl jq \
gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \ gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
@ -42,7 +40,6 @@ RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
# Step 4: Rust 1.88 with arm64 cross-compilation target # Step 4: Rust 1.88 with arm64 cross-compilation target
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \
--default-toolchain 1.88.0 --profile minimal --no-modify-path \ --default-toolchain 1.88.0 --profile minimal --no-modify-path \
&& /root/.cargo/bin/rustup target add aarch64-unknown-linux-gnu \ && /root/.cargo/bin/rustup target add aarch64-unknown-linux-gnu
&& /root/.cargo/bin/rustup component add rustfmt clippy
ENV PATH="/root/.cargo/bin:${PATH}" ENV PATH="/root/.cargo/bin:${PATH}"

View File

@ -1,14 +1,11 @@
# Pre-baked cross-compiler for Windows amd64 Tauri releases (runs on Linux amd64). # Pre-baked cross-compiler for Windows amd64 Tauri releases (runs on Linux amd64).
# All MinGW and Node.js dependencies are installed once here; CI jobs skip apt-get entirely. # All MinGW and Node.js dependencies are installed once here; CI jobs skip apt-get entirely.
# Rebuild when: Rust toolchain version changes, Node.js major version changes, # Rebuild when: Rust toolchain version changes or Node.js major version changes.
# OpenSSL major version changes (used via OPENSSL_STATIC=1), or Tauri CLI changes
# that affect bundler system deps.
# Tag format: rust<VER>-node<VER> # Tag format: rust<VER>-node<VER>
FROM rust:1.88-slim FROM rust:1.88-slim
RUN apt-get update -qq \ RUN apt-get update -qq \
&& apt-get install -y -qq --no-install-recommends \ && apt-get install -y -qq --no-install-recommends \
ca-certificates \
mingw-w64 \ mingw-w64 \
curl \ curl \
nsis \ nsis \

View File

@ -1,26 +0,0 @@
{
"extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended", "plugin:react/recommended", "plugin:react-hooks/recommended"],
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaFeatures": {
"jsx": true
},
"ecmaVersion": "latest",
"sourceType": "module",
"project": ["./tsconfig.json"]
},
"plugins": ["@typescript-eslint", "react", "react-hooks"],
"settings": {
"react": {
"version": "detect"
}
},
"rules": {
"no-unused-vars": "off",
"@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }],
"no-console": ["warn", { "allow": ["warn", "error"] }],
"react/react-in-jsx-scope": "off",
"react/prop-types": "off"
},
"ignorePatterns": ["dist/", "node_modules/", "src-tauri/", "target/", "coverage/"]
}

View File

@ -65,138 +65,6 @@ jobs:
echo "Tag $NEXT pushed successfully" echo "Tag $NEXT pushed successfully"
changelog:
needs: autotag
runs-on: linux-amd64
container:
image: alpine:latest
steps:
- name: Install dependencies
run: |
set -eu
apk add --no-cache git curl jq
- name: Checkout (full history + all tags)
env:
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
run: |
set -eu
git init
git remote add origin \
"http://oauth2:${RELEASE_TOKEN}@172.0.0.29:3000/${GITHUB_REPOSITORY}.git"
git fetch --tags --depth=2147483647 origin
git checkout FETCH_HEAD
git config user.name "gitea-actions[bot]"
git config user.email "gitea-actions@local"
- name: Install git-cliff
run: |
set -eu
CLIFF_VER="2.7.0"
curl -fsSL \
"https://github.com/orhun/git-cliff/releases/download/v${CLIFF_VER}/git-cliff-${CLIFF_VER}-x86_64-unknown-linux-musl.tar.gz" \
| tar -xz --strip-components=1 -C /usr/local/bin \
"git-cliff-${CLIFF_VER}/git-cliff"
- name: Generate changelog
run: |
set -eu
git-cliff --config cliff.toml --output CHANGELOG.md
git-cliff --config cliff.toml --latest --strip all > /tmp/release_body.md
echo "=== Release body preview ==="
cat /tmp/release_body.md
- name: Update Gitea release body
env:
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
run: |
set -eu
API="http://172.0.0.29:3000/api/v1/repos/$GITHUB_REPOSITORY"
TAG=$(git describe --tags --abbrev=0)
# Create release if it doesn't exist yet (build jobs may still be running)
curl -sf -X POST "$API/releases" \
-H "Authorization: token $RELEASE_TOKEN" \
-H "Content-Type: application/json" \
-d "{\"tag_name\":\"$TAG\",\"name\":\"TFTSR $TAG\",\"body\":\"Release $TAG\",\"draft\":false}" || true
RELEASE_ID=$(curl -sf "$API/releases/tags/$TAG" \
-H "Authorization: token $RELEASE_TOKEN" | jq -r '.id')
if [ -z "$RELEASE_ID" ] || [ "$RELEASE_ID" = "null" ]; then
echo "ERROR: Failed to get release ID for $TAG"
exit 1
fi
curl -sf -X PATCH "$API/releases/$RELEASE_ID" \
-H "Authorization: token $RELEASE_TOKEN" \
-H "Content-Type: application/json" \
--data-binary "{\"body\":$(jq -Rs . < /tmp/release_body.md)}"
echo "✓ Release body updated"
- name: Commit CHANGELOG.md to master
env:
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
run: |
set -euo pipefail
API="http://172.0.0.29:3000/api/v1/repos/$GITHUB_REPOSITORY"
TAG=$(git describe --tags --abbrev=0)
# Validate tag format to prevent shell injection in commit message / JSON
if ! echo "$TAG" | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+$'; then
echo "ERROR: Unexpected tag format: $TAG"
exit 1
fi
# Fetch current blob SHA from master; empty if file doesn't exist yet
CURRENT_SHA=$(curl -sf \
-H "Accept: application/json" \
-H "Authorization: token $RELEASE_TOKEN" \
"$API/contents/CHANGELOG.md?ref=master" 2>/dev/null \
| jq -r '.sha // empty' 2>/dev/null || true)
# Base64-encode content (no line wrapping)
CONTENT=$(base64 -w 0 CHANGELOG.md)
# Build JSON payload — omit "sha" when file doesn't exist yet (new repo)
PAYLOAD=$(jq -n \
--arg msg "chore: update CHANGELOG.md for ${TAG} [skip ci]" \
--arg body "$CONTENT" \
--arg sha "$CURRENT_SHA" \
'if $sha == ""
then {message: $msg, content: $body, branch: "master"}
else {message: $msg, content: $body, sha: $sha, branch: "master"}
end')
# PUT atomically updates (or creates) the file on master — no fast-forward needed
RESP_FILE=$(mktemp)
HTTP_CODE=$(curl -s -o "$RESP_FILE" -w "%{http_code}" -X PUT \
-H "Authorization: token $RELEASE_TOKEN" \
-H "Content-Type: application/json" \
-d "$PAYLOAD" \
"$API/contents/CHANGELOG.md")
if [ "$HTTP_CODE" -lt 200 ] || [ "$HTTP_CODE" -ge 300 ]; then
echo "ERROR: Failed to update CHANGELOG.md (HTTP $HTTP_CODE)"
cat "$RESP_FILE" >&2
exit 1
fi
echo "✓ CHANGELOG.md committed to master"
- name: Upload CHANGELOG.md as release asset
env:
RELEASE_TOKEN: ${{ secrets.RELEASE_TOKEN }}
run: |
set -eu
API="http://172.0.0.29:3000/api/v1/repos/$GITHUB_REPOSITORY"
TAG=$(git describe --tags --abbrev=0)
RELEASE_ID=$(curl -sf "$API/releases/tags/$TAG" \
-H "Authorization: token $RELEASE_TOKEN" | jq -r '.id')
if [ -z "$RELEASE_ID" ] || [ "$RELEASE_ID" = "null" ]; then
echo "ERROR: Failed to get release ID for $TAG"
exit 1
fi
EXISTING=$(curl -sf "$API/releases/$RELEASE_ID" \
-H "Authorization: token $RELEASE_TOKEN" \
| jq -r '.assets[]? | select(.name=="CHANGELOG.md") | .id')
[ -n "$EXISTING" ] && curl -sf -X DELETE \
"$API/releases/$RELEASE_ID/assets/$EXISTING" \
-H "Authorization: token $RELEASE_TOKEN"
curl -sf -X POST "$API/releases/$RELEASE_ID/assets" \
-H "Authorization: token $RELEASE_TOKEN" \
-F "attachment=@CHANGELOG.md;filename=CHANGELOG.md"
echo "✓ CHANGELOG.md uploaded"
wiki-sync: wiki-sync:
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
@ -264,36 +132,27 @@ jobs:
needs: autotag needs: autotag
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
image: 172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22 image: rust:1.88-slim
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
apt-get update -qq && apt-get install -y -qq git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
git fetch --depth=1 origin "$GITHUB_SHA" git fetch --depth=1 origin "$GITHUB_SHA"
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache cargo registry - name: Install dependencies
uses: actions/cache@v4 run: |
with: apt-get update -qq && apt-get install -y -qq \
path: | libwebkit2gtk-4.1-dev libssl-dev libgtk-3-dev \
~/.cargo/registry/index libayatana-appindicator3-dev librsvg2-dev patchelf \
~/.cargo/registry/cache pkg-config curl perl jq
~/.cargo/git/db curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }} apt-get install -y nodejs
restore-keys: |
${{ runner.os }}-cargo-linux-amd64-
- name: Cache npm
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-npm-
- name: Build - name: Build
env:
APPIMAGE_EXTRACT_AND_RUN: "1"
run: | run: |
npm ci --legacy-peer-deps npm ci --legacy-peer-deps
rustup target add x86_64-unknown-linux-gnu
CI=true npx tauri build --target x86_64-unknown-linux-gnu CI=true npx tauri build --target x86_64-unknown-linux-gnu
- name: Upload artifacts - name: Upload artifacts
env: env:
@ -322,7 +181,7 @@ jobs:
fi fi
echo "Release ID: $RELEASE_ID" echo "Release ID: $RELEASE_ID"
ARTIFACTS=$(find src-tauri/target/x86_64-unknown-linux-gnu/release/bundle -type f \ ARTIFACTS=$(find src-tauri/target/x86_64-unknown-linux-gnu/release/bundle -type f \
\( -name "*.deb" -o -name "*.rpm" \)) \( -name "*.deb" -o -name "*.rpm" -o -name "*.AppImage" \))
if [ -z "$ARTIFACTS" ]; then if [ -z "$ARTIFACTS" ]; then
echo "ERROR: No Linux amd64 artifacts were found to upload." echo "ERROR: No Linux amd64 artifacts were found to upload."
exit 1 exit 1
@ -359,31 +218,20 @@ jobs:
needs: autotag needs: autotag
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
image: 172.0.0.29:3000/sarman/trcaa-windows-cross:rust1.88-node22 image: rust:1.88-slim
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
apt-get update -qq && apt-get install -y -qq git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
git fetch --depth=1 origin "$GITHUB_SHA" git fetch --depth=1 origin "$GITHUB_SHA"
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache cargo registry - name: Install dependencies
uses: actions/cache@v4 run: |
with: apt-get update -qq && apt-get install -y -qq mingw-w64 curl nsis perl make jq
path: | curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
~/.cargo/registry/index apt-get install -y nodejs
~/.cargo/registry/cache
~/.cargo/git/db
key: ${{ runner.os }}-cargo-windows-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-windows-
- name: Cache npm
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-npm-
- name: Build - name: Build
env: env:
CC_x86_64_pc_windows_gnu: x86_64-w64-mingw32-gcc CC_x86_64_pc_windows_gnu: x86_64-w64-mingw32-gcc
@ -394,6 +242,7 @@ jobs:
OPENSSL_STATIC: "1" OPENSSL_STATIC: "1"
run: | run: |
npm ci --legacy-peer-deps npm ci --legacy-peer-deps
rustup target add x86_64-pc-windows-gnu
CI=true npx tauri build --target x86_64-pc-windows-gnu CI=true npx tauri build --target x86_64-pc-windows-gnu
- name: Upload artifacts - name: Upload artifacts
env: env:
@ -543,31 +392,53 @@ jobs:
needs: autotag needs: autotag
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
image: 172.0.0.29:3000/sarman/trcaa-linux-arm64:rust1.88-node22 image: ubuntu:22.04
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
apt-get update -qq && apt-get install -y -qq git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
git fetch --depth=1 origin "$GITHUB_SHA" git fetch --depth=1 origin "$GITHUB_SHA"
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache cargo registry - name: Install dependencies
uses: actions/cache@v4 env:
with: DEBIAN_FRONTEND: noninteractive
path: | run: |
~/.cargo/registry/index # Step 1: Host tools + cross-compiler (all amd64, no multiarch yet)
~/.cargo/registry/cache apt-get update -qq
~/.cargo/git/db apt-get install -y -qq curl git gcc g++ make patchelf pkg-config perl jq \
key: ${{ runner.os }}-cargo-arm64-${{ hashFiles('**/Cargo.lock') }} gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
restore-keys: |
${{ runner.os }}-cargo-arm64- # Step 2: Multiarch — Ubuntu uses ports.ubuntu.com for arm64,
- name: Cache npm # keeping it on a separate mirror from amd64 (archive.ubuntu.com).
uses: actions/cache@v4 # This avoids the binary-all index duplication and -dev package
with: # conflicts that plagued the Debian single-mirror approach.
path: ~/.npm dpkg --add-architecture arm64
key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }} sed -i 's|^deb http://archive.ubuntu.com|deb [arch=amd64] http://archive.ubuntu.com|g' /etc/apt/sources.list
restore-keys: | sed -i 's|^deb http://security.ubuntu.com|deb [arch=amd64] http://security.ubuntu.com|g' /etc/apt/sources.list
${{ runner.os }}-npm- printf '%s\n' \
'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted universe multiverse' \
'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted universe multiverse' \
'deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted universe multiverse' \
> /etc/apt/sources.list.d/arm64-ports.list
apt-get update -qq
# Step 3: ARM64 dev libs — libayatana omitted (no tray icon in this app)
apt-get install -y -qq \
libwebkit2gtk-4.1-dev:arm64 \
libssl-dev:arm64 \
libgtk-3-dev:arm64 \
librsvg2-dev:arm64
# Step 4: Node.js
curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
apt-get install -y nodejs
# Step 5: Rust (not pre-installed in ubuntu:22.04)
# source "$HOME/.cargo/env" in the Build step handles PATH — no GITHUB_PATH needed
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \
--default-toolchain 1.88.0 --profile minimal --no-modify-path
- name: Build - name: Build
env: env:
CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc
@ -581,7 +452,9 @@ jobs:
OPENSSL_STATIC: "1" OPENSSL_STATIC: "1"
APPIMAGE_EXTRACT_AND_RUN: "1" APPIMAGE_EXTRACT_AND_RUN: "1"
run: | run: |
. "$HOME/.cargo/env"
npm ci --legacy-peer-deps npm ci --legacy-peer-deps
rustup target add aarch64-unknown-linux-gnu
CI=true npx tauri build --target aarch64-unknown-linux-gnu --bundles deb,rpm CI=true npx tauri build --target aarch64-unknown-linux-gnu --bundles deb,rpm
- name: Upload artifacts - name: Upload artifacts
env: env:

View File

@ -37,11 +37,11 @@ jobs:
linux-amd64: linux-amd64:
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
image: alpine:latest image: docker:24-cli
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
apk add --no-cache git docker-cli apk add --no-cache git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
git fetch --depth=1 origin "$GITHUB_SHA" git fetch --depth=1 origin "$GITHUB_SHA"
@ -60,11 +60,11 @@ jobs:
windows-cross: windows-cross:
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
image: alpine:latest image: docker:24-cli
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
apk add --no-cache git docker-cli apk add --no-cache git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
git fetch --depth=1 origin "$GITHUB_SHA" git fetch --depth=1 origin "$GITHUB_SHA"
@ -83,11 +83,11 @@ jobs:
linux-arm64: linux-arm64:
runs-on: linux-amd64 runs-on: linux-amd64
container: container:
image: alpine:latest image: docker:24-cli
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
apk add --no-cache git docker-cli apk add --no-cache git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
git fetch --depth=1 origin "$GITHUB_SHA" git fetch --depth=1 origin "$GITHUB_SHA"

View File

@ -1,134 +0,0 @@
name: PR Review Automation
on:
pull_request:
types: [opened, synchronize, reopened, edited]
concurrency:
group: pr-review-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
review:
runs-on: ubuntu-latest
permissions:
pull-requests: write
container:
image: ubuntu:22.04
options: --dns 8.8.8.8 --dns 1.1.1.1
steps:
- name: Install dependencies
shell: bash
run: |
set -euo pipefail
apt-get update -qq && apt-get install -y -qq git curl jq
- name: Checkout code
shell: bash
env:
REPOSITORY: ${{ github.repository }}
run: |
set -euo pipefail
git init
git remote add origin "https://gogs.tftsr.com/${REPOSITORY}.git"
git fetch --depth=1 origin ${{ github.head_ref }}
git checkout FETCH_HEAD
- name: Get PR diff
id: diff
shell: bash
run: |
set -euo pipefail
git fetch origin ${{ github.base_ref }}
git diff origin/${{ github.base_ref }}..HEAD > /tmp/pr_diff.txt
echo "diff_size=$(wc -l < /tmp/pr_diff.txt | tr -d ' ')" >> $GITHUB_OUTPUT
- name: Analyze with LLM
id: analyze
if: steps.diff.outputs.diff_size != '0'
shell: bash
env:
LITELLM_URL: http://172.0.0.29:11434/v1
LITELLM_API_KEY: ${{ secrets.OLLAMA_API_KEY }}
PR_TITLE: ${{ github.event.pull_request.title }}
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
set -euo pipefail
if grep -q "^Binary files" /tmp/pr_diff.txt; then
echo "WARNING: Binary file changes detected — they will be excluded from analysis"
fi
DIFF_CONTENT=$(head -n 500 /tmp/pr_diff.txt \
| grep -v -E '^[+-].*(password[[:space:]]*[=:"'"'"']|token[[:space:]]*[=:"'"'"']|secret[[:space:]]*[=:"'"'"']|api_key[[:space:]]*[=:"'"'"']|private_key[[:space:]]*[=:"'"'"']|Authorization:[[:space:]]|AKIA[A-Z0-9]{16}|xox[baprs]-[0-9]{10,13}-[0-9]{10,13}-[a-zA-Z0-9]{24}|gh[opsu]_[A-Za-z0-9_]{36,}|https?://[^@[:space:]]+:[^@[:space:]]+@)' \
| grep -v -E '^[+-].*[A-Za-z0-9+/]{40,}={0,2}([^A-Za-z0-9+/=]|$)')
PROMPT="Analyze the following code changes for correctness, security issues, and best practices. PR Title: ${PR_TITLE}\n\nDiff:\n${DIFF_CONTENT}\n\nProvide a review with: 1) Summary, 2) Bugs/errors, 3) Security issues, 4) Best practices. Give specific comments with suggested fixes."
BODY=$(jq -cn \
--arg model "qwen2.5-72b" \
--arg content "$PROMPT" \
'{model: $model, messages: [{role: "user", content: $content}], stream: false}')
echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] PR #${PR_NUMBER} - Calling liteLLM API (${#BODY} bytes)..."
HTTP_CODE=$(curl -s --max-time 300 --connect-timeout 30 \
--retry 3 --retry-delay 10 --retry-connrefused --retry-max-time 300 \
-o /tmp/llm_response.json -w "%{http_code}" \
-X POST "$LITELLM_URL/chat/completions" \
-H "Authorization: Bearer $LITELLM_API_KEY" \
-H "Content-Type: application/json" \
-d "$BODY")
echo "HTTP status: $HTTP_CODE"
echo "Response file size: $(wc -c < /tmp/llm_response.json) bytes"
if [ "$HTTP_CODE" != "200" ]; then
echo "ERROR: liteLLM returned HTTP $HTTP_CODE"
cat /tmp/llm_response.json
exit 1
fi
if ! jq empty /tmp/llm_response.json 2>/dev/null; then
echo "ERROR: Invalid JSON response from liteLLM"
cat /tmp/llm_response.json
exit 1
fi
REVIEW=$(jq -r '.choices[0].message.content // empty' /tmp/llm_response.json)
if [ -z "$REVIEW" ]; then
echo "ERROR: No content in liteLLM response"
exit 1
fi
echo "Review length: ${#REVIEW} chars"
echo "$REVIEW" > /tmp/pr_review.txt
- name: Post review comment
if: always() && steps.diff.outputs.diff_size != '0'
shell: bash
env:
TF_TOKEN: ${{ secrets.TFT_GITEA_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
REPOSITORY: ${{ github.repository }}
run: |
set -euo pipefail
if [ -z "${TF_TOKEN:-}" ]; then
echo "ERROR: TFT_GITEA_TOKEN secret is not set"
exit 1
fi
if [ -f "/tmp/pr_review.txt" ] && [ -s "/tmp/pr_review.txt" ]; then
REVIEW_BODY=$(head -c 65536 /tmp/pr_review.txt)
BODY=$(jq -n \
--arg body "Automated PR Review (qwen2.5-72b via liteLLM):\n\n${REVIEW_BODY}\n\n---\n*automated code review*" \
'{body: $body, event: "COMMENT"}')
else
BODY=$(jq -n \
'{body: "Automated PR Review could not be completed - LLM analysis failed or produced no output.", event: "COMMENT"}')
fi
HTTP_CODE=$(curl -s --max-time 30 --connect-timeout 10 \
-o /tmp/review_post_response.json -w "%{http_code}" \
-X POST "https://gogs.tftsr.com/api/v1/repos/${REPOSITORY}/pulls/${PR_NUMBER}/reviews" \
-H "Authorization: Bearer $TF_TOKEN" \
-H "Content-Type: application/json" \
-d "$BODY")
echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Post review HTTP status: $HTTP_CODE"
if [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "201" ]; then
echo "ERROR: Failed to post review (HTTP $HTTP_CODE)"
cat /tmp/review_post_response.json
exit 1
fi
- name: Cleanup
if: always()
shell: bash
run: rm -f /tmp/pr_diff.txt /tmp/llm_response.json /tmp/pr_review.txt /tmp/review_post_response.json

View File

@ -1,20 +1,18 @@
name: Test name: Test
on: on:
push:
branches:
- master
pull_request: pull_request:
jobs: jobs:
rust-fmt-check: rust-fmt-check:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: 172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22 image: rust:1.88-slim
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
set -eux set -eux
apt-get update -qq && apt-get install -y -qq git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
if [ -n "${GITHUB_SHA:-}" ] && git fetch --depth=1 origin "$GITHUB_SHA"; then if [ -n "${GITHUB_SHA:-}" ] && git fetch --depth=1 origin "$GITHUB_SHA"; then
@ -30,31 +28,18 @@ jobs:
echo "Fetched fallback ref: master" echo "Fetched fallback ref: master"
fi fi
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache cargo registry - run: rustup component add rustfmt
uses: actions/cache@v4
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-linux-amd64-
- name: Install dependencies
run: npm install --legacy-peer-deps
- name: Update version from Git
run: node scripts/update-version.mjs
- run: cargo generate-lockfile --manifest-path src-tauri/Cargo.toml
- run: cargo fmt --manifest-path src-tauri/Cargo.toml --check - run: cargo fmt --manifest-path src-tauri/Cargo.toml --check
rust-clippy: rust-clippy:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: 172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22 image: rust:1.88-slim
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
set -eux set -eux
apt-get update -qq && apt-get install -y -qq git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
if [ -n "${GITHUB_SHA:-}" ] && git fetch --depth=1 origin "$GITHUB_SHA"; then if [ -n "${GITHUB_SHA:-}" ] && git fetch --depth=1 origin "$GITHUB_SHA"; then
@ -70,26 +55,19 @@ jobs:
echo "Fetched fallback ref: master" echo "Fetched fallback ref: master"
fi fi
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache cargo registry - run: apt-get update -qq && apt-get install -y -qq libwebkit2gtk-4.1-dev libssl-dev libgtk-3-dev libayatana-appindicator3-dev librsvg2-dev patchelf pkg-config perl
uses: actions/cache@v4 - run: rustup component add clippy
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-linux-amd64-
- run: cargo clippy --manifest-path src-tauri/Cargo.toml -- -D warnings - run: cargo clippy --manifest-path src-tauri/Cargo.toml -- -D warnings
rust-tests: rust-tests:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: 172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22 image: rust:1.88-slim
steps: steps:
- name: Checkout - name: Checkout
run: | run: |
set -eux set -eux
apt-get update -qq && apt-get install -y -qq git
git init git init
git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git git remote add origin http://172.0.0.29:3000/sarman/tftsr-devops_investigation.git
if [ -n "${GITHUB_SHA:-}" ] && git fetch --depth=1 origin "$GITHUB_SHA"; then if [ -n "${GITHUB_SHA:-}" ] && git fetch --depth=1 origin "$GITHUB_SHA"; then
@ -105,16 +83,7 @@ jobs:
echo "Fetched fallback ref: master" echo "Fetched fallback ref: master"
fi fi
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache cargo registry - run: apt-get update -qq && apt-get install -y -qq libwebkit2gtk-4.1-dev libssl-dev libgtk-3-dev libayatana-appindicator3-dev librsvg2-dev patchelf pkg-config perl
uses: actions/cache@v4
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: ${{ runner.os }}-cargo-linux-amd64-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-linux-amd64-
- run: cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1 - run: cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1
frontend-typecheck: frontend-typecheck:
@ -141,13 +110,6 @@ jobs:
echo "Fetched fallback ref: master" echo "Fetched fallback ref: master"
fi fi
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache npm
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-npm-
- run: npm ci --legacy-peer-deps - run: npm ci --legacy-peer-deps
- run: npx tsc --noEmit - run: npx tsc --noEmit
@ -175,12 +137,5 @@ jobs:
echo "Fetched fallback ref: master" echo "Fetched fallback ref: master"
fi fi
git checkout FETCH_HEAD git checkout FETCH_HEAD
- name: Cache npm
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-npm-
- run: npm ci --legacy-peer-deps - run: npm ci --legacy-peer-deps
- run: npm run test:run - run: npm run test:run

View File

@ -8,7 +8,6 @@
| Frontend only (port 1420) | `npm run dev` | | Frontend only (port 1420) | `npm run dev` |
| Frontend production build | `npm run build` | | Frontend production build | `npm run build` |
| Rust fmt check | `cargo fmt --manifest-path src-tauri/Cargo.toml --check` | | Rust fmt check | `cargo fmt --manifest-path src-tauri/Cargo.toml --check` |
| Rust fmt fix | `cargo fmt --manifest-path src-tauri/Cargo.toml` |
| Rust clippy | `cargo clippy --manifest-path src-tauri/Cargo.toml -- -D warnings` | | Rust clippy | `cargo clippy --manifest-path src-tauri/Cargo.toml -- -D warnings` |
| Rust tests | `cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1` | | Rust tests | `cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1` |
| Rust single test module | `cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1 pii::detector` | | Rust single test module | `cargo test --manifest-path src-tauri/Cargo.toml -- --test-threads=1 pii::detector` |
@ -17,9 +16,6 @@
| Frontend test (watch) | `npm run test` | | Frontend test (watch) | `npm run test` |
| Frontend coverage | `npm run test:coverage` | | Frontend coverage | `npm run test:coverage` |
| TypeScript type check | `npx tsc --noEmit` | | TypeScript type check | `npx tsc --noEmit` |
| Frontend lint | `npx eslint . --quiet` |
**Lint Policy**: **ALWAYS run `cargo fmt` and `cargo clippy` after any Rust code change**. Fix all issues before proceeding.
**Note**: The build runs `npm run build` before Rust build (via `beforeBuildCommand` in `tauri.conf.json`). This ensures TS is type-checked before packaging. **Note**: The build runs `npm run build` before Rust build (via `beforeBuildCommand` in `tauri.conf.json`). This ensures TS is type-checked before packaging.

View File

@ -1,467 +0,0 @@
# Changelog
All notable changes to TFTSR are documented here.
Commit types shown: feat, fix, perf, docs, refactor.
CI, chore, and build changes are excluded.
## [Unreleased]
### Bug Fixes
- Harden timeline event input validation and atomic writes
### Documentation
- Update wiki for timeline events and incident response methodology
### Features
- Add timeline_events table, model, and CRUD commands
- Populate RCA and postmortem docs with real timeline data
- Wire incident response methodology into AI and record triage events
## [0.2.65] — 2026-04-15
### Bug Fixes
- Add --locked to cargo commands and improve version update script
- Remove invalid --locked flag from cargo commands and fix format string
- **integrations**: Security and correctness improvements
- Correct WIQL syntax and escape_wiql implementation
### Features
- Implement dynamic versioning from Git tags
- **integrations**: Implement query expansion for semantic search
### Security
- Fix query expansion issues from PR review
- Address all issues from automated PR review
## [0.2.63] — 2026-04-13
### Bug Fixes
- Add Windows nsis target and update CHANGELOG to v0.2.61
## [0.2.61] — 2026-04-13
### Bug Fixes
- Remove AppImage from upload artifact patterns
## [0.2.59] — 2026-04-13
### Bug Fixes
- Remove AppImage bundling to fix linux-amd64 build
## [0.2.57] — 2026-04-13
### Bug Fixes
- Add fuse dependency for AppImage support
### Refactoring
- Remove custom linuxdeploy install per CI — CI uses tauri-downloaded version
- Revert to original Dockerfile without manual linuxdeploy installation
## [0.2.56] — 2026-04-13
### Bug Fixes
- Add missing ai_providers columns and fix linux-amd64 build
- Address AI review findings
- Address critical AI review issues
## [0.2.55] — 2026-04-13
### Bug Fixes
- **ci**: Use Gitea file API to push CHANGELOG.md — eliminates non-fast-forward rejection
- **ci**: Harden CHANGELOG.md API push step per review
## [0.2.54] — 2026-04-13
### Bug Fixes
- **ci**: Correct git-cliff archive path in tar extraction
## [0.2.53] — 2026-04-13
### Features
- **ci**: Add automated changelog generation via git-cliff
## [0.2.52] — 2026-04-13
### Bug Fixes
- **ci**: Add APPIMAGE_EXTRACT_AND_RUN to build-linux-amd64
## [0.2.51] — 2026-04-13
### Bug Fixes
- **ci**: Address AI review — rustup idempotency and cargo --locked
- **ci**: Replace docker:24-cli with alpine + docker-cli in build-images
- **docker**: Add ca-certificates to arm64 base image step 1
- **ci**: Resolve test.yml failures — Cargo.lock, updated test assertions
- **ci**: Address second AI review — || true, ca-certs, cache@v4, key suffixes
### Documentation
- **docker**: Expand rebuild trigger comments to include OpenSSL and Tauri CLI
### Performance
- **ci**: Use pre-baked images and add cargo/npm caching
## [0.2.50] — 2026-04-12
### Bug Fixes
- Rename GITEA_TOKEN to TF_TOKEN to comply with naming restrictions
- Remove actions/checkout to avoid Node.js dependency
- Use ubuntu container with git installed
- Use actions/checkout with token auth and self-hosted runner
- Use IP addresses for internal services
- Simplified workflow syntax
- Add debugging output for Ollama response
- Correct Ollama URL, API endpoint, and JSON construction in pr-review workflow
- Add diagnostics to identify empty Ollama response root cause
- Use bash shell and remove bash-only substring expansion in pr-review
- Restore migration 014, bump version to 0.2.50, harden pr-review workflow
- Harden pr-review workflow and sync versions to 0.2.50
- Configure container DNS to resolve ollama-ui.tftsr.com
- Harden pr-review workflow — URLs, DNS, correctness and reliability
- Resolve AI review false positives and address high/medium issues
- Replace github.server_url with hardcoded gogs.tftsr.com for container access
- Revert to two-dot diff — three-dot requires merge base unavailable in shallow clone
- Harden pr-review workflow — secret redaction, log safety, auth header
### Features
- Add automated PR review workflow with Ollama AI
## [0.2.49] — 2026-04-10
### Bug Fixes
- Add missing ai_providers migration (014)
## [0.2.48] — 2026-04-10
### Bug Fixes
- Lint fixes and formatting cleanup
### Features
- Support GenAI datastore file uploads and fix paste image upload
## [0.2.47] — 2026-04-09
### Bug Fixes
- Use 'provider' argument name to match Rust command signature
## [0.2.46] — 2026-04-09
### Bug Fixes
- Add @types/testing-library__react for TypeScript compilation
### Update
- Node_modules from npm install
## [0.2.45] — 2026-04-09
### Bug Fixes
- Force single test thread for Rust tests to eliminate race conditions
## [0.2.43] — 2026-04-09
### Bug Fixes
- Fix encryption test race condition with parallel tests
- OpenWebUI provider connection and missing command registrations
### Features
- Add image attachment support with PII detection
## [0.2.42] — 2026-04-07
### Documentation
- Add AGENTS.md and SECURITY_AUDIT.md
## [0.2.41] — 2026-04-07
### Bug Fixes
- **db,auth**: Auto-generate encryption keys for release builds
- **lint**: Use inline format args in auth.rs
- **lint**: Resolve all clippy warnings for CI compliance
- **fmt**: Apply rustfmt formatting to webview_fetch.rs
- **types**: Replace normalizeApiFormat() calls with direct value
### Documentation
- **architecture**: Add C4 diagrams, ADRs, and architecture overview
### Features
- **ai**: Add tool-calling and integration search as AI data source
## [0.2.40] — 2026-04-06
### Bug Fixes
- **ci**: Remove explicit docker.sock mount — act_runner mounts it automatically
## [0.2.36] — 2026-04-06
### Features
- **ci**: Add persistent pre-baked Docker builder images
## [0.2.35] — 2026-04-06
### Bug Fixes
- **ci**: Skip Ollama download on macOS build — runner has no access to GitHub binary assets
- **ci**: Remove all Ollama bundle download steps — use UI download button instead
### Refactoring
- **ollama**: Remove download/install buttons — show plain install instructions only
## [0.2.34] — 2026-04-06
### Bug Fixes
- **security**: Add path canonicalization and actionable permission error in install_ollama_from_bundle
### Features
- **ui**: Fix model dropdown, auth prefill, PII persistence, theme toggle, and Ollama bundle
## [0.2.33] — 2026-04-05
### Features
- **rebrand**: Rename binary to trcaa and auto-generate DB key
## [0.2.32] — 2026-04-05
### Bug Fixes
- **ci**: Restrict arm64 bundles to deb,rpm — skip AppImage
## [0.2.31] — 2026-04-05
### Bug Fixes
- **ci**: Set APPIMAGE_EXTRACT_AND_RUN=1 for arm64 AppImage bundling
## [0.2.30] — 2026-04-05
### Bug Fixes
- **ci**: Add make to arm64 host tools for OpenSSL vendored build
## [0.2.28] — 2026-04-05
### Bug Fixes
- **ci**: Use POSIX dot instead of source in arm64 build step
## [0.2.27] — 2026-04-05
### Bug Fixes
- **ci**: Remove GITHUB_PATH append that was breaking arm64 install step
## [0.2.26] — 2026-04-05
### Bug Fixes
- **ci**: Switch build-linux-arm64 to Ubuntu 22.04 with ports mirror
### Documentation
- Update CI pipeline wiki and add ticket summary for arm64 fix
## [0.2.25] — 2026-04-05
### Bug Fixes
- **ci**: Rebuild apt sources with per-arch entries before arm64 cross-compile install
- **ci**: Add workflow_dispatch and concurrency guard to auto-tag
- **ci**: Replace heredoc with printf in arm64 install step
## [0.2.24] — 2026-04-05
### Bug Fixes
- **ci**: Fix arm64 cross-compile, drop cargo install tauri-cli, move wiki-sync
## [0.2.23] — 2026-04-05
### Bug Fixes
- **ci**: Unblock release jobs and namespace linux artifacts by arch
- **security**: Harden secret handling and audit integrity
- **pii**: Remove lookahead from hostname regex, fix fmt in analysis test
- **security**: Enforce PII redaction before AI log transmission
- **ci**: Unblock release jobs and namespace linux artifacts by arch
## [0.2.22] — 2026-04-05
### Bug Fixes
- **ci**: Run linux arm release natively and enforce arm artifacts
## [0.2.21] — 2026-04-05
### Bug Fixes
- **ci**: Force explicit linux arm64 target for release artifacts
## [0.2.20] — 2026-04-05
### Refactoring
- **ci**: Remove standalone release workflow
## [0.2.19] — 2026-04-05
### Bug Fixes
- **ci**: Guarantee release jobs run after auto-tag
- **ci**: Use stable auto-tag job outputs for release fanout
- **ci**: Run post-tag release builds without job-output gating
- **ci**: Repair auto-tag workflow yaml so jobs trigger
## [0.2.18] — 2026-04-05
### Bug Fixes
- **ci**: Trigger release workflow from auto-tag pushes
## [0.2.17] — 2026-04-05
### Bug Fixes
- **ci**: Harden release asset uploads for reruns
## [0.2.16] — 2026-04-05
### Bug Fixes
- **ci**: Make release artifacts reliable across platforms
## [0.2.14] — 2026-04-04
### Bug Fixes
- Resolve macOS bundle path after app rename
## [0.2.13] — 2026-04-04
### Bug Fixes
- Resolve clippy uninlined_format_args in integrations and related modules
- Resolve clippy format-args failures and OpenSSL vendoring issue
### Features
- Add custom_rest provider mode and rebrand application name
## [0.2.12] — 2026-04-04
### Bug Fixes
- ARM64 build uses native target instead of cross-compile
## [0.2.11] — 2026-04-04
### Bug Fixes
- Persist integration settings and implement persistent browser windows
## [0.2.10] — 2026-04-03
### Features
- Complete webview cookie extraction implementation
## [0.2.9] — 2026-04-03
### Features
- Add multi-mode authentication for integrations (v0.2.10)
## [0.2.8] — 2026-04-03
### Features
- Add temperature and max_tokens support for Custom REST providers (v0.2.9)
## [0.2.7] — 2026-04-03
### Bug Fixes
- Use Wiki secret for authenticated wiki sync (v0.2.8)
### Documentation
- Update wiki for v0.2.6 - integrations and Custom REST provider
### Features
- Add automatic wiki sync to CI workflow (v0.2.7)
## [0.2.6] — 2026-04-03
### Bug Fixes
- Add user_id support and OAuth shell permission (v0.2.6)
## [0.2.5] — 2026-04-03
### Documentation
- Add Custom REST provider documentation
### Features
- Implement Confluence, ServiceNow, and Azure DevOps REST API clients
- Add Custom REST provider support
## [0.2.4] — 2026-04-03
### Features
- Implement OAuth2 token exchange and AES-256-GCM encryption
- Add OAuth2 Tauri commands for integration authentication
- Implement OAuth2 callback server with automatic token exchange
- Add OAuth2 frontend UI and complete integration flow
## [0.2.3] — 2026-04-03
### Bug Fixes
- Improve Cancel button contrast in AI disclaimer modal
### Features
- Add database schema for integration credentials and config
## [0.2.1] — 2026-04-03
### Bug Fixes
- Implement native DOCX export without pandoc dependency
### Features
- Add AI disclaimer modal before creating new issues
## [0.1.0] — 2026-04-03
### Bug Fixes
- Resolve all clippy lints (uninlined format args, range::contains, push_str single chars)
- Inline format args for Rust 1.88 clippy compatibility
- Retain GPU-VRAM-eligible models in recommender even when RAM is low
- Use alpine/git with explicit checkout for tag-based release builds
- Set CI=true for cargo tauri build — Woodpecker sets CI=woodpecker which Tauri CLI rejects
- Arm64 cross-compilation — add multiarch pkg-config sysroot setup
- Remove arm64 from release pipeline — webkit2gtk multiarch conflict on x86_64 host
- Write artifacts to workspace (shared between steps), not /artifacts/
- Upload step needs gogs_default network to reach Gogs API (host firewall blocks default bridge)
- Use bundled-sqlcipher-vendored-openssl for portable Windows cross-compilation
- Add make to windows build step (required by vendored OpenSSL)
- Replace empty icon placeholder files with real app icons
- Suppress MinGW auto-export to resolve Windows DLL ordinal overflow
- Use when: platform: for arm64 step routing (Woodpecker 0.15.4 compat)
- Remove unused tauri-plugin-cli causing startup crash
- Use $GITHUB_REF_NAME env var instead of ${{ github.ref_name }} expression
- Remove unused tauri-plugin-updater + SQLCipher 16KB page size
- Prevent WebKit/GTK system theme from overriding input text colors on Linux
- Set SQLCipher cipher_page_size BEFORE first database access
- Button text visibility, toggle contrast, create_issue IPC, ad-hoc codesign
- Dropdown text invisible on macOS + correct codesign order for DMG
- Add explicit text-foreground to SelectTrigger, SelectValue, and SelectItem
- Ollama detection, install guide UI, and AI Providers auto-fill
- Provider test FK error, model pull white screen, RECOMMENDED badge
- Provider routing uses provider_type, Active badge, fmt
- Navigate to /logs after issue creation, fix dashboard category display
- Dashboard shows — while loading, exposes errors, adds refresh button
- ListIssuesCmd was sending {query} but Rust expects {filter} — caused dashboard to always show 0 open issues
- Arm64 linux cross-compilation — add multiarch and pkg-config env vars
- Close from chat works before issue loads; save user reason as resolution step; dynamic version
- DomainPrompts closing brace too early; arm64 use native platform image
- UI contrast issues and ARM64 build failure
- Remove Woodpecker CI and fix Gitea Actions ARM64 build
- UI visibility issues, export errors, filtering, and audit log enhancement
- ARM64 build native compilation instead of cross-compilation
- Improve release artifact upload error handling
- Install jq in Linux/Windows build containers
- Improve download button visibility and add DOCX export
### Documentation
- Update PLAN.md with accurate implementation status
- Add CLAUDE.md with development guidance
- Add wiki source files and CI auto-sync pipeline
- Update PLAN.md - Phase 11 complete, redact token references
- Update README and wiki for v0.1.0-alpha release
- Remove broken arm64 CI step, document Woodpecker 0.15.4 limitation
- Update README and wiki for Gitea Actions migration
- Update README, wiki, and UI version to v0.1.1
- Add LiteLLM + AWS Bedrock integration guide
### Features
- Initial implementation of TFTSR IT Triage & RCA application
- Add Windows amd64 cross-compile to release pipeline; add arm64 QEMU agent
- Add native linux/arm64 release build step
- Add macOS arm64 act_runner and release build job
- Auto-increment patch tag on every merge to master
- Inline file/screenshot attachment in triage chat
- Close issues, restore history, auto-save resolution steps
- Expand domains to 13 — add Telephony, Security/Vault, Public Safety, Application, Automation/CI-CD
- Add HPE, Dell, Identity domains + expand k8s/security/observability/VESTA NXT
### Security
- Rotate exposed token, redact from PLAN.md, add secret patterns to .gitignore

View File

@ -1,41 +0,0 @@
# git-cliff configuration for automated TFTSR changelog generation.
# The [changelog] table controls rendered output; [git] controls which
# commits are parsed and how they are grouped.
[changelog]
# Static preamble written once at the top of CHANGELOG.md.
header = """
# Changelog
All notable changes to TFTSR are documented here.
Commit types shown: feat, fix, perf, docs, refactor.
CI, chore, and build changes are excluded.
"""
# Tera template rendered once per release: a "## [version] — date" heading
# (or "## [Unreleased]" when no tag applies), then commits grouped by type.
# NOTE: do not insert comments or whitespace inside this string — it is
# emitted verbatim into the generated changelog.
body = """
{% if version -%}
## [{{ version | trim_start_matches(pat="v") }}] — {{ timestamp | date(format="%Y-%m-%d") }}
{% else -%}
## [Unreleased]
{% endif -%}
{% for group, commits in commits | group_by(attribute="group") -%}
### {{ group | upper_first }}
{% for commit in commits -%}
- {% if commit.scope %}**{{ commit.scope }}**: {% endif %}{{ commit.message | upper_first }}
{% endfor %}
{% endfor %}
"""
# No trailing text after the last release section.
footer = ""
# Strip leading/trailing whitespace from the rendered template output.
trim = true
[git]
# Only parse commits that follow the Conventional Commits format.
conventional_commits = true
# Drop commits whose message does not match a conventional type prefix.
filter_unconventional = true
# Release tags look like v0.2.45; anything else is not a version boundary.
tag_pattern = "v[0-9].*"
# Exclude pre-release tags (rc/alpha/beta) from version computation.
ignore_tags = "rc|alpha|beta"
# List commits oldest-first within each release section.
sort_commits = "oldest"
# Map commit-type prefixes to changelog section headings.
# ci/chore/build/test/style commits are skipped entirely, matching the
# exclusion note in the header above.
commit_parsers = [
{ message = "^feat", group = "Features" },
{ message = "^fix", group = "Bug Fixes" },
{ message = "^perf", group = "Performance" },
{ message = "^docs", group = "Documentation" },
{ message = "^refactor", group = "Refactoring" },
{ message = "^ci|^chore|^build|^test|^style", skip = true },
]

View File

@ -50,7 +50,7 @@ All command handlers receive `State<'_, AppState>` as a Tauri-injected parameter
| `commands/integrations.rs` | Confluence / ServiceNow / ADO — v0.2 stubs | | `commands/integrations.rs` | Confluence / ServiceNow / ADO — v0.2 stubs |
| `ai/provider.rs` | `Provider` trait + `create_provider()` factory | | `ai/provider.rs` | `Provider` trait + `create_provider()` factory |
| `pii/detector.rs` | Multi-pattern PII scanner with overlap resolution | | `pii/detector.rs` | Multi-pattern PII scanner with overlap resolution |
| `db/migrations.rs` | Versioned schema (17 migrations in `_migrations` table) | | `db/migrations.rs` | Versioned schema (12 migrations in `_migrations` table) |
| `db/models.rs` | All DB types — see `IssueDetail` note below | | `db/models.rs` | All DB types — see `IssueDetail` note below |
| `docs/rca.rs` + `docs/postmortem.rs` | Markdown template builders | | `docs/rca.rs` + `docs/postmortem.rs` | Markdown template builders |
| `audit/log.rs` | `write_audit_event()` — called before every external send | | `audit/log.rs` | `write_audit_event()` — called before every external send |
@ -176,55 +176,6 @@ pub struct IssueDetail {
Use `detail.issue.title`, **not** `detail.title`. Use `detail.issue.title`, **not** `detail.title`.
## Incident Response Methodology
The application integrates a comprehensive incident response framework via system prompt injection. The `INCIDENT_RESPONSE_FRAMEWORK` constant in `src/lib/domainPrompts.ts` is appended to all 17 domain-specific system prompts (Linux, Windows, Network, Kubernetes, Databases, Virtualization, Hardware, Observability, and others).
**5-Phase Framework:**
1. **Detection & Evidence Gathering** — Initial issue assessment, log collection, PII redaction
2. **Diagnosis & Hypothesis Testing** — AI-assisted analysis, pattern matching against known incidents
3. **Root Cause Analysis with 5-Whys** — Iterative questioning to identify underlying cause (steps 1–5)
4. **Resolution & Prevention** — Remediation planning and implementation
5. **Post-Incident Review** — Timeline-based blameless post-mortem and lessons learned
**System Prompt Injection:**
The `chat_message` command accepts an optional `system_prompt` parameter. If provided, it prepends domain expertise before the conversation history. If omitted, the framework selects the appropriate domain prompt based on the issue category. This allows:
- **Specialized expertise**: Different frameworks for Linux vs. Kubernetes vs. Network incidents
- **Flexible override**: Users can inject custom system prompts for cross-domain problems
- **Consistent methodology**: All 17 domain prompts follow the same 5-phase incident response structure
**Timeline Event Recording:**
Timeline events are recorded non-blockingly at key triage moments:
```
Issue Creation → triage_started
Log Upload → log_uploaded (metadata: file_name, file_size)
Why-Level Progression → why_level_advanced (metadata: from_level → to_level)
Root Cause Identified → root_cause_identified (metadata: root_cause, confidence)
RCA Generated → rca_generated (metadata: doc_id, section_count)
Postmortem Generated → postmortem_generated (metadata: doc_id, timeline_events_count)
Document Exported → document_exported (metadata: format, file_path)
```
**Document Generation:**
RCA and Postmortem generators now use real timeline event data instead of placeholders:
- **RCA**: Incorporates timeline to show detection-to-root-cause progression
- **Postmortem**: Uses full timeline to demonstrate the complete incident lifecycle and response effectiveness
Timeline events are stored in the `timeline_events` table (indexed by issue_id and created_at for fast retrieval) and dual-written to `audit_log` for security/compliance purposes.
## Application Startup Sequence ## Application Startup Sequence
``` ```

View File

@ -27,77 +27,12 @@ macOS runner runs jobs **directly on the host** (no Docker container) — macOS
--- ---
## Pre-baked Builder Images ## Test Pipeline (`.woodpecker/test.yml`)
CI build and test jobs use pre-baked Docker images pushed to the local Gitea registry
at `172.0.0.29:3000`. These images bake in all system dependencies (Tauri libs, Node.js,
Rust toolchain, cross-compilers) so that CI jobs skip package installation entirely.
| Image | Used by jobs | Contents |
|-------|-------------|----------|
| `172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22` | `rust-fmt-check`, `rust-clippy`, `rust-tests`, `build-linux-amd64` | Rust 1.88 + rustfmt + clippy + Tauri amd64 libs + Node.js 22 |
| `172.0.0.29:3000/sarman/trcaa-windows-cross:rust1.88-node22` | `build-windows-amd64` | Rust 1.88 + mingw-w64 + NSIS + Node.js 22 |
| `172.0.0.29:3000/sarman/trcaa-linux-arm64:rust1.88-node22` | `build-linux-arm64` | Rust 1.88 + aarch64 cross-toolchain + arm64 multiarch libs + Node.js 22 |
**Rebuild triggers:** Rust toolchain version bump, webkit2gtk/gtk major version change, Node.js major version change.
**How to rebuild images:**
1. Trigger `build-images.yml` via `workflow_dispatch` in the Gitea Actions UI
2. Confirm all 3 images appear in the Gitea package/container registry at `172.0.0.29:3000`
3. Only then merge workflow changes that depend on the new image contents
**Server prerequisite — insecure registry** (one-time, on 172.0.0.29):
```sh
echo '{"insecure-registries":["172.0.0.29:3000"]}' | sudo tee /etc/docker/daemon.json
sudo systemctl restart docker
```
This must be configured on every machine running an act_runner for the runner's Docker
daemon to pull from the local HTTP registry.
---
## Cargo and npm Caching
All Rust and build jobs use `actions/cache@v3` to cache downloaded package artifacts.
Gitea 1.22 implements the GitHub Actions cache API natively.
**Cargo cache** (Rust jobs):
```yaml
- name: Cache cargo registry
uses: actions/cache@v3
with:
path: |
~/.cargo/registry/index
~/.cargo/registry/cache
~/.cargo/git/db
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
```
**npm cache** (frontend and build jobs):
```yaml
- name: Cache npm
uses: actions/cache@v3
with:
path: ~/.npm
key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-npm-
```
Cache keys for cross-compile jobs use a suffix to avoid collisions:
- Windows build: `${{ runner.os }}-cargo-windows-${{ hashFiles('**/Cargo.lock') }}`
- arm64 build: `${{ runner.os }}-cargo-arm64-${{ hashFiles('**/Cargo.lock') }}`
---
## Test Pipeline (`.gitea/workflows/test.yml`)
**Triggers:** Pull requests only. **Triggers:** Pull requests only.
``` ```
Pipeline jobs (run in parallel): Pipeline steps:
1. rust-fmt-check → cargo fmt --check 1. rust-fmt-check → cargo fmt --check
2. rust-clippy → cargo clippy -- -D warnings 2. rust-clippy → cargo clippy -- -D warnings
3. rust-tests → cargo test (64 tests) 3. rust-tests → cargo test (64 tests)
@ -106,9 +41,28 @@ Pipeline jobs (run in parallel):
``` ```
**Docker images used:** **Docker images used:**
- `172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22` — Rust steps (replaces `rust:1.88-slim`) - `rust:1.88-slim` — Rust steps (minimum for cookie_store + time + darling)
- `node:22-alpine` — Frontend steps - `node:22-alpine` — Frontend steps
**Pipeline YAML format (Woodpecker 2.x — steps list format):**
```yaml
clone:
git:
image: woodpeckerci/plugin-git
network_mode: gogs_default # requires repo_trusted=1
environment:
- CI_REPO_CLONE_URL=http://gitea_app:3000/sarman/tftsr-devops_investigation.git
steps:
- name: step-name # LIST format (- name:)
image: rust:1.88-slim
commands:
- cargo test
```
> ⚠️ Woodpecker 2.x uses the `steps:` list format. The legacy `pipeline:` map format from
> Woodpecker 0.15.4 is no longer supported.
--- ---
## Release Pipeline (`.gitea/workflows/auto-tag.yml`) ## Release Pipeline (`.gitea/workflows/auto-tag.yml`)
@ -119,16 +73,14 @@ Auto tags are created by `.gitea/workflows/auto-tag.yml` using `git tag` + `git
Release jobs are executed in the same workflow and depend on `autotag` completion. Release jobs are executed in the same workflow and depend on `autotag` completion.
``` ```
Jobs (run in parallel after autotag): Jobs (run in parallel):
build-linux-amd64 → image: trcaa-linux-amd64:rust1.88-node22 build-linux-amd64 → cargo tauri build (x86_64-unknown-linux-gnu)
→ cargo tauri build (x86_64-unknown-linux-gnu)
→ {.deb, .rpm, .AppImage} uploaded to Gitea release → {.deb, .rpm, .AppImage} uploaded to Gitea release
→ fails fast if no Linux artifacts are produced → fails fast if no Linux artifacts are produced
build-windows-amd64 → image: trcaa-windows-cross:rust1.88-node22 build-windows-amd64 → cargo tauri build (x86_64-pc-windows-gnu) via mingw-w64
→ cargo tauri build (x86_64-pc-windows-gnu) via mingw-w64
→ {.exe, .msi} uploaded to Gitea release → {.exe, .msi} uploaded to Gitea release
→ fails fast if no Windows artifacts are produced → fails fast if no Windows artifacts are produced
build-linux-arm64 → image: trcaa-linux-arm64:rust1.88-node22 (ubuntu:22.04-based) build-linux-arm64 → Ubuntu 22.04 base (ports.ubuntu.com for arm64 packages)
→ cargo tauri build (aarch64-unknown-linux-gnu) → cargo tauri build (aarch64-unknown-linux-gnu)
→ {.deb, .rpm, .AppImage} uploaded to Gitea release → {.deb, .rpm, .AppImage} uploaded to Gitea release
→ fails fast if no Linux artifacts are produced → fails fast if no Linux artifacts are produced
@ -257,52 +209,6 @@ UPDATE protect_branch SET protected=true, require_pull_request=true WHERE repo_i
--- ---
## Changelog Generation
Changelogs are generated automatically by **git-cliff** on every release.
Configuration lives in `cliff.toml` at the repo root.
### How it works
A `changelog` job in `auto-tag.yml` runs in parallel with the build jobs, immediately
after `autotag` completes:
1. Clones the full repo history with all tags (`--depth=2147483647` — git-cliff needs
every tag to compute version boundaries).
2. Downloads the git-cliff v2.7.0 static musl binary (~5 MB, no image change needed).
3. Runs `git-cliff --output CHANGELOG.md` to regenerate the full cumulative changelog.
4. Runs `git-cliff --latest --strip all` to produce release notes for the new tag only.
5. PATCHes the Gitea release body with those notes (replaces the static `"Release vX.Y.Z"`).
6. Commits `CHANGELOG.md` to master with `[skip ci]` appended to the message.
The `[skip ci]` token prevents `auto-tag.yml` from re-triggering on the CHANGELOG commit.
7. Uploads `CHANGELOG.md` as a release asset (replaces any previous version).
### cliff.toml reference
| Setting | Value |
|---------|-------|
| `tag_pattern` | `v[0-9].*` |
| `ignore_tags` | `rc\|alpha\|beta` |
| `filter_unconventional` | `true` — non-conventional commits are dropped |
| Included types | `feat`, `fix`, `perf`, `docs`, `refactor` |
| Excluded types | `ci`, `chore`, `build`, `test`, `style` |
### Loop prevention
The `[skip ci]` suffix on the CHANGELOG commit message is recognised by Gitea Actions
and causes the workflow to be skipped for that push. Without it, the CHANGELOG commit
would trigger `auto-tag.yml` again, incrementing the patch version forever.
### Bootstrap
The initial `CHANGELOG.md` was generated locally before the first PR:
```sh
git-cliff --config cliff.toml --output CHANGELOG.md
```
Subsequent runs are fully automated by CI.
---
## Known Issues & Fixes ## Known Issues & Fixes
### Debian Multiarch Breaks arm64 Cross-Compile (`held broken packages`) ### Debian Multiarch Breaks arm64 Cross-Compile (`held broken packages`)

View File

@ -2,7 +2,7 @@
## Overview ## Overview
TFTSR uses **SQLite** via `rusqlite` with the `bundled-sqlcipher` feature for AES-256 encryption in production. 17 versioned migrations are tracked in the `_migrations` table. TFTSR uses **SQLite** via `rusqlite` with the `bundled-sqlcipher` feature for AES-256 encryption in production. 12 versioned migrations are tracked in the `_migrations` table.
**DB file location:** `{app_data_dir}/tftsr.db` **DB file location:** `{app_data_dir}/tftsr.db`
@ -38,7 +38,7 @@ pub fn init_db(data_dir: &Path) -> anyhow::Result<Connection> {
--- ---
## Schema (17 Migrations) ## Schema (11 Migrations)
### 001 — issues ### 001 — issues
@ -245,51 +245,6 @@ CREATE TABLE image_attachments (
- Basic auth (ServiceNow): Store encrypted password - Basic auth (ServiceNow): Store encrypted password
- One credential per service (enforced by UNIQUE constraint) - One credential per service (enforced by UNIQUE constraint)
### 017 — timeline_events (Incident Response Timeline)
```sql
CREATE TABLE timeline_events (
id TEXT PRIMARY KEY,
issue_id TEXT NOT NULL REFERENCES issues(id) ON DELETE CASCADE,
event_type TEXT NOT NULL,
description TEXT NOT NULL,
metadata TEXT, -- JSON object with event-specific data
created_at TEXT NOT NULL
);
CREATE INDEX idx_timeline_events_issue ON timeline_events(issue_id);
CREATE INDEX idx_timeline_events_time ON timeline_events(created_at);
```
**Event Types:**
- `triage_started` — Incident response begins, initial issue properties recorded
- `log_uploaded` — Log file uploaded and analyzed
- `why_level_advanced` — 5-Whys entry completed, progression to next level
- `root_cause_identified` — Root cause determined from analysis
- `rca_generated` — Root Cause Analysis document created
- `postmortem_generated` — Post-mortem document created
- `document_exported` — Document exported to file (MD or PDF)
**Metadata Structure (JSON):**
```json
{
"triage_started": {"severity": "high", "category": "network"},
"log_uploaded": {"file_name": "app.log", "file_size": 2048576},
"why_level_advanced": {"from_level": 2, "to_level": 3, "question": "Why did the service timeout?"},
"root_cause_identified": {"root_cause": "DNS resolution failure", "confidence": 0.95},
"rca_generated": {"doc_id": "doc_abc123", "section_count": 7},
"postmortem_generated": {"doc_id": "doc_def456", "timeline_events_count": 12},
"document_exported": {"format": "pdf", "file_path": "/home/user/docs/rca.pdf"}
}
```
**Design Notes:**
- Timeline events are **queryable** (indexed by issue_id and created_at) for document generation
- Dual-write: Events recorded to both `timeline_events` and `audit_log` — timeline for chronological reporting, audit_log for security/compliance
- `created_at`: TEXT UTC timestamp (`YYYY-MM-DD HH:MM:SS`)
- Non-blocking writes: Timeline events recorded asynchronously at key triage moments
- Cascade delete from issues ensures cleanup
--- ---
## Key Design Notes ## Key Design Notes
@ -334,13 +289,4 @@ pub struct AuditEntry {
pub user_id: String, pub user_id: String,
pub details: Option<String>, pub details: Option<String>,
} }
pub struct TimelineEvent {
pub id: String,
pub issue_id: String,
pub event_type: String,
pub description: String,
pub metadata: Option<String>, // JSON
pub created_at: String,
}
``` ```

View File

@ -62,27 +62,11 @@ updateFiveWhyCmd(entryId: string, answer: string) → void
``` ```
Sets or updates the answer for an existing 5-Whys entry. Sets or updates the answer for an existing 5-Whys entry.
### `get_timeline_events`
```typescript
getTimelineEventsCmd(issueId: string) → TimelineEvent[]
```
Retrieves all timeline events for an issue, ordered by created_at ascending.
```typescript
interface TimelineEvent {
id: string;
issue_id: string;
event_type: string; // One of: triage_started, log_uploaded, why_level_advanced, etc.
description: string;
metadata?: Record<string, any>; // Event-specific JSON data
created_at: string; // UTC timestamp
}
```
### `add_timeline_event` ### `add_timeline_event`
```typescript ```typescript
addTimelineEventCmd(issueId: string, eventType: string, description: string, metadata?: Record<string, any>) → TimelineEvent addTimelineEventCmd(issueId: string, eventType: string, description: string) → TimelineEvent
``` ```
Records a timestamped event in the issue timeline. Dual-writes to both `timeline_events` (for document generation) and `audit_log` (for security audit trail). Records a timestamped event in the issue timeline.
--- ---
@ -153,9 +137,9 @@ Sends selected (redacted) log files to the AI provider with an analysis prompt.
### `chat_message` ### `chat_message`
```typescript ```typescript
chatMessageCmd(issueId: string, message: string, providerConfig: ProviderConfig, systemPrompt?: string) → ChatResponse chatMessageCmd(issueId: string, message: string, providerConfig: ProviderConfig) → ChatResponse
``` ```
Sends a message in the ongoing triage conversation. Optional `systemPrompt` parameter allows prepending domain expertise before conversation history. If not provided, the domain-specific system prompt for the issue category is injected automatically on first message. AI response is parsed for why-level indicators (1–5). Sends a message in the ongoing triage conversation. Domain system prompt is injected automatically on first message. AI response is parsed for why-level indicators (1–5).
### `list_providers` ### `list_providers`
```typescript ```typescript
@ -171,13 +155,13 @@ Returns the list of supported providers with their available models and configur
```typescript ```typescript
generateRcaCmd(issueId: string) → Document generateRcaCmd(issueId: string) → Document
``` ```
Builds an RCA Markdown document from the issue data, 5-Whys answers, and timeline events. Uses real incident response timeline (log uploads, why-level progression, root cause identification) instead of placeholders. Builds an RCA Markdown document from the issue data, 5-Whys answers, and timeline.
### `generate_postmortem` ### `generate_postmortem`
```typescript ```typescript
generatePostmortemCmd(issueId: string) → Document generatePostmortemCmd(issueId: string) → Document
``` ```
Builds a blameless post-mortem Markdown document. Incorporates timeline events to show the full incident lifecycle: detection, diagnosis, resolution, and post-incident review phases. Builds a blameless post-mortem Markdown document.
### `update_document` ### `update_document`
```typescript ```typescript

View File

@ -1,142 +0,0 @@
import globals from "globals";
import pluginReact from "eslint-plugin-react";
import pluginReactHooks from "eslint-plugin-react-hooks";
import pluginTs from "@typescript-eslint/eslint-plugin";
import parserTs from "@typescript-eslint/parser";

// Shared building blocks. The previous config repeated these four structures
// verbatim in every entry; defining them once keeps the rule sets in sync.

// Plugins for React + TypeScript sources.
const reactPlugins = {
  react: pluginReact,
  "react-hooks": pluginReactHooks,
  "@typescript-eslint": pluginTs,
};

const reactSettings = {
  react: {
    version: "detect",
  },
};

// Rule set for React + TypeScript sources (app code and unit tests).
const reactRules = {
  ...pluginReact.configs.recommended.rules,
  ...pluginReactHooks.configs.recommended.rules,
  ...pluginTs.configs.recommended.rules,
  "no-unused-vars": "off",
  "@typescript-eslint/no-unused-vars": ["error", { argsIgnorePattern: "^_" }],
  "no-console": ["warn", { allow: ["warn", "error"] }],
  "react/react-in-jsx-scope": "off",
  "react/prop-types": "off",
  "react/no-unescaped-entities": "off",
};

// Rule set for plain TypeScript (no React plugin registered).
const tsOnlyRules = {
  ...pluginTs.configs.recommended.rules,
  "no-unused-vars": "off",
  "@typescript-eslint/no-unused-vars": ["error", { argsIgnorePattern: "^_" }],
  "no-console": ["warn", { allow: ["warn", "error"] }],
};

export default [
  // Global ignores. In ESLint flat config these only apply project-wide when
  // the object contains *only* the `ignores` key; the previous version paired
  // `ignores` with `files`, which merely scopes the ignores to that entry
  // instead of excluding the paths from linting altogether.
  {
    ignores: [
      "dist/",
      "node_modules/",
      "src-tauri/",
      "target/",
      "coverage/",
      "tailwind.config.ts",
    ],
  },
  // Application sources: browser + node globals, type-aware linting.
  {
    files: ["src/**/*.{ts,tsx}"],
    languageOptions: {
      ecmaVersion: "latest",
      sourceType: "module",
      globals: {
        ...globals.browser,
        ...globals.node,
      },
      parser: parserTs,
      parserOptions: {
        ecmaFeatures: { jsx: true },
        project: "./tsconfig.json",
      },
    },
    plugins: reactPlugins,
    settings: reactSettings,
    rules: reactRules,
  },
  // Unit tests: same as app sources plus vitest globals.
  {
    files: ["tests/unit/**/*.test.{ts,tsx}"],
    languageOptions: {
      ecmaVersion: "latest",
      sourceType: "module",
      globals: {
        ...globals.browser,
        ...globals.node,
        ...globals.vitest,
      },
      parser: parserTs,
      parserOptions: {
        ecmaFeatures: { jsx: true },
        project: "./tsconfig.json",
      },
    },
    plugins: reactPlugins,
    settings: reactSettings,
    rules: reactRules,
  },
  // End-to-end tests: node-only, no JSX, no type-aware project linking.
  {
    files: ["tests/e2e/**/*.ts", "tests/e2e/**/*.tsx"],
    languageOptions: {
      ecmaVersion: "latest",
      sourceType: "module",
      globals: { ...globals.node },
      parser: parserTs,
      parserOptions: {
        ecmaFeatures: { jsx: false },
      },
    },
    plugins: { "@typescript-eslint": pluginTs },
    rules: tsOnlyRules,
  },
  // CLI sources: node-only TypeScript.
  // NOTE(review): the previous cli entry also set
  // "react/no-unescaped-entities": "off" without registering the react
  // plugin, so that entry referenced a rule from an unloaded plugin; dropped.
  {
    files: ["cli/**/*.{ts,tsx}"],
    languageOptions: {
      ecmaVersion: "latest",
      sourceType: "module",
      globals: { ...globals.node },
      parser: parserTs,
      parserOptions: {
        ecmaFeatures: { jsx: false },
      },
    },
    plugins: { "@typescript-eslint": pluginTs },
    rules: tsOnlyRules,
  },
];

3181
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +1,11 @@
{ {
"name": "tftsr", "name": "tftsr",
"private": true, "private": true,
"version": "0.2.62", "version": "0.1.0",
"type": "module", "type": "module",
"scripts": { "scripts": {
"dev": "vite", "dev": "vite",
"build": "tsc && vite build", "build": "tsc && vite build",
"version:update": "node scripts/update-version.mjs",
"preview": "vite preview", "preview": "vite preview",
"tauri": "tauri", "tauri": "tauri",
"test": "vitest", "test": "vitest",
@ -38,17 +37,11 @@
"@testing-library/user-event": "^14", "@testing-library/user-event": "^14",
"@types/react": "^18", "@types/react": "^18",
"@types/react-dom": "^18", "@types/react-dom": "^18",
"@types/testing-library__react": "^10",
"@typescript-eslint/eslint-plugin": "^8.58.1",
"@typescript-eslint/parser": "^8.58.1",
"@vitejs/plugin-react": "^4", "@vitejs/plugin-react": "^4",
"@vitest/coverage-v8": "^2", "@vitest/coverage-v8": "^2",
"@wdio/cli": "^9", "@wdio/cli": "^9",
"@wdio/mocha-framework": "^9", "@wdio/mocha-framework": "^9",
"autoprefixer": "^10", "autoprefixer": "^10",
"eslint": "^9.39.4",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-react-hooks": "^7.0.1",
"jsdom": "^26", "jsdom": "^26",
"postcss": "^8", "postcss": "^8",
"typescript": "^5", "typescript": "^5",

View File

@ -1,111 +0,0 @@
#!/usr/bin/env node
import { execFileSync, execSync } from 'child_process';
import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs';
import { dirname, resolve } from 'path';
import { fileURLToPath } from 'url';
// Resolve the repository root relative to this script's own location
// (scripts/update-version.mjs -> project root), so the script works no
// matter which directory it is invoked from.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const projectRoot = resolve(__dirname, '..');
/**
 * Check that a version string is plain semver of the form X.Y.Z
 * (digits only — no "v" prefix, pre-release, or build metadata).
 */
function isValidSemver(version) {
  const semverPattern = /^\d+\.\d+\.\d+$/;
  return semverPattern.test(version);
}
/**
 * Throw unless `root` contains a `.git` entry (i.e. looks like a Git repo).
 */
function validateGitRepo(root) {
  const gitPath = resolve(root, '.git');
  if (!existsSync(gitPath)) {
    throw new Error(`Not a Git repository: ${root}`);
  }
}
/**
 * Return the project version derived from the most recent Git tag.
 *
 * Runs `git describe --tags --abbrev=0`, strips a leading "v", and validates
 * the result as X.Y.Z semver. On any failure (no tags, git missing, invalid
 * format) falls back to the version recorded in package.json.
 *
 * Fix: the previous code passed `shell: false` to execSync, which is a no-op
 * — Node's exec* family coerces any non-string `shell` value back to the
 * default shell. execFileSync with an argument array genuinely bypasses the
 * shell, which is what the option was trying to express.
 */
function getVersionFromGit() {
  validateGitRepo(projectRoot);
  try {
    const output = execFileSync('git', ['describe', '--tags', '--abbrev=0'], {
      encoding: 'utf-8',
      cwd: projectRoot,
    });
    // Remove v prefix
    const version = output.trim().replace(/^v/, '');
    // Validate it's a valid semver
    if (!isValidSemver(version)) {
      const pkgJsonVersion = getFallbackVersion();
      console.warn(`Invalid version format "${version}" from git describe, using package.json fallback: ${pkgJsonVersion}`);
      return pkgJsonVersion;
    }
    return version;
  } catch (e) {
    const pkgJsonVersion = getFallbackVersion();
    console.warn(`Failed to get version from Git tags, using package.json fallback: ${pkgJsonVersion}`);
    return pkgJsonVersion;
  }
}
/**
 * Return the version recorded in package.json; if the file is missing or
 * unreadable, return the hard-coded last-known version "0.2.50".
 */
function getFallbackVersion() {
  const packagePath = resolve(projectRoot, 'package.json');
  if (!existsSync(packagePath)) {
    return '0.2.50';
  }
  try {
    const parsed = JSON.parse(readFileSync(packagePath, 'utf-8'));
    return parsed.version || '0.2.50';
  } catch {
    return '0.2.50';
  }
}
/**
 * Rewrite the "version" field of the project's package.json in place.
 * Throws if package.json does not exist; preserves 2-space indentation
 * and the trailing newline.
 */
function updatePackageJson(version) {
  const pkgFile = resolve(projectRoot, 'package.json');
  if (!existsSync(pkgFile)) {
    throw new Error(`File not found: ${pkgFile}`);
  }
  const pkg = JSON.parse(readFileSync(pkgFile, 'utf-8'));
  pkg.version = version;
  // Write with 2-space indentation
  writeFileSync(pkgFile, JSON.stringify(pkg, null, 2) + '\n', 'utf-8');
  console.log(`✓ Updated package.json to ${version}`);
}
/**
 * Rewrite the first `version = "..."` line of a TOML file to the new version.
 *
 * Fix: the previous code replaced EVERY line matching /^\s*version\s*=\s*"/,
 * which in a Cargo.toml would also clobber standalone `version = "…"` lines
 * inside multi-line dependency tables. Only the first match — the [package]
 * version, which appears before any dependency section — is rewritten now.
 *
 * NOTE(review): this is also called on src-tauri/tauri.conf.json, which is
 * JSON (`"version": "…"`); the TOML-style regex never matches JSON lines, so
 * that call silently updates nothing — verify against the caller.
 */
function updateTOML(path, version) {
  const fullPath = resolve(projectRoot, path);
  if (!existsSync(fullPath)) {
    throw new Error(`File not found: ${fullPath}`);
  }
  const content = readFileSync(fullPath, 'utf-8');
  const lines = content.split('\n');
  const output = [];
  let replaced = false; // only the first version line is the package version
  for (const line of lines) {
    if (!replaced && line.match(/^\s*version\s*=\s*"/)) {
      output.push(`version = "${version}"`);
      replaced = true;
    } else {
      output.push(line);
    }
  }
  writeFileSync(fullPath, output.join('\n') + '\n', 'utf-8');
  console.log(`✓ Updated ${path} to ${version}`);
}
// Entry point: derive the version from the latest Git tag (or the
// package.json fallback) and propagate it to every file that records it.
const version = getVersionFromGit();
console.log(`Setting version to: ${version}`);
updatePackageJson(version);
updateTOML('src-tauri/Cargo.toml', version);
// NOTE(review): tauri.conf.json is JSON ("version": "..."), but updateTOML
// matches only TOML-style `version = "…"` lines — confirm this call actually
// changes the file.
updateTOML('src-tauri/tauri.conf.json', version);
console.log(`✓ All version fields updated to ${version}`);

4
src-tauri/Cargo.lock generated
View File

@ -4242,7 +4242,6 @@ dependencies = [
"js-sys", "js-sys",
"log", "log",
"mime", "mime",
"mime_guess",
"native-tls", "native-tls",
"percent-encoding", "percent-encoding",
"pin-project-lite", "pin-project-lite",
@ -6139,7 +6138,7 @@ dependencies = [
[[package]] [[package]]
name = "trcaa" name = "trcaa"
version = "0.2.62" version = "0.1.0"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"aho-corasick", "aho-corasick",
@ -6174,7 +6173,6 @@ dependencies = [
"tokio-test", "tokio-test",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"url",
"urlencoding", "urlencoding",
"uuid", "uuid",
"warp", "warp",

View File

@ -1,6 +1,6 @@
[package] [package]
name = "trcaa" name = "trcaa"
version = "0.2.62" version = "0.1.0"
edition = "2021" edition = "2021"
[lib] [lib]
@ -21,7 +21,7 @@ rusqlite = { version = "0.31", features = ["bundled-sqlcipher-vendored-openssl"]
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1" serde_json = "1"
tokio = { version = "1", features = ["full"] } tokio = { version = "1", features = ["full"] }
reqwest = { version = "0.12", features = ["json", "stream", "multipart"] } reqwest = { version = "0.12", features = ["json", "stream"] }
regex = "1" regex = "1"
aho-corasick = "1" aho-corasick = "1"
uuid = { version = "1", features = ["v7"] } uuid = { version = "1", features = ["v7"] }
@ -44,7 +44,6 @@ lazy_static = "1.4"
warp = "0.3" warp = "0.3"
urlencoding = "2" urlencoding = "2"
infer = "0.15" infer = "0.15"
url = "2.5.8"
[dev-dependencies] [dev-dependencies]
tokio-test = "0.4" tokio-test = "0.4"
@ -53,7 +52,3 @@ mockito = "1.2"
[profile.release] [profile.release]
opt-level = "s" opt-level = "s"
strip = true strip = true

View File

@ -1,30 +1,3 @@
fn main() { fn main() {
let version = get_version_from_git();
println!("cargo:rustc-env=APP_VERSION={version}");
println!("cargo:rerun-if-changed=.git/refs/heads/master");
println!("cargo:rerun-if-changed=.git/refs/tags");
tauri_build::build() tauri_build::build()
} }
/// Returns the project version from the most recent Git tag (leading `v`
/// stripped), or the hard-coded fallback "0.2.50" when git is unavailable,
/// the repo has no tags, or the tag is empty.
fn get_version_from_git() -> String {
    let described = std::process::Command::new("git")
        .args(["describe", "--tags", "--abbrev=0"])
        .output();

    if let Ok(out) = described {
        if out.status.success() {
            let tag = String::from_utf8_lossy(&out.stdout);
            let version = tag.trim().trim_start_matches('v');
            if !version.is_empty() {
                return version.to_string();
            }
        }
    }

    // Fallback when no usable tag could be read.
    "0.2.50".to_string()
}

View File

@ -165,7 +165,6 @@ pub async fn chat_message(
issue_id: String, issue_id: String,
message: String, message: String,
provider_config: ProviderConfig, provider_config: ProviderConfig,
system_prompt: Option<String>,
app_handle: tauri::AppHandle, app_handle: tauri::AppHandle,
state: State<'_, AppState>, state: State<'_, AppState>,
) -> Result<ChatResponse, String> { ) -> Result<ChatResponse, String> {
@ -233,21 +232,7 @@ pub async fn chat_message(
// Search integration sources for relevant context // Search integration sources for relevant context
let integration_context = search_integration_sources(&message, &app_handle, &state).await; let integration_context = search_integration_sources(&message, &app_handle, &state).await;
let mut messages = Vec::new(); let mut messages = history;
// Inject domain system prompt if provided
if let Some(ref prompt) = system_prompt {
if !prompt.is_empty() {
messages.push(Message {
role: "system".into(),
content: prompt.clone(),
tool_call_id: None,
tool_calls: None,
});
}
}
messages.extend(history);
// If we found integration content, add it to the conversation context // If we found integration content, add it to the conversation context
if !integration_context.is_empty() { if !integration_context.is_empty() {

View File

@ -97,77 +97,6 @@ pub async fn upload_log_file(
Ok(log_file) Ok(log_file)
} }
/// Registers a log file supplied as an in-memory string (rather than a path
/// on disk) for the given issue.
///
/// Computes the SHA-256 of `content` as `content_hash`, guesses the MIME
/// type from the file-name extension, and stores `file_name` as both
/// `file_name` and `file_path` since no on-disk copy exists. Note that only
/// the metadata row is written here — the content bytes themselves are not
/// persisted by this function. A best-effort audit entry is recorded; audit
/// failures are logged as warnings and do not fail the upload.
#[tauri::command]
pub async fn upload_log_file_by_content(
    issue_id: String,
    file_name: String,
    content: String,
    state: State<'_, AppState>,
) -> Result<LogFile, String> {
    let content_bytes = content.as_bytes();
    let content_hash = format!("{:x}", Sha256::digest(content_bytes));
    let file_size = content_bytes.len() as i64;

    // Determine mime type based on file extension
    // (content sniffing is not attempted here — extension only).
    let mime_type = if file_name.ends_with(".json") {
        "application/json"
    } else if file_name.ends_with(".xml") {
        "application/xml"
    } else {
        "text/plain"
    };

    // Use the file_name as the file_path for DB storage
    let log_file = LogFile::new(
        issue_id.clone(),
        file_name.clone(),
        file_name.clone(),
        file_size,
    );
    // Overlay the computed hash and MIME type on the freshly built record.
    let log_file = LogFile {
        content_hash: content_hash.clone(),
        mime_type: mime_type.to_string(),
        ..log_file
    };

    let db = state.db.lock().map_err(|e| e.to_string())?;
    db.execute(
        "INSERT INTO log_files (id, issue_id, file_name, file_path, file_size, mime_type, content_hash, uploaded_at, redacted) \
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
        rusqlite::params![
            log_file.id,
            log_file.issue_id,
            log_file.file_name,
            log_file.file_path,
            log_file.file_size,
            log_file.mime_type,
            log_file.content_hash,
            log_file.uploaded_at,
            log_file.redacted as i32,
        ],
    )
    .map_err(|_| "Failed to store uploaded log metadata".to_string())?;

    // Audit (best-effort: the same action name is used as for the
    // path-based upload_log_file command).
    let entry = AuditEntry::new(
        "upload_log_file".to_string(),
        "log_file".to_string(),
        log_file.id.clone(),
        serde_json::json!({ "issue_id": issue_id, "file_name": log_file.file_name }).to_string(),
    );
    if let Err(err) = crate::audit::log::write_audit_event(
        &db,
        &entry.action,
        &entry.entity_type,
        &entry.entity_id,
        &entry.details,
    ) {
        warn!(error = %err, "failed to write upload_log_file audit entry");
    }

    Ok(log_file)
}
#[tauri::command] #[tauri::command]
pub async fn detect_pii( pub async fn detect_pii(
log_file_id: String, log_file_id: String,

View File

@ -2,7 +2,7 @@ use tauri::State;
use crate::db::models::{ use crate::db::models::{
AiConversation, AiMessage, ImageAttachment, Issue, IssueDetail, IssueFilter, IssueSummary, AiConversation, AiMessage, ImageAttachment, Issue, IssueDetail, IssueFilter, IssueSummary,
IssueUpdate, LogFile, ResolutionStep, TimelineEvent, IssueUpdate, LogFile, ResolutionStep,
}; };
use crate::state::AppState; use crate::state::AppState;
@ -171,35 +171,12 @@ pub async fn get_issue(
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.collect(); .collect();
// Load timeline events
let mut te_stmt = db
.prepare(
"SELECT id, issue_id, event_type, description, metadata, created_at \
FROM timeline_events WHERE issue_id = ?1 ORDER BY created_at ASC",
)
.map_err(|e| e.to_string())?;
let timeline_events: Vec<TimelineEvent> = te_stmt
.query_map([&issue_id], |row| {
Ok(TimelineEvent {
id: row.get(0)?,
issue_id: row.get(1)?,
event_type: row.get(2)?,
description: row.get(3)?,
metadata: row.get(4)?,
created_at: row.get(5)?,
})
})
.map_err(|e| e.to_string())?
.filter_map(|r| r.ok())
.collect();
Ok(IssueDetail { Ok(IssueDetail {
issue, issue,
log_files, log_files,
image_attachments, image_attachments,
resolution_steps, resolution_steps,
conversations, conversations,
timeline_events,
}) })
} }
@ -325,11 +302,6 @@ pub async fn delete_issue(issue_id: String, state: State<'_, AppState>) -> Resul
[&issue_id], [&issue_id],
) )
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
db.execute(
"DELETE FROM timeline_events WHERE issue_id = ?1",
[&issue_id],
)
.map_err(|e| e.to_string())?;
db.execute("DELETE FROM issues WHERE id = ?1", [&issue_id]) db.execute("DELETE FROM issues WHERE id = ?1", [&issue_id])
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
@ -533,105 +505,37 @@ pub async fn update_five_why(
Ok(()) Ok(())
} }
/// Whitelist of timeline event types accepted by `add_timeline_event`;
/// any other value is rejected before touching the database.
const VALID_EVENT_TYPES: &[&str] = &[
    "triage_started",
    "log_uploaded",
    "why_level_advanced",
    "root_cause_identified",
    "rca_generated",
    "postmortem_generated",
    "document_exported",
];
#[tauri::command] #[tauri::command]
pub async fn add_timeline_event( pub async fn add_timeline_event(
issue_id: String, issue_id: String,
event_type: String, event_type: String,
description: String, description: String,
metadata: Option<String>,
state: State<'_, AppState>, state: State<'_, AppState>,
) -> Result<TimelineEvent, String> { ) -> Result<(), String> {
if !VALID_EVENT_TYPES.contains(&event_type.as_str()) { // Use audit_log for timeline tracking
return Err(format!("Invalid event_type: {event_type}")); let db = state.db.lock().map_err(|e| e.to_string())?;
} let entry = crate::db::models::AuditEntry::new(
event_type,
let meta = metadata.unwrap_or_else(|| "{}".to_string()); "issue".to_string(),
if meta.len() > 10240 {
return Err("metadata exceeds maximum size of 10KB".to_string());
}
serde_json::from_str::<serde_json::Value>(&meta)
.map_err(|_| "metadata must be valid JSON".to_string())?;
let event = TimelineEvent::new(
issue_id.clone(), issue_id.clone(),
event_type.clone(), serde_json::json!({ "description": description }).to_string(),
description.clone(),
meta,
); );
let mut db = state.db.lock().map_err(|e| e.to_string())?;
let tx = db.transaction().map_err(|e| e.to_string())?;
tx.execute(
"INSERT INTO timeline_events (id, issue_id, event_type, description, metadata, created_at) \
VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params![
event.id,
event.issue_id,
event.event_type,
event.description,
event.metadata,
event.created_at,
],
)
.map_err(|e| e.to_string())?;
crate::audit::log::write_audit_event( crate::audit::log::write_audit_event(
&tx, &db,
&event_type, &entry.action,
"issue", &entry.entity_type,
&issue_id, &entry.entity_id,
&serde_json::json!({ "description": description, "metadata": event.metadata }).to_string(), &entry.details,
) )
.map_err(|_| "Failed to write security audit entry".to_string())?; .map_err(|_| "Failed to write security audit entry".to_string())?;
// Update issue timestamp
let now = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(); let now = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string();
tx.execute( db.execute(
"UPDATE issues SET updated_at = ?1 WHERE id = ?2", "UPDATE issues SET updated_at = ?1 WHERE id = ?2",
rusqlite::params![now, issue_id], rusqlite::params![now, issue_id],
) )
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
tx.commit().map_err(|e| e.to_string())?; Ok(())
Ok(event)
}
#[tauri::command]
pub async fn get_timeline_events(
issue_id: String,
state: State<'_, AppState>,
) -> Result<Vec<TimelineEvent>, String> {
let db = state.db.lock().map_err(|e| e.to_string())?;
let mut stmt = db
.prepare(
"SELECT id, issue_id, event_type, description, metadata, created_at \
FROM timeline_events WHERE issue_id = ?1 ORDER BY created_at ASC",
)
.map_err(|e| e.to_string())?;
let events = stmt
.query_map([&issue_id], |row| {
Ok(TimelineEvent {
id: row.get(0)?,
issue_id: row.get(1)?,
event_type: row.get(2)?,
description: row.get(3)?,
metadata: row.get(4)?,
created_at: row.get(5)?,
})
})
.map_err(|e| e.to_string())?
.filter_map(|r| r.ok())
.collect();
Ok(events)
} }

View File

@ -8,13 +8,12 @@ use crate::db::models::{AuditEntry, ImageAttachment};
use crate::state::AppState; use crate::state::AppState;
const MAX_IMAGE_FILE_BYTES: u64 = 10 * 1024 * 1024; const MAX_IMAGE_FILE_BYTES: u64 = 10 * 1024 * 1024;
const SUPPORTED_IMAGE_MIME_TYPES: [&str; 6] = [ const SUPPORTED_IMAGE_MIME_TYPES: [&str; 5] = [
"image/png", "image/png",
"image/jpeg", "image/jpeg",
"image/gif", "image/gif",
"image/webp", "image/webp",
"image/svg+xml", "image/svg+xml",
"image/bmp",
]; ];
fn validate_image_file_path(file_path: &str) -> Result<std::path::PathBuf, String> { fn validate_image_file_path(file_path: &str) -> Result<std::path::PathBuf, String> {
@ -123,92 +122,6 @@ pub async fn upload_image_attachment(
Ok(attachment) Ok(attachment)
} }
/// Stores an image supplied as a base64 data URL (e.g. from drag-and-drop)
/// as an attachment on the given issue.
///
/// The payload must contain a comma (data-URL header before it, base64 body
/// after). The MIME type is sniffed from the decoded bytes via `infer`
/// (defaulting to image/png when sniffing fails) and must be one of
/// `SUPPORTED_IMAGE_MIME_TYPES`. Only the metadata row is written here; the
/// decoded bytes are used solely for hashing, sizing, and sniffing. The
/// audit write is best-effort: failures are logged but do not fail the call.
#[tauri::command]
pub async fn upload_image_attachment_by_content(
    issue_id: String,
    file_name: String,
    base64_content: String,
    state: State<'_, AppState>,
) -> Result<ImageAttachment, String> {
    // Split "data:image/png;base64,<payload>" and keep the payload.
    let data_part = base64_content
        .split(',')
        .nth(1)
        .ok_or("Invalid image data format - missing base64 content")?;

    let decoded = base64::engine::general_purpose::STANDARD
        .decode(data_part)
        .map_err(|_| "Failed to decode base64 image data")?;

    let content_hash = format!("{:x}", sha2::Sha256::digest(&decoded));
    let file_size = decoded.len() as i64;

    // Sniff the real type from the bytes rather than trusting the data-URL
    // header; fall back to PNG when the signature is unrecognized.
    let mime_type: String = infer::get(&decoded)
        .map(|m| m.mime_type().to_string())
        .unwrap_or_else(|| "image/png".to_string());

    if !is_supported_image_format(mime_type.as_str()) {
        return Err(format!(
            "Unsupported image format: {}. Supported formats: {}",
            mime_type,
            SUPPORTED_IMAGE_MIME_TYPES.join(", ")
        ));
    }

    // Use the file_name as file_path for DB storage
    // NOTE(review): the trailing `true, false` positional args presumably map
    // to pii_warning_acknowledged / is_paste (matching the INSERT below) —
    // confirm against ImageAttachment::new.
    let attachment = ImageAttachment::new(
        issue_id.clone(),
        file_name.clone(),
        file_name,
        file_size,
        mime_type,
        content_hash.clone(),
        true,
        false,
    );

    let db = state.db.lock().map_err(|e| e.to_string())?;
    db.execute(
        "INSERT INTO image_attachments (id, issue_id, file_name, file_path, file_size, mime_type, upload_hash, uploaded_at, pii_warning_acknowledged, is_paste) \
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
        rusqlite::params![
            attachment.id,
            attachment.issue_id,
            attachment.file_name,
            attachment.file_path,
            attachment.file_size,
            attachment.mime_type,
            attachment.upload_hash,
            attachment.uploaded_at,
            attachment.pii_warning_acknowledged as i32,
            attachment.is_paste as i32,
        ],
    )
    .map_err(|_| "Failed to store uploaded image metadata".to_string())?;

    // Best-effort audit trail entry.
    let entry = AuditEntry::new(
        "upload_image_attachment".to_string(),
        "image_attachment".to_string(),
        attachment.id.clone(),
        serde_json::json!({
            "issue_id": issue_id,
            "file_name": attachment.file_name,
            "is_paste": false,
        })
        .to_string(),
    );
    if let Err(err) = write_audit_event(
        &db,
        &entry.action,
        &entry.entity_type,
        &entry.entity_id,
        &entry.details,
    ) {
        tracing::warn!(error = %err, "failed to write upload_image_attachment audit entry");
    }

    Ok(attachment)
}
#[tauri::command] #[tauri::command]
pub async fn upload_paste_image( pub async fn upload_paste_image(
issue_id: String, issue_id: String,
@ -352,245 +265,6 @@ pub async fn delete_image_attachment(
Ok(()) Ok(())
} }
/// Uploads a local file to the provider's GenAI datastore and returns the
/// datastore file ID reported by the server.
///
/// Provider settings are read from the `provider_config` JSON:
/// - `api_url` (required), with `/upload/<datastore_id>` appended;
/// - `use_datastore_upload` (must be true);
/// - `custom_endpoint_path` (required — repurposed to hold the datastore ID);
/// - `api_key` (required) plus optional `custom_auth_header`
///   (default "x-generic-api-key") and `custom_auth_prefix`.
///
/// The path is checked via `validate_image_file_path` — presumably this
/// restricts uploads to image files (confirm against that helper); see
/// `upload_file_to_datastore_any` for arbitrary files. The two bodies are
/// otherwise duplicated and should eventually share a helper.
#[tauri::command]
pub async fn upload_file_to_datastore(
    provider_config: serde_json::Value,
    file_path: String,
    _state: State<'_, AppState>,
) -> Result<String, String> {
    use reqwest::multipart::Form;

    let canonical_path = validate_image_file_path(&file_path)?;
    let content =
        std::fs::read(&canonical_path).map_err(|_| "Failed to read file for datastore upload")?;
    let file_name = canonical_path
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("unknown")
        .to_string();
    let _file_size = content.len() as i64;

    // Extract API URL and auth header from provider config
    let api_url = provider_config
        .get("api_url")
        .and_then(|v| v.as_str())
        .ok_or("Provider config missing api_url")?
        .to_string();

    // Extract use_datastore_upload flag
    let use_datastore = provider_config
        .get("use_datastore_upload")
        .and_then(|v| v.as_bool())
        .unwrap_or(false);

    if !use_datastore {
        return Err("use_datastore_upload is not enabled for this provider".to_string());
    }

    // Get datastore ID from custom_endpoint_path (stored as datastore ID)
    let datastore_id = provider_config
        .get("custom_endpoint_path")
        .and_then(|v| v.as_str())
        .ok_or("Provider config missing datastore ID in custom_endpoint_path")?
        .to_string();

    // Build upload endpoint: POST /api/v2/upload/<DATASTORE-ID>
    let api_url = api_url.trim_end_matches('/');
    let upload_url = format!("{api_url}/upload/{datastore_id}");

    // Read auth header and value
    let auth_header = provider_config
        .get("custom_auth_header")
        .and_then(|v| v.as_str())
        .unwrap_or("x-generic-api-key");
    let auth_prefix = provider_config
        .get("custom_auth_prefix")
        .and_then(|v| v.as_str())
        .unwrap_or("");
    let api_key = provider_config
        .get("api_key")
        .and_then(|v| v.as_str())
        .ok_or("Provider config missing api_key")?;
    let auth_value = format!("{auth_prefix}{api_key}");

    let client = reqwest::Client::new();

    // Create multipart form
    let part = reqwest::multipart::Part::bytes(content)
        .file_name(file_name)
        .mime_str("application/octet-stream")
        .map_err(|e| format!("Failed to create multipart part: {e}"))?;
    let form = Form::new().part("file", part);

    let resp = client
        .post(&upload_url)
        .header(auth_header, auth_value)
        .multipart(form)
        .send()
        .await
        .map_err(|e| format!("Upload request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp
            .text()
            .await
            .unwrap_or_else(|_| "unable to read response".to_string());
        return Err(format!("Datastore upload error {status}: {text}"));
    }

    // Parse response to get file ID
    let json = resp
        .json::<serde_json::Value>()
        .await
        .map_err(|e| format!("Failed to parse upload response: {e}"))?;

    // Response should have file_id or id field
    let file_id = json
        .get("file_id")
        .or_else(|| json.get("id"))
        .and_then(|v| v.as_str())
        .ok_or_else(|| {
            format!(
                "Response missing file_id: {}",
                serde_json::to_string_pretty(&json).unwrap_or_default()
            )
        })?
        .to_string();

    Ok(file_id)
}
/// Upload any file (not just images) to GenAI datastore
///
/// Same provider-config contract and upload flow as
/// `upload_file_to_datastore` (`api_url`, `api_key`,
/// `use_datastore_upload`, datastore ID in `custom_endpoint_path`,
/// optional `custom_auth_header`/`custom_auth_prefix`), but the path check
/// is only canonicalize + "is a regular file" — there is no extension or
/// MIME restriction here, unlike the image variant. The two function
/// bodies are otherwise duplicated and should eventually share a helper.
#[tauri::command]
pub async fn upload_file_to_datastore_any(
    provider_config: serde_json::Value,
    file_path: String,
    _state: State<'_, AppState>,
) -> Result<String, String> {
    use reqwest::multipart::Form;

    // Validate file exists and is accessible
    let path = Path::new(&file_path);
    let canonical = std::fs::canonicalize(path).map_err(|_| "Unable to access selected file")?;
    let metadata = std::fs::metadata(&canonical).map_err(|_| "Unable to read file metadata")?;
    if !metadata.is_file() {
        return Err("Selected path is not a file".to_string());
    }

    let content =
        std::fs::read(&canonical).map_err(|_| "Failed to read file for datastore upload")?;
    let file_name = canonical
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("unknown")
        .to_string();
    let _file_size = content.len() as i64;

    // Extract API URL and auth header from provider config
    let api_url = provider_config
        .get("api_url")
        .and_then(|v| v.as_str())
        .ok_or("Provider config missing api_url")?
        .to_string();

    // Extract use_datastore_upload flag
    let use_datastore = provider_config
        .get("use_datastore_upload")
        .and_then(|v| v.as_bool())
        .unwrap_or(false);

    if !use_datastore {
        return Err("use_datastore_upload is not enabled for this provider".to_string());
    }

    // Get datastore ID from custom_endpoint_path (stored as datastore ID)
    let datastore_id = provider_config
        .get("custom_endpoint_path")
        .and_then(|v| v.as_str())
        .ok_or("Provider config missing datastore ID in custom_endpoint_path")?
        .to_string();

    // Build upload endpoint: POST /api/v2/upload/<DATASTORE-ID>
    let api_url = api_url.trim_end_matches('/');
    let upload_url = format!("{api_url}/upload/{datastore_id}");

    // Read auth header and value
    let auth_header = provider_config
        .get("custom_auth_header")
        .and_then(|v| v.as_str())
        .unwrap_or("x-generic-api-key");
    let auth_prefix = provider_config
        .get("custom_auth_prefix")
        .and_then(|v| v.as_str())
        .unwrap_or("");
    let api_key = provider_config
        .get("api_key")
        .and_then(|v| v.as_str())
        .ok_or("Provider config missing api_key")?;
    let auth_value = format!("{auth_prefix}{api_key}");

    let client = reqwest::Client::new();

    // Create multipart form
    let part = reqwest::multipart::Part::bytes(content)
        .file_name(file_name)
        .mime_str("application/octet-stream")
        .map_err(|e| format!("Failed to create multipart part: {e}"))?;
    let form = Form::new().part("file", part);

    let resp = client
        .post(&upload_url)
        .header(auth_header, auth_value)
        .multipart(form)
        .send()
        .await
        .map_err(|e| format!("Upload request failed: {e}"))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let text = resp
            .text()
            .await
            .unwrap_or_else(|_| "unable to read response".to_string());
        return Err(format!("Datastore upload error {status}: {text}"));
    }

    // Parse response to get file ID
    let json = resp
        .json::<serde_json::Value>()
        .await
        .map_err(|e| format!("Failed to parse upload response: {e}"))?;

    // Response should have file_id or id field
    let file_id = json
        .get("file_id")
        .or_else(|| json.get("id"))
        .and_then(|v| v.as_str())
        .ok_or_else(|| {
            format!(
                "Response missing file_id: {}",
                serde_json::to_string_pretty(&json).unwrap_or_default()
            )
        })?
        .to_string();

    Ok(file_id)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -602,7 +276,7 @@ mod tests {
assert!(is_supported_image_format("image/gif")); assert!(is_supported_image_format("image/gif"));
assert!(is_supported_image_format("image/webp")); assert!(is_supported_image_format("image/webp"));
assert!(is_supported_image_format("image/svg+xml")); assert!(is_supported_image_format("image/svg+xml"));
assert!(is_supported_image_format("image/bmp")); assert!(!is_supported_image_format("image/bmp"));
assert!(!is_supported_image_format("text/plain")); assert!(!is_supported_image_format("text/plain"));
} }
} }

View File

@ -4,7 +4,6 @@ use crate::ollama::{
OllamaStatus, OllamaStatus,
}; };
use crate::state::{AppSettings, AppState, ProviderConfig}; use crate::state::{AppSettings, AppState, ProviderConfig};
use std::env;
// --- Ollama commands --- // --- Ollama commands ---
@ -159,8 +158,8 @@ pub async fn save_ai_provider(
db.execute( db.execute(
"INSERT OR REPLACE INTO ai_providers "INSERT OR REPLACE INTO ai_providers
(id, name, provider_type, api_url, encrypted_api_key, model, max_tokens, temperature, (id, name, provider_type, api_url, encrypted_api_key, model, max_tokens, temperature,
custom_endpoint_path, custom_auth_header, custom_auth_prefix, api_format, user_id, use_datastore_upload, updated_at) custom_endpoint_path, custom_auth_header, custom_auth_prefix, api_format, user_id, updated_at)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, datetime('now'))", VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, datetime('now'))",
rusqlite::params![ rusqlite::params![
uuid::Uuid::now_v7().to_string(), uuid::Uuid::now_v7().to_string(),
provider.name, provider.name,
@ -175,7 +174,6 @@ pub async fn save_ai_provider(
provider.custom_auth_prefix, provider.custom_auth_prefix,
provider.api_format, provider.api_format,
provider.user_id, provider.user_id,
provider.use_datastore_upload,
], ],
) )
.map_err(|e| format!("Failed to save AI provider: {e}"))?; .map_err(|e| format!("Failed to save AI provider: {e}"))?;
@ -193,7 +191,7 @@ pub async fn load_ai_providers(
let mut stmt = db let mut stmt = db
.prepare( .prepare(
"SELECT name, provider_type, api_url, encrypted_api_key, model, max_tokens, temperature, "SELECT name, provider_type, api_url, encrypted_api_key, model, max_tokens, temperature,
custom_endpoint_path, custom_auth_header, custom_auth_prefix, api_format, user_id, use_datastore_upload custom_endpoint_path, custom_auth_header, custom_auth_prefix, api_format, user_id
FROM ai_providers FROM ai_providers
ORDER BY name", ORDER BY name",
) )
@ -216,7 +214,6 @@ pub async fn load_ai_providers(
row.get::<_, Option<String>>(9)?, // custom_auth_prefix row.get::<_, Option<String>>(9)?, // custom_auth_prefix
row.get::<_, Option<String>>(10)?, // api_format row.get::<_, Option<String>>(10)?, // api_format
row.get::<_, Option<String>>(11)?, // user_id row.get::<_, Option<String>>(11)?, // user_id
row.get::<_, Option<bool>>(12)?, // use_datastore_upload
)) ))
}) })
.map_err(|e| e.to_string())? .map_err(|e| e.to_string())?
@ -235,7 +232,6 @@ pub async fn load_ai_providers(
custom_auth_prefix, custom_auth_prefix,
api_format, api_format,
user_id, user_id,
use_datastore_upload,
)| { )| {
// Decrypt the API key // Decrypt the API key
let api_key = crate::integrations::auth::decrypt_token(&encrypted_key).ok()?; let api_key = crate::integrations::auth::decrypt_token(&encrypted_key).ok()?;
@ -254,7 +250,6 @@ pub async fn load_ai_providers(
api_format, api_format,
session_id: None, // Session IDs are not persisted session_id: None, // Session IDs are not persisted
user_id, user_id,
use_datastore_upload,
}) })
}, },
) )
@ -276,11 +271,3 @@ pub async fn delete_ai_provider(
Ok(()) Ok(())
} }
/// Get the application version from build-time environment
#[tauri::command]
pub async fn get_app_version() -> Result<String, String> {
env::var("APP_VERSION")
.or_else(|_| env::var("CARGO_PKG_VERSION"))
.map_err(|e| format!("Failed to get version: {e}"))
}

View File

@ -170,49 +170,6 @@ pub fn run_migrations(conn: &Connection) -> anyhow::Result<()> {
is_paste INTEGER NOT NULL DEFAULT 0 is_paste INTEGER NOT NULL DEFAULT 0
);", );",
), ),
(
"014_create_ai_providers",
"CREATE TABLE IF NOT EXISTS ai_providers (
id TEXT PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
provider_type TEXT NOT NULL,
api_url TEXT NOT NULL,
encrypted_api_key TEXT NOT NULL,
model TEXT NOT NULL,
max_tokens INTEGER,
temperature REAL,
custom_endpoint_path TEXT,
custom_auth_header TEXT,
custom_auth_prefix TEXT,
api_format TEXT,
user_id TEXT,
use_datastore_upload INTEGER,
created_at TEXT NOT NULL DEFAULT (datetime('now')),
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);",
),
(
"015_add_use_datastore_upload",
"ALTER TABLE ai_providers ADD COLUMN use_datastore_upload INTEGER DEFAULT 0",
),
(
"016_add_created_at",
"ALTER TABLE ai_providers ADD COLUMN created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%d %H:%M:%S', 'now'))",
),
(
"017_create_timeline_events",
"CREATE TABLE IF NOT EXISTS timeline_events (
id TEXT PRIMARY KEY,
issue_id TEXT NOT NULL,
event_type TEXT NOT NULL,
description TEXT NOT NULL DEFAULT '',
metadata TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX idx_timeline_events_issue ON timeline_events(issue_id);
CREATE INDEX idx_timeline_events_time ON timeline_events(created_at);",
),
]; ];
for (name, sql) in migrations { for (name, sql) in migrations {
@ -223,27 +180,10 @@ pub fn run_migrations(conn: &Connection) -> anyhow::Result<()> {
if !already_applied { if !already_applied {
// FTS5 virtual table creation can be skipped if FTS5 is not compiled in // FTS5 virtual table creation can be skipped if FTS5 is not compiled in
// Also handle column-already-exists errors for migrations 015-016 if let Err(e) = conn.execute_batch(sql) {
if name.contains("fts") { if name.contains("fts") {
if let Err(e) = conn.execute_batch(sql) {
tracing::warn!("FTS5 not available, skipping: {e}"); tracing::warn!("FTS5 not available, skipping: {e}");
} } else {
} else if name.ends_with("_add_use_datastore_upload")
|| name.ends_with("_add_created_at")
{
// Use execute for ALTER TABLE (SQLite only allows one statement per command)
// Skip error if column already exists (SQLITE_ERROR with "duplicate column name")
if let Err(e) = conn.execute(sql, []) {
let err_str = e.to_string();
if err_str.contains("duplicate column name") {
tracing::info!("Column may already exist, skipping migration {name}: {e}");
} else {
return Err(e.into());
}
}
} else {
// Use execute_batch for other migrations (FTS5, CREATE TABLE, etc.)
if let Err(e) = conn.execute_batch(sql) {
return Err(e.into()); return Err(e.into());
} }
} }
@ -528,266 +468,4 @@ mod tests {
assert_eq!(mime_type, "image/png"); assert_eq!(mime_type, "image/png");
assert_eq!(is_paste, 0); assert_eq!(is_paste, 0);
} }
#[test]
fn test_create_ai_providers_table() {
let conn = setup_test_db();
let count: i64 = conn
.query_row(
"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='ai_providers'",
[],
|r| r.get(0),
)
.unwrap();
assert_eq!(count, 1);
let mut stmt = conn.prepare("PRAGMA table_info(ai_providers)").unwrap();
let columns: Vec<String> = stmt
.query_map([], |row| row.get::<_, String>(1))
.unwrap()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert!(columns.contains(&"id".to_string()));
assert!(columns.contains(&"name".to_string()));
assert!(columns.contains(&"provider_type".to_string()));
assert!(columns.contains(&"api_url".to_string()));
assert!(columns.contains(&"encrypted_api_key".to_string()));
assert!(columns.contains(&"model".to_string()));
assert!(columns.contains(&"max_tokens".to_string()));
assert!(columns.contains(&"temperature".to_string()));
assert!(columns.contains(&"custom_endpoint_path".to_string()));
assert!(columns.contains(&"custom_auth_header".to_string()));
assert!(columns.contains(&"custom_auth_prefix".to_string()));
assert!(columns.contains(&"api_format".to_string()));
assert!(columns.contains(&"user_id".to_string()));
assert!(columns.contains(&"use_datastore_upload".to_string()));
assert!(columns.contains(&"created_at".to_string()));
assert!(columns.contains(&"updated_at".to_string()));
}
#[test]
fn test_store_and_retrieve_ai_provider() {
let conn = setup_test_db();
conn.execute(
"INSERT INTO ai_providers (id, name, provider_type, api_url, encrypted_api_key, model)
VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params![
"test-provider-1",
"My OpenAI",
"openai",
"https://api.openai.com/v1",
"encrypted_key_123",
"gpt-4o"
],
)
.unwrap();
let (name, provider_type, api_url, encrypted_key, model): (String, String, String, String, String) = conn
.query_row(
"SELECT name, provider_type, api_url, encrypted_api_key, model FROM ai_providers WHERE name = ?1",
["My OpenAI"],
|r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?, r.get(4)?)),
)
.unwrap();
assert_eq!(name, "My OpenAI");
assert_eq!(provider_type, "openai");
assert_eq!(api_url, "https://api.openai.com/v1");
assert_eq!(encrypted_key, "encrypted_key_123");
assert_eq!(model, "gpt-4o");
}
#[test]
fn test_add_missing_columns_to_existing_table() {
let conn = Connection::open_in_memory().unwrap();
// Simulate existing table without use_datastore_upload and created_at
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS ai_providers (
id TEXT PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
provider_type TEXT NOT NULL,
api_url TEXT NOT NULL,
encrypted_api_key TEXT NOT NULL,
model TEXT NOT NULL,
max_tokens INTEGER,
temperature REAL,
custom_endpoint_path TEXT,
custom_auth_header TEXT,
custom_auth_prefix TEXT,
api_format TEXT,
user_id TEXT,
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);",
)
.unwrap();
// Verify columns BEFORE migration
let mut stmt = conn.prepare("PRAGMA table_info(ai_providers)").unwrap();
let columns: Vec<String> = stmt
.query_map([], |row| row.get::<_, String>(1))
.unwrap()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert!(columns.contains(&"name".to_string()));
assert!(columns.contains(&"model".to_string()));
assert!(!columns.contains(&"use_datastore_upload".to_string()));
assert!(!columns.contains(&"created_at".to_string()));
// Run migrations (should apply 015 to add missing columns)
run_migrations(&conn).unwrap();
// Verify columns AFTER migration
let mut stmt = conn.prepare("PRAGMA table_info(ai_providers)").unwrap();
let columns: Vec<String> = stmt
.query_map([], |row| row.get::<_, String>(1))
.unwrap()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert!(columns.contains(&"name".to_string()));
assert!(columns.contains(&"model".to_string()));
assert!(columns.contains(&"use_datastore_upload".to_string()));
assert!(columns.contains(&"created_at".to_string()));
// Verify data integrity - existing rows should have default values
conn.execute(
"INSERT INTO ai_providers (id, name, provider_type, api_url, encrypted_api_key, model)
VALUES (?, ?, ?, ?, ?, ?)",
rusqlite::params![
"test-provider-2",
"Test Provider",
"openai",
"https://api.example.com",
"encrypted_key_456",
"gpt-3.5-turbo"
],
)
.unwrap();
let (name, use_datastore_upload, created_at): (String, bool, String) = conn
.query_row(
"SELECT name, use_datastore_upload, created_at FROM ai_providers WHERE name = ?1",
["Test Provider"],
|r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),
)
.unwrap();
assert_eq!(name, "Test Provider");
assert!(!use_datastore_upload);
assert!(created_at.len() > 0);
}
#[test]
fn test_idempotent_add_missing_columns() {
let conn = Connection::open_in_memory().unwrap();
// Create table with both columns already present (simulating prior migration run)
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS ai_providers (
id TEXT PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
provider_type TEXT NOT NULL,
api_url TEXT NOT NULL,
encrypted_api_key TEXT NOT NULL,
model TEXT NOT NULL,
max_tokens INTEGER,
temperature REAL,
custom_endpoint_path TEXT,
custom_auth_header TEXT,
custom_auth_prefix TEXT,
api_format TEXT,
user_id TEXT,
use_datastore_upload INTEGER DEFAULT 0,
created_at TEXT NOT NULL DEFAULT (datetime('now')),
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);",
)
.unwrap();
// Should not fail even though columns already exist
run_migrations(&conn).unwrap();
}
#[test]
fn test_timeline_events_table_exists() {
let conn = setup_test_db();
let count: i64 = conn
.query_row(
"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='timeline_events'",
[],
|r| r.get(0),
)
.unwrap();
assert_eq!(count, 1);
let mut stmt = conn.prepare("PRAGMA table_info(timeline_events)").unwrap();
let columns: Vec<String> = stmt
.query_map([], |row| row.get::<_, String>(1))
.unwrap()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert!(columns.contains(&"id".to_string()));
assert!(columns.contains(&"issue_id".to_string()));
assert!(columns.contains(&"event_type".to_string()));
assert!(columns.contains(&"description".to_string()));
assert!(columns.contains(&"metadata".to_string()));
assert!(columns.contains(&"created_at".to_string()));
}
#[test]
fn test_timeline_events_cascade_delete() {
let conn = setup_test_db();
conn.execute("PRAGMA foreign_keys = ON", []).unwrap();
let now = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string();
conn.execute(
"INSERT INTO issues (id, title, created_at, updated_at) VALUES (?1, ?2, ?3, ?4)",
rusqlite::params!["issue-1", "Test Issue", now, now],
)
.unwrap();
conn.execute(
"INSERT INTO timeline_events (id, issue_id, event_type, description, metadata, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params!["te-1", "issue-1", "triage_started", "Started triage", "{}", "2025-01-15 10:00:00 UTC"],
)
.unwrap();
// Verify event exists
let count: i64 = conn
.query_row("SELECT COUNT(*) FROM timeline_events", [], |r| r.get(0))
.unwrap();
assert_eq!(count, 1);
// Delete issue — cascade should remove timeline event
conn.execute("DELETE FROM issues WHERE id = 'issue-1'", [])
.unwrap();
let count: i64 = conn
.query_row("SELECT COUNT(*) FROM timeline_events", [], |r| r.get(0))
.unwrap();
assert_eq!(count, 0);
}
#[test]
fn test_timeline_events_indexes() {
let conn = setup_test_db();
let mut stmt = conn
.prepare(
"SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='timeline_events'",
)
.unwrap();
let indexes: Vec<String> = stmt
.query_map([], |row| row.get(0))
.unwrap()
.filter_map(|r| r.ok())
.collect();
assert!(indexes.contains(&"idx_timeline_events_issue".to_string()));
assert!(indexes.contains(&"idx_timeline_events_time".to_string()));
}
} }

View File

@ -47,7 +47,6 @@ pub struct IssueDetail {
pub image_attachments: Vec<ImageAttachment>, pub image_attachments: Vec<ImageAttachment>,
pub resolution_steps: Vec<ResolutionStep>, pub resolution_steps: Vec<ResolutionStep>,
pub conversations: Vec<AiConversation>, pub conversations: Vec<AiConversation>,
pub timeline_events: Vec<TimelineEvent>,
} }
/// Lightweight row returned by list/search commands. /// Lightweight row returned by list/search commands.
@ -122,31 +121,9 @@ pub struct FiveWhyEntry {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimelineEvent { pub struct TimelineEvent {
pub id: String, pub id: String,
pub issue_id: String,
pub event_type: String, pub event_type: String,
pub description: String, pub description: String,
pub metadata: String, pub created_at: i64,
pub created_at: String,
}
impl TimelineEvent {
pub fn new(
issue_id: String,
event_type: String,
description: String,
metadata: String,
) -> Self {
TimelineEvent {
id: Uuid::now_v7().to_string(),
issue_id,
event_type,
description,
metadata,
created_at: chrono::Utc::now()
.format("%Y-%m-%d %H:%M:%S UTC")
.to_string(),
}
}
} }
// ─── Log File ─────────────────────────────────────────────────────────────── // ─── Log File ───────────────────────────────────────────────────────────────

View File

@ -1,5 +1,4 @@
use crate::db::models::IssueDetail; use crate::db::models::IssueDetail;
use crate::docs::rca::{calculate_duration, format_event_type};
pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String { pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
let issue = &detail.issue; let issue = &detail.issue;
@ -52,16 +51,7 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
// Impact // Impact
md.push_str("## Impact\n\n"); md.push_str("## Impact\n\n");
if detail.timeline_events.len() >= 2 { md.push_str("- **Duration:** _[How long did the incident last?]_\n");
let first = &detail.timeline_events[0].created_at;
let last = &detail.timeline_events[detail.timeline_events.len() - 1].created_at;
md.push_str(&format!(
"- **Duration:** {}\n",
calculate_duration(first, last)
));
} else {
md.push_str("- **Duration:** _[How long did the incident last?]_\n");
}
md.push_str("- **Users Affected:** _[Number/percentage of affected users]_\n"); md.push_str("- **Users Affected:** _[Number/percentage of affected users]_\n");
md.push_str("- **Revenue Impact:** _[Financial impact, if applicable]_\n"); md.push_str("- **Revenue Impact:** _[Financial impact, if applicable]_\n");
md.push_str("- **SLA Impact:** _[Were any SLAs breached?]_\n\n"); md.push_str("- **SLA Impact:** _[Were any SLAs breached?]_\n\n");
@ -77,19 +67,7 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
if let Some(ref resolved) = issue.resolved_at { if let Some(ref resolved) = issue.resolved_at {
md.push_str(&format!("| {resolved} | Issue resolved |\n")); md.push_str(&format!("| {resolved} | Issue resolved |\n"));
} }
if detail.timeline_events.is_empty() { md.push_str("| _HH:MM_ | _[Add additional timeline events]_ |\n\n");
md.push_str("| _HH:MM_ | _[Add additional timeline events]_ |\n");
} else {
for event in &detail.timeline_events {
md.push_str(&format!(
"| {} | {} - {} |\n",
event.created_at,
format_event_type(&event.event_type),
event.description
));
}
}
md.push('\n');
// Root Cause Analysis // Root Cause Analysis
md.push_str("## Root Cause Analysis\n\n"); md.push_str("## Root Cause Analysis\n\n");
@ -136,19 +114,6 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
// What Went Well // What Went Well
md.push_str("## What Went Well\n\n"); md.push_str("## What Went Well\n\n");
if !detail.resolution_steps.is_empty() {
md.push_str(&format!(
"- Systematic 5-whys analysis conducted ({} steps completed)\n",
detail.resolution_steps.len()
));
}
if detail
.timeline_events
.iter()
.any(|e| e.event_type == "root_cause_identified")
{
md.push_str("- Root cause was identified during triage\n");
}
md.push_str("- _[e.g., Quick detection through existing alerts]_\n"); md.push_str("- _[e.g., Quick detection through existing alerts]_\n");
md.push_str("- _[e.g., Effective cross-team collaboration]_\n"); md.push_str("- _[e.g., Effective cross-team collaboration]_\n");
md.push_str("- _[e.g., Smooth communication with stakeholders]_\n\n"); md.push_str("- _[e.g., Smooth communication with stakeholders]_\n\n");
@ -193,7 +158,7 @@ pub fn generate_postmortem_markdown(detail: &IssueDetail) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::db::models::{Issue, IssueDetail, ResolutionStep, TimelineEvent}; use crate::db::models::{Issue, IssueDetail, ResolutionStep};
fn make_test_detail() -> IssueDetail { fn make_test_detail() -> IssueDetail {
IssueDetail { IssueDetail {
@ -223,7 +188,6 @@ mod tests {
created_at: "2025-02-10 09:00:00".to_string(), created_at: "2025-02-10 09:00:00".to_string(),
}], }],
conversations: vec![], conversations: vec![],
timeline_events: vec![],
} }
} }
@ -282,76 +246,4 @@ mod tests {
assert!(md.contains("| Priority | Action | Owner | Due Date | Status |")); assert!(md.contains("| Priority | Action | Owner | Due Date | Status |"));
assert!(md.contains("| P0 |")); assert!(md.contains("| P0 |"));
} }
#[test]
fn test_postmortem_timeline_with_real_events() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "pm-456".to_string(),
event_type: "triage_started".to_string(),
description: "Triage initiated".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 08:05:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "pm-456".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Certificate expiry confirmed".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 10:30:00 UTC".to_string(),
},
];
let md = generate_postmortem_markdown(&detail);
assert!(md.contains("## Timeline"));
assert!(md.contains("| 2025-02-10 08:05:00 UTC | Triage Started - Triage initiated |"));
assert!(md.contains(
"| 2025-02-10 10:30:00 UTC | Root Cause Identified - Certificate expiry confirmed |"
));
assert!(!md.contains("_[Add additional timeline events]_"));
}
#[test]
fn test_postmortem_impact_with_duration() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "pm-456".to_string(),
event_type: "triage_started".to_string(),
description: "Triage initiated".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 08:00:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "pm-456".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Found it".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 10:30:00 UTC".to_string(),
},
];
let md = generate_postmortem_markdown(&detail);
assert!(md.contains("**Duration:** 2h 30m"));
assert!(!md.contains("_[How long did the incident last?]_"));
}
#[test]
fn test_postmortem_what_went_well_with_steps() {
let mut detail = make_test_detail();
detail.timeline_events = vec![TimelineEvent {
id: "te-1".to_string(),
issue_id: "pm-456".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Root cause found".to_string(),
metadata: "{}".to_string(),
created_at: "2025-02-10 10:00:00 UTC".to_string(),
}];
let md = generate_postmortem_markdown(&detail);
assert!(md.contains("Systematic 5-whys analysis conducted (1 steps completed)"));
assert!(md.contains("Root cause was identified during triage"));
}
} }

View File

@ -1,48 +1,5 @@
use crate::db::models::IssueDetail; use crate::db::models::IssueDetail;
pub fn format_event_type(event_type: &str) -> &str {
match event_type {
"triage_started" => "Triage Started",
"log_uploaded" => "Log File Uploaded",
"why_level_advanced" => "Why Level Advanced",
"root_cause_identified" => "Root Cause Identified",
"rca_generated" => "RCA Document Generated",
"postmortem_generated" => "Post-Mortem Generated",
"document_exported" => "Document Exported",
other => other,
}
}
pub fn calculate_duration(start: &str, end: &str) -> String {
let fmt = "%Y-%m-%d %H:%M:%S UTC";
let start_dt = match chrono::NaiveDateTime::parse_from_str(start, fmt) {
Ok(dt) => dt,
Err(_) => return "N/A".to_string(),
};
let end_dt = match chrono::NaiveDateTime::parse_from_str(end, fmt) {
Ok(dt) => dt,
Err(_) => return "N/A".to_string(),
};
let duration = end_dt.signed_duration_since(start_dt);
let total_minutes = duration.num_minutes();
if total_minutes < 0 {
return "N/A".to_string();
}
let days = total_minutes / (24 * 60);
let hours = (total_minutes % (24 * 60)) / 60;
let minutes = total_minutes % 60;
if days > 0 {
format!("{days}d {hours}h")
} else if hours > 0 {
format!("{hours}h {minutes}m")
} else {
format!("{minutes}m")
}
}
pub fn generate_rca_markdown(detail: &IssueDetail) -> String { pub fn generate_rca_markdown(detail: &IssueDetail) -> String {
let issue = &detail.issue; let issue = &detail.issue;
@ -100,52 +57,6 @@ pub fn generate_rca_markdown(detail: &IssueDetail) -> String {
md.push_str("\n\n"); md.push_str("\n\n");
} }
// Incident Timeline
md.push_str("## Incident Timeline\n\n");
if detail.timeline_events.is_empty() {
md.push_str("_No timeline events recorded._\n\n");
} else {
md.push_str("| Time (UTC) | Event | Description |\n");
md.push_str("|------------|-------|-------------|\n");
for event in &detail.timeline_events {
md.push_str(&format!(
"| {} | {} | {} |\n",
event.created_at,
format_event_type(&event.event_type),
event.description
));
}
md.push('\n');
}
// Incident Metrics
md.push_str("## Incident Metrics\n\n");
md.push_str(&format!(
"- **Total Events:** {}\n",
detail.timeline_events.len()
));
if detail.timeline_events.len() >= 2 {
let first = &detail.timeline_events[0].created_at;
let last = &detail.timeline_events[detail.timeline_events.len() - 1].created_at;
md.push_str(&format!(
"- **Incident Duration:** {}\n",
calculate_duration(first, last)
));
} else {
md.push_str("- **Incident Duration:** N/A\n");
}
let root_cause_event = detail
.timeline_events
.iter()
.find(|e| e.event_type == "root_cause_identified");
if let (Some(first), Some(rc)) = (detail.timeline_events.first(), root_cause_event) {
md.push_str(&format!(
"- **Time to Root Cause:** {}\n",
calculate_duration(&first.created_at, &rc.created_at)
));
}
md.push('\n');
// 5 Whys Analysis // 5 Whys Analysis
md.push_str("## 5 Whys Analysis\n\n"); md.push_str("## 5 Whys Analysis\n\n");
if detail.resolution_steps.is_empty() { if detail.resolution_steps.is_empty() {
@ -232,7 +143,7 @@ pub fn generate_rca_markdown(detail: &IssueDetail) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::db::models::{Issue, IssueDetail, LogFile, ResolutionStep, TimelineEvent}; use crate::db::models::{Issue, IssueDetail, LogFile, ResolutionStep};
fn make_test_detail() -> IssueDetail { fn make_test_detail() -> IssueDetail {
IssueDetail { IssueDetail {
@ -283,7 +194,6 @@ mod tests {
}, },
], ],
conversations: vec![], conversations: vec![],
timeline_events: vec![],
} }
} }
@ -337,135 +247,4 @@ mod tests {
let md = generate_rca_markdown(&detail); let md = generate_rca_markdown(&detail);
assert!(md.contains("Unassigned")); assert!(md.contains("Unassigned"));
} }
#[test]
fn test_rca_timeline_section_with_events() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "test-123".to_string(),
event_type: "triage_started".to_string(),
description: "Triage initiated by oncall".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 10:00:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "test-123".to_string(),
event_type: "log_uploaded".to_string(),
description: "app.log uploaded".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 10:30:00 UTC".to_string(),
},
TimelineEvent {
id: "te-3".to_string(),
issue_id: "test-123".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Connection pool leak found".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 12:15:00 UTC".to_string(),
},
];
let md = generate_rca_markdown(&detail);
assert!(md.contains("## Incident Timeline"));
assert!(md.contains("| Time (UTC) | Event | Description |"));
assert!(md
.contains("| 2025-01-15 10:00:00 UTC | Triage Started | Triage initiated by oncall |"));
assert!(md.contains("| 2025-01-15 10:30:00 UTC | Log File Uploaded | app.log uploaded |"));
assert!(md.contains(
"| 2025-01-15 12:15:00 UTC | Root Cause Identified | Connection pool leak found |"
));
}
#[test]
fn test_rca_timeline_section_empty() {
let detail = make_test_detail();
let md = generate_rca_markdown(&detail);
assert!(md.contains("## Incident Timeline"));
assert!(md.contains("_No timeline events recorded._"));
}
#[test]
fn test_rca_metrics_section() {
let mut detail = make_test_detail();
detail.timeline_events = vec![
TimelineEvent {
id: "te-1".to_string(),
issue_id: "test-123".to_string(),
event_type: "triage_started".to_string(),
description: "Triage started".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 10:00:00 UTC".to_string(),
},
TimelineEvent {
id: "te-2".to_string(),
issue_id: "test-123".to_string(),
event_type: "root_cause_identified".to_string(),
description: "Root cause found".to_string(),
metadata: "{}".to_string(),
created_at: "2025-01-15 12:15:00 UTC".to_string(),
},
];
let md = generate_rca_markdown(&detail);
assert!(md.contains("## Incident Metrics"));
assert!(md.contains("**Total Events:** 2"));
assert!(md.contains("**Incident Duration:** 2h 15m"));
assert!(md.contains("**Time to Root Cause:** 2h 15m"));
}
#[test]
fn test_calculate_duration_hours_minutes() {
assert_eq!(
calculate_duration("2025-01-15 10:00:00 UTC", "2025-01-15 12:15:00 UTC"),
"2h 15m"
);
}
#[test]
fn test_calculate_duration_days() {
assert_eq!(
calculate_duration("2025-01-15 10:00:00 UTC", "2025-01-18 11:00:00 UTC"),
"3d 1h"
);
}
#[test]
fn test_calculate_duration_minutes_only() {
assert_eq!(
calculate_duration("2025-01-15 10:00:00 UTC", "2025-01-15 10:45:00 UTC"),
"45m"
);
}
#[test]
fn test_calculate_duration_invalid() {
assert_eq!(calculate_duration("bad-date", "also-bad"), "N/A");
}
#[test]
fn test_format_event_type_known() {
assert_eq!(format_event_type("triage_started"), "Triage Started");
assert_eq!(format_event_type("log_uploaded"), "Log File Uploaded");
assert_eq!(
format_event_type("why_level_advanced"),
"Why Level Advanced"
);
assert_eq!(
format_event_type("root_cause_identified"),
"Root Cause Identified"
);
assert_eq!(format_event_type("rca_generated"), "RCA Document Generated");
assert_eq!(
format_event_type("postmortem_generated"),
"Post-Mortem Generated"
);
assert_eq!(format_event_type("document_exported"), "Document Exported");
}
#[test]
fn test_format_event_type_unknown() {
assert_eq!(format_event_type("custom_event"), "custom_event");
assert_eq!(format_event_type(""), "");
}
} }

View File

@ -629,10 +629,11 @@ mod tests {
#[test] #[test]
fn test_derive_aes_key_is_stable_for_same_input() { fn test_derive_aes_key_is_stable_for_same_input() {
// Use deterministic helper to avoid env var race conditions in parallel tests std::env::set_var("TFTSR_ENCRYPTION_KEY", "stable-test-key");
let k1 = derive_aes_key_from_str("stable-test-key").unwrap(); let k1 = derive_aes_key().unwrap();
let k2 = derive_aes_key_from_str("stable-test-key").unwrap(); let k2 = derive_aes_key().unwrap();
assert_eq!(k1, k2); assert_eq!(k1, k2);
std::env::remove_var("TFTSR_ENCRYPTION_KEY");
} }
// Test helper functions that accept key directly (bypass env var) // Test helper functions that accept key directly (bypass env var)

View File

@ -1,40 +1,4 @@
use super::confluence_search::SearchResult; use super::confluence_search::SearchResult;
use crate::integrations::query_expansion::expand_query;
const MAX_EXPANDED_QUERIES: usize = 3;
fn escape_wiql(s: &str) -> String {
s.replace('\'', "''")
.replace('"', "\\\"")
.replace('\\', "\\\\")
.replace('(', "\\(")
.replace(')', "\\)")
.replace(';', "\\;")
.replace('=', "\\=")
}
/// Basic HTML tag stripping to prevent XSS in excerpts
fn strip_html_tags(html: &str) -> String {
let mut result = String::new();
let mut in_tag = false;
for ch in html.chars() {
match ch {
'<' => in_tag = true,
'>' => in_tag = false,
_ if !in_tag => result.push(ch),
_ => {}
}
}
// Clean up whitespace
result
.split_whitespace()
.collect::<Vec<_>>()
.join(" ")
.trim()
.to_string()
}
/// Search Azure DevOps Wiki for content matching the query /// Search Azure DevOps Wiki for content matching the query
pub async fn search_wiki( pub async fn search_wiki(
@ -46,94 +10,90 @@ pub async fn search_wiki(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Use Azure DevOps Search API
let search_url = format!(
"{}/_apis/search/wikisearchresults?api-version=7.0",
org_url.trim_end_matches('/')
);
let mut all_results = Vec::new(); let search_body = serde_json::json!({
"searchText": query,
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { "$top": 5,
// Use Azure DevOps Search API "filters": {
let search_url = format!( "ProjectFilters": [project]
"{}/_apis/search/wikisearchresults?api-version=7.0",
org_url.trim_end_matches('/')
);
let search_body = serde_json::json!({
"searchText": expanded_query,
"$top": 5,
"filters": {
"ProjectFilters": [project]
}
});
tracing::info!("Searching Azure DevOps Wiki with query: {}", expanded_query);
let resp = client
.post(&search_url)
.header("Cookie", &cookie_header)
.header("Accept", "application/json")
.header("Content-Type", "application/json")
.json(&search_body)
.send()
.await
.map_err(|e| format!("Azure DevOps wiki search failed: {e}"))?;
if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
tracing::warn!("Azure DevOps wiki search failed with status {status}: {text}");
continue;
} }
});
let json: serde_json::Value = resp tracing::info!("Searching Azure DevOps Wiki: {}", search_url);
.json()
.await
.map_err(|e| format!("Failed to parse ADO wiki search response: {e}"))?;
if let Some(results_array) = json["results"].as_array() { let resp = client
for item in results_array.iter().take(MAX_EXPANDED_QUERIES) { .post(&search_url)
let title = item["fileName"].as_str().unwrap_or("Untitled").to_string(); .header("Cookie", &cookie_header)
.header("Accept", "application/json")
.header("Content-Type", "application/json")
.json(&search_body)
.send()
.await
.map_err(|e| format!("Azure DevOps wiki search failed: {e}"))?;
let path = item["path"].as_str().unwrap_or(""); if !resp.status().is_success() {
let url = format!( let status = resp.status();
"{}/_wiki/wikis/{}/{}", let text = resp.text().await.unwrap_or_default();
org_url.trim_end_matches('/'), return Err(format!(
project, "Azure DevOps wiki search failed with status {status}: {text}"
path ));
); }
let excerpt = strip_html_tags(item["content"].as_str().unwrap_or("")) let json: serde_json::Value = resp
.chars() .json()
.take(300) .await
.collect::<String>(); .map_err(|e| format!("Failed to parse ADO wiki search response: {e}"))?;
// Fetch full wiki page content let mut results = Vec::new();
let content = if let Some(wiki_id) = item["wiki"]["id"].as_str() {
if let Some(page_path) = item["path"].as_str() { if let Some(results_array) = json["results"].as_array() {
fetch_wiki_page(org_url, wiki_id, page_path, &cookie_header) for item in results_array.iter().take(3) {
.await let title = item["fileName"].as_str().unwrap_or("Untitled").to_string();
.ok()
} else { let path = item["path"].as_str().unwrap_or("");
None let url = format!(
} "{}/_wiki/wikis/{}/{}",
org_url.trim_end_matches('/'),
project,
path
);
let excerpt = item["content"]
.as_str()
.unwrap_or("")
.chars()
.take(300)
.collect::<String>();
// Fetch full wiki page content
let content = if let Some(wiki_id) = item["wiki"]["id"].as_str() {
if let Some(page_path) = item["path"].as_str() {
fetch_wiki_page(org_url, wiki_id, page_path, &cookie_header)
.await
.ok()
} else { } else {
None None
}; }
} else {
None
};
all_results.push(SearchResult { results.push(SearchResult {
title, title,
url, url,
excerpt, excerpt,
content, content,
source: "Azure DevOps".to_string(), source: "Azure DevOps".to_string(),
}); });
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }
/// Fetch full wiki page content /// Fetch full wiki page content
@ -191,68 +151,55 @@ pub async fn search_work_items(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Use WIQL (Work Item Query Language)
let wiql_url = format!(
"{}/_apis/wit/wiql?api-version=7.0",
org_url.trim_end_matches('/')
);
let mut all_results = Vec::new(); let wiql_query = format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.State] FROM WorkItems WHERE [System.TeamProject] = '{project}' AND ([System.Title] CONTAINS '{query}' OR [System.Description] CONTAINS '{query}') ORDER BY [System.ChangedDate] DESC"
);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let wiql_body = serde_json::json!({
// Use WIQL (Work Item Query Language) "query": wiql_query
let wiql_url = format!( });
"{}/_apis/wit/wiql?api-version=7.0",
org_url.trim_end_matches('/')
);
let safe_query = escape_wiql(expanded_query); tracing::info!("Searching Azure DevOps work items");
let wiql_query = format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.State] FROM WorkItems WHERE [System.TeamProject] = '{project}' AND ([System.Title] ~ '{safe_query}' OR [System.Description] ~ '{safe_query}') ORDER BY [System.ChangedDate] DESC"
);
let wiql_body = serde_json::json!({ let resp = client
"query": wiql_query .post(&wiql_url)
}); .header("Cookie", &cookie_header)
.header("Accept", "application/json")
.header("Content-Type", "application/json")
.json(&wiql_body)
.send()
.await
.map_err(|e| format!("ADO work item search failed: {e}"))?;
tracing::info!( if !resp.status().is_success() {
"Searching Azure DevOps work items with query: {}", return Ok(Vec::new()); // Don't fail if work item search fails
expanded_query }
);
let resp = client let json: serde_json::Value = resp
.post(&wiql_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|_| "Failed to parse work item response".to_string())?;
.header("Content-Type", "application/json")
.json(&wiql_body)
.send()
.await
.map_err(|e| format!("ADO work item search failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
continue; // Don't fail if work item search fails
}
let json: serde_json::Value = resp if let Some(work_items) = json["workItems"].as_array() {
.json() // Fetch details for top 3 work items
.await for item in work_items.iter().take(3) {
.map_err(|_| "Failed to parse work item response".to_string())?; if let Some(id) = item["id"].as_i64() {
if let Ok(work_item) = fetch_work_item_details(org_url, id, &cookie_header).await {
if let Some(work_items) = json["workItems"].as_array() { results.push(work_item);
// Fetch details for top 3 work items
for item in work_items.iter().take(MAX_EXPANDED_QUERIES) {
if let Some(id) = item["id"].as_i64() {
if let Ok(work_item) =
fetch_work_item_details(org_url, id, &cookie_header).await
{
all_results.push(work_item);
}
} }
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }
/// Fetch work item details /// Fetch work item details
@ -316,53 +263,3 @@ async fn fetch_work_item_details(
source: "Azure DevOps".to_string(), source: "Azure DevOps".to_string(),
}) })
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_escape_wiql_escapes_single_quotes() {
assert_eq!(escape_wiql("test'single"), "test''single");
}
#[test]
fn test_escape_wiql_escapes_double_quotes() {
assert_eq!(escape_wiql("test\"double"), "test\\\\\"double");
}
#[test]
fn test_escape_wiql_escapes_backslash() {
assert_eq!(escape_wiql("test\\backslash"), r#"test\\backslash"#);
}
#[test]
fn test_escape_wiql_escapes_parens() {
assert_eq!(escape_wiql("test(paren"), r#"test\(paren"#);
assert_eq!(escape_wiql("test)paren"), r#"test\)paren"#);
}
#[test]
fn test_escape_wiql_escapes_semicolon() {
assert_eq!(escape_wiql("test;semi"), r#"test\;semi"#);
}
#[test]
fn test_escape_wiql_escapes_equals() {
assert_eq!(escape_wiql("test=equal"), r#"test\=equal"#);
}
#[test]
fn test_escape_wiql_no_special_chars() {
assert_eq!(escape_wiql("simple query"), "simple query");
}
#[test]
fn test_strip_html_tags() {
let html = "<p>Hello <strong>world</strong>!</p>";
assert_eq!(strip_html_tags(html), "Hello world!");
let html2 = "<div><h1>Title</h1><p>Content</p></div>";
assert_eq!(strip_html_tags(html2), "TitleContent");
}
}

View File

@ -1,9 +1,4 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use url::Url;
use super::query_expansion::expand_query;
const MAX_EXPANDED_QUERIES: usize = 3;
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchResult { pub struct SearchResult {
@ -11,36 +6,10 @@ pub struct SearchResult {
pub url: String, pub url: String,
pub excerpt: String, pub excerpt: String,
pub content: Option<String>, pub content: Option<String>,
pub source: String, pub source: String, // "confluence", "servicenow", "azuredevops"
}
fn canonicalize_url(url: &str) -> String {
Url::parse(url)
.ok()
.map(|u| {
let mut u = u.clone();
u.set_fragment(None);
u.set_query(None);
u.to_string()
})
.unwrap_or_else(|| url.to_string())
}
fn escape_cql(s: &str) -> String {
s.replace('"', "\\\"")
.replace(')', "\\)")
.replace('(', "\\(")
.replace('~', "\\~")
.replace('&', "\\&")
.replace('|', "\\|")
.replace('+', "\\+")
.replace('-', "\\-")
} }
/// Search Confluence for content matching the query /// Search Confluence for content matching the query
///
/// This function expands the user query with related terms, synonyms, and variations
/// to improve search coverage across Confluence spaces.
pub async fn search_confluence( pub async fn search_confluence(
base_url: &str, base_url: &str,
query: &str, query: &str,
@ -49,89 +18,86 @@ pub async fn search_confluence(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Use Confluence CQL search
let search_url = format!(
"{}/rest/api/search?cql=text~\"{}\"&limit=5",
base_url.trim_end_matches('/'),
urlencoding::encode(query)
);
let mut all_results = Vec::new(); tracing::info!("Searching Confluence: {}", search_url);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let resp = client
let safe_query = escape_cql(expanded_query); .get(&search_url)
let search_url = format!( .header("Cookie", &cookie_header)
"{}/rest/api/search?cql=text~\"{}\"&limit=5", .header("Accept", "application/json")
base_url.trim_end_matches('/'), .send()
urlencoding::encode(&safe_query) .await
); .map_err(|e| format!("Confluence search request failed: {e}"))?;
tracing::info!( if !resp.status().is_success() {
"Searching Confluence with expanded query: {}", let status = resp.status();
expanded_query let text = resp.text().await.unwrap_or_default();
); return Err(format!(
"Confluence search failed with status {status}: {text}"
));
}
let resp = client let json: serde_json::Value = resp
.get(&search_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|e| format!("Failed to parse Confluence search response: {e}"))?;
.send()
.await
.map_err(|e| format!("Confluence search request failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
tracing::warn!("Confluence search failed with status {status}: {text}");
continue;
}
let json: serde_json::Value = resp if let Some(results_array) = json["results"].as_array() {
.json() for item in results_array.iter().take(3) {
.await // Take top 3 results
.map_err(|e| format!("Failed to parse Confluence search response: {e}"))?; let title = item["title"].as_str().unwrap_or("Untitled").to_string();
if let Some(results_array) = json["results"].as_array() { let id = item["content"]["id"].as_str();
for item in results_array.iter().take(MAX_EXPANDED_QUERIES) { let space_key = item["content"]["space"]["key"].as_str();
let title = item["title"].as_str().unwrap_or("Untitled").to_string();
let id = item["content"]["id"].as_str(); // Build URL
let space_key = item["content"]["space"]["key"].as_str(); let url = if let (Some(id_str), Some(space)) = (id, space_key) {
format!(
"{}/display/{}/{}",
base_url.trim_end_matches('/'),
space,
id_str
)
} else {
base_url.to_string()
};
let url = if let (Some(id_str), Some(space)) = (id, space_key) { // Get excerpt from search result
format!( let excerpt = item["excerpt"]
"{}/display/{}/{}", .as_str()
base_url.trim_end_matches('/'), .unwrap_or("")
space, .to_string()
id_str .replace("<span class=\"highlight\">", "")
) .replace("</span>", "");
} else {
base_url.to_string()
};
let excerpt = strip_html_tags(item["excerpt"].as_str().unwrap_or("")) // Fetch full page content
.chars() let content = if let Some(content_id) = id {
.take(300) fetch_page_content(base_url, content_id, &cookie_header)
.collect::<String>(); .await
.ok()
} else {
None
};
let content = if let Some(content_id) = id { results.push(SearchResult {
fetch_page_content(base_url, content_id, &cookie_header) title,
.await url,
.ok() excerpt,
} else { content,
None source: "Confluence".to_string(),
}; });
all_results.push(SearchResult {
title,
url,
excerpt,
content,
source: "Confluence".to_string(),
});
}
} }
} }
all_results.sort_by(|a, b| canonicalize_url(&a.url).cmp(&canonicalize_url(&b.url))); Ok(results)
all_results.dedup_by(|a, b| canonicalize_url(&a.url) == canonicalize_url(&b.url));
Ok(all_results)
} }
/// Fetch full content of a Confluence page /// Fetch full content of a Confluence page
@ -219,43 +185,4 @@ mod tests {
let html2 = "<div><h1>Title</h1><p>Content</p></div>"; let html2 = "<div><h1>Title</h1><p>Content</p></div>";
assert_eq!(strip_html_tags(html2), "TitleContent"); assert_eq!(strip_html_tags(html2), "TitleContent");
} }
#[test]
fn test_escape_cql_escapes_special_chars() {
assert_eq!(escape_cql("test\"quote"), r#"test\"quote"#);
assert_eq!(escape_cql("test(paren"), r#"test\(paren"#);
assert_eq!(escape_cql("test)paren"), r#"test\)paren"#);
assert_eq!(escape_cql("test~tilde"), r#"test\~tilde"#);
assert_eq!(escape_cql("test&and"), r#"test\&and"#);
assert_eq!(escape_cql("test|or"), r#"test\|or"#);
assert_eq!(escape_cql("test+plus"), r#"test\+plus"#);
assert_eq!(escape_cql("test-minus"), r#"test\-minus"#);
}
#[test]
fn test_escape_cql_no_special_chars() {
assert_eq!(escape_cql("simple query"), "simple query");
}
#[test]
fn test_canonicalize_url_removes_fragment() {
assert_eq!(
canonicalize_url("https://example.com/page#section"),
"https://example.com/page"
);
}
#[test]
fn test_canonicalize_url_removes_query() {
assert_eq!(
canonicalize_url("https://example.com/page?param=value"),
"https://example.com/page"
);
}
#[test]
fn test_canonicalize_url_handles_malformed() {
// Malformed URLs fall back to original
assert_eq!(canonicalize_url("not a url"), "not a url");
}
} }

View File

@ -4,7 +4,6 @@ pub mod azuredevops_search;
pub mod callback_server; pub mod callback_server;
pub mod confluence; pub mod confluence;
pub mod confluence_search; pub mod confluence_search;
pub mod query_expansion;
pub mod servicenow; pub mod servicenow;
pub mod servicenow_search; pub mod servicenow_search;
pub mod webview_auth; pub mod webview_auth;

View File

@ -1,290 +0,0 @@
//! Query expansion module for integration search
//!
//! This module provides functionality to expand user queries with related terms,
//! synonyms, and variations to improve search results across integrations like
//! Confluence, ServiceNow, and Azure DevOps.
use std::collections::HashSet;
/// Product name synonyms for common product variations.
///
/// Maps common abbreviations/variants to their full names for search expansion.
/// Returns a sorted, deduplicated list of non-empty synonym strings; the list
/// is empty when the query matches none of the known product/term patterns.
fn get_product_synonyms(query: &str) -> Vec<String> {
    let mut synonyms = Vec::new();
    // Lowercase once up front instead of re-allocating for every check.
    let query_lower = query.to_lowercase();

    // VESTA NXT related synonyms
    if query_lower.contains("vesta") || query_lower.contains("vnxt") {
        synonyms.extend(
            [
                "VESTA NXT",
                "Vesta NXT",
                "VNXT",
                "vnxt",
                "Vesta",
                "vesta",
                "VNX",
                "vnx",
            ]
            .into_iter()
            .map(String::from),
        );
    }

    // Version number patterns (e.g., 1.0.12, 1.1.9)
    if query.contains('.') {
        // Extract version-like patterns and add variations
        let version_parts: Vec<&str> = query.split('.').collect();
        if version_parts.len() >= 2 {
            // Variation with the dots removed (e.g., "1.0.12" -> "1012")
            synonyms.push(version_parts.join(""));
            // Partial version prefixes: major.minor, then major.minor.patch
            synonyms.push(version_parts[0..2].join("."));
            if version_parts.len() >= 3 {
                synonyms.push(version_parts[0..3].join("."));
            }
        }
    }

    // Common upgrade-related terms
    if query_lower.contains("upgrade") || query_lower.contains("update") {
        synonyms.extend(
            [
                "upgrade",
                "update",
                "migration",
                "patch",
                "version",
                "install",
                "installation",
            ]
            .into_iter()
            .map(String::from),
        );
    }

    // Remove duplicates and empty strings
    synonyms.sort();
    synonyms.dedup();
    synonyms.retain(|s| !s.is_empty());
    synonyms
}
/// Expand a search query with related terms for better search coverage
///
/// This function takes a user query and expands it with:
/// - Product name synonyms (e.g., "VNXT" -> "VESTA NXT", "Vesta NXT")
/// - Version number variations
/// - Related terms based on query content
///
/// # Arguments
/// * `query` - The original user query
///
/// # Returns
/// A vector of query strings to search, with the original query first
/// followed by expanded variations. Returns empty only if input is empty or
/// whitespace-only. Otherwise, always returns at least the original query.
pub fn expand_query(query: &str) -> Vec<String> {
if query.trim().is_empty() {
return Vec::new();
}
let mut expanded = vec![query.to_string()];
// Get product synonyms
let product_synonyms = get_product_synonyms(query);
expanded.extend(product_synonyms);
// Extract keywords from query for additional expansion
let keywords = extract_keywords(query);
// Add keyword variations
for keyword in keywords.iter().take(5) {
if !expanded.contains(keyword) {
expanded.push(keyword.clone());
}
}
// Add common related terms based on query content
let query_lower = query.to_lowercase();
if query_lower.contains("confluence") || query_lower.contains("documentation") {
expanded.push("docs".to_string());
expanded.push("manual".to_string());
expanded.push("guide".to_string());
}
if query_lower.contains("deploy") || query_lower.contains("deployment") {
expanded.push("deploy".to_string());
expanded.push("deployment".to_string());
expanded.push("release".to_string());
expanded.push("build".to_string());
}
if query_lower.contains("kubernetes") || query_lower.contains("k8s") {
expanded.push("kubernetes".to_string());
expanded.push("k8s".to_string());
expanded.push("pod".to_string());
expanded.push("container".to_string());
}
// Remove duplicates and empty strings
expanded.sort();
expanded.dedup();
expanded.retain(|s| !s.is_empty());
expanded
}
/// Extract important keywords from a search query
///
/// This function removes stop words and extracts meaningful terms
/// for search expansion. Version-like tokens (two or more dots, e.g.
/// "1.0.12") are kept whole; other words are lowercased, with stop words,
/// single characters, and short pure numbers filtered out.
///
/// # Arguments
/// * `query` - The original user query
///
/// # Returns
/// A sorted, deduplicated vector of extracted keywords
fn extract_keywords(query: &str) -> Vec<String> {
    let stop_words: HashSet<&str> = [
        "how", "do", "i", "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
        "have", "has", "had", "having", "do", "does", "did", "doing", "will", "would", "should",
        "could", "can", "may", "might", "must", "to", "from", "in", "on", "at", "by", "for",
        "with", "about", "as", "of", "or", "and", "but", "not", "what", "when", "where", "which",
        "who", "this", "that", "these", "those", "if", "then", "else", "for", "while", "until",
        "against", "between", "into", "through", "during", "before", "after", "above", "below",
        "up", "down", "out", "off", "over", "under", "again", "further", "then", "once", "here",
        "there", "why", "where", "all", "any", "both", "each", "few", "more", "most", "other",
        "some", "such", "no", "nor", "only", "own", "same", "so", "than", "too", "very", "can",
        "just", "should", "now",
    ]
    .into_iter()
    .collect();

    let mut keywords = Vec::new();
    // Walk a shrinking &str slice instead of reallocating a String each step.
    let mut remaining = query;

    while !remaining.is_empty() {
        // Skip leading whitespace
        remaining = remaining.trim_start();
        if remaining.is_empty() {
            break;
        }

        // Try to extract a version number (e.g., 1.0.12, 1.1.9).
        // NOTE: char_indices() yields BYTE offsets, so every slice below is on
        // a valid char boundary. The original used positions from
        // chars().enumerate() (char counts) as slice indices, which panics on
        // multi-byte UTF-8 input.
        if remaining.starts_with(|c: char| c.is_ascii_digit()) {
            let mut end_pos = 0;
            let mut dot_count = 0;
            for (i, c) in remaining.char_indices() {
                if c.is_ascii_digit() || c == '.' {
                    end_pos = i + 1; // digits and '.' are 1 byte each
                    if c == '.' {
                        dot_count += 1;
                    }
                } else {
                    break;
                }
            }
            // Only extract if we have at least 2 dots (e.g., 1.0.12)
            if dot_count >= 2 && end_pos > 0 {
                keywords.push(remaining[..end_pos].to_string());
                remaining = &remaining[end_pos..];
                continue;
            }
        }

        // Find word boundary - split at whitespace or non-alphanumeric
        // (split_pos is the byte offset of the separator character).
        let mut split_pos = remaining.len();
        for (i, c) in remaining.char_indices() {
            if c.is_whitespace() || !c.is_alphanumeric() {
                split_pos = i;
                break;
            }
        }

        // split_pos == 0 means the slice starts with a separator: skip that
        // whole character (not just one byte, which would panic mid-char on
        // UTF-8 separators like '…').
        if split_pos == 0 {
            let ch_len = remaining.chars().next().map_or(1, |c| c.len_utf8());
            remaining = &remaining[ch_len..];
            continue;
        }

        let word = remaining[..split_pos].to_lowercase();
        remaining = &remaining[split_pos..];

        // Skip empty words, single chars, and stop words
        if word.len() < 2 || stop_words.contains(word.as_str()) {
            continue;
        }

        // Keep purely numeric words only when they have 3+ digits
        if word.chars().all(|c| c.is_ascii_digit()) {
            if word.len() >= 3 {
                keywords.push(word);
            }
            continue;
        }

        // Keep words with at least one alphabetic character
        if word.chars().any(|c| c.is_alphabetic()) {
            keywords.push(word);
        }
    }

    keywords.sort();
    keywords.dedup();
    keywords
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_expand_query_with_product_synonyms() {
        let query = "upgrade vesta nxt to 1.1.9";
        let expanded = expand_query(query);

        // Should contain original query
        assert!(expanded.contains(&query.to_string()));

        // Should contain product synonyms in either casing. The original
        // assertion tested `contains("vnxt")` twice; the second arm was
        // clearly meant to cover the upper-case variant.
        assert!(expanded
            .iter()
            .any(|s| s.contains("VNXT") || s.contains("vnxt")));
    }

    #[test]
    fn test_expand_query_with_version_numbers() {
        let query = "version 1.0.12";
        let expanded = expand_query(query);

        // Should contain original query
        assert!(expanded.contains(&query.to_string()));
    }

    #[test]
    fn test_extract_keywords() {
        let query = "How do I upgrade VESTA NXT from 1.0.12 to 1.1.9?";
        let keywords = extract_keywords(query);

        assert!(keywords.contains(&"upgrade".to_string()));
        assert!(keywords.contains(&"vesta".to_string()));
        assert!(keywords.contains(&"nxt".to_string()));
        assert!(keywords.contains(&"1.0.12".to_string()));
        assert!(keywords.contains(&"1.1.9".to_string()));
    }

    #[test]
    fn test_product_synonyms() {
        let synonyms = get_product_synonyms("vesta nxt upgrade");

        // Should contain a VNXT synonym in either casing
        assert!(synonyms
            .iter()
            .any(|s| s.contains("VNXT") || s.contains("vnxt")));
    }

    #[test]
    fn test_empty_query() {
        // expand_query documents that empty / whitespace-only input yields an
        // empty Vec; assert that directly instead of the old tautological
        // `is_empty() || contains("")` check.
        assert!(expand_query("").is_empty());
        assert!(expand_query("   ").is_empty());
    }
}

View File

@ -1,7 +1,4 @@
use super::confluence_search::SearchResult; use super::confluence_search::SearchResult;
use crate::integrations::query_expansion::expand_query;
const MAX_EXPANDED_QUERIES: usize = 3;
/// Search ServiceNow Knowledge Base for content matching the query /// Search ServiceNow Knowledge Base for content matching the query
pub async fn search_servicenow( pub async fn search_servicenow(
@ -12,88 +9,82 @@ pub async fn search_servicenow(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Search Knowledge Base articles
let search_url = format!(
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=5",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
let mut all_results = Vec::new(); tracing::info!("Searching ServiceNow: {}", search_url);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let resp = client
// Search Knowledge Base articles .get(&search_url)
let search_url = format!( .header("Cookie", &cookie_header)
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=5", .header("Accept", "application/json")
instance_url.trim_end_matches('/'), .send()
urlencoding::encode(expanded_query), .await
urlencoding::encode(expanded_query) .map_err(|e| format!("ServiceNow search request failed: {e}"))?;
);
tracing::info!("Searching ServiceNow with query: {}", expanded_query); if !resp.status().is_success() {
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
return Err(format!(
"ServiceNow search failed with status {status}: {text}"
));
}
let resp = client let json: serde_json::Value = resp
.get(&search_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|e| format!("Failed to parse ServiceNow search response: {e}"))?;
.send()
.await
.map_err(|e| format!("ServiceNow search request failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
let status = resp.status();
let text = resp.text().await.unwrap_or_default();
tracing::warn!("ServiceNow search failed with status {status}: {text}");
continue;
}
let json: serde_json::Value = resp if let Some(result_array) = json["result"].as_array() {
.json() for item in result_array.iter().take(3) {
.await // Take top 3 results
.map_err(|e| format!("Failed to parse ServiceNow search response: {e}"))?; let title = item["short_description"]
.as_str()
.unwrap_or("Untitled")
.to_string();
if let Some(result_array) = json["result"].as_array() { let sys_id = item["sys_id"].as_str().unwrap_or("").to_string();
for item in result_array.iter().take(MAX_EXPANDED_QUERIES) {
// Take top 3 results
let title = item["short_description"]
.as_str()
.unwrap_or("Untitled")
.to_string();
let sys_id = item["sys_id"].as_str().unwrap_or("").to_string(); let url = format!(
"{}/kb_view.do?sysparm_article={}",
instance_url.trim_end_matches('/'),
sys_id
);
let url = format!( let excerpt = item["text"]
"{}/kb_view.do?sysparm_article={}", .as_str()
instance_url.trim_end_matches('/'), .unwrap_or("")
sys_id .chars()
); .take(300)
.collect::<String>();
let excerpt = item["text"] // Get full article content
.as_str() let content = item["text"].as_str().map(|text| {
.unwrap_or("") if text.len() > 3000 {
.chars() format!("{}...", &text[..3000])
.take(300) } else {
.collect::<String>(); text.to_string()
}
});
// Get full article content results.push(SearchResult {
let content = item["text"].as_str().map(|text| { title,
if text.len() > 3000 { url,
format!("{}...", &text[..3000]) excerpt,
} else { content,
text.to_string() source: "ServiceNow".to_string(),
} });
});
all_results.push(SearchResult {
title,
url,
excerpt,
content,
source: "ServiceNow".to_string(),
});
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }
/// Search ServiceNow Incidents for related issues /// Search ServiceNow Incidents for related issues
@ -105,78 +96,68 @@ pub async fn search_incidents(
let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies); let cookie_header = crate::integrations::webview_auth::cookies_to_header(cookies);
let client = reqwest::Client::new(); let client = reqwest::Client::new();
let expanded_queries = expand_query(query); // Search incidents
let search_url = format!(
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
let mut all_results = Vec::new(); tracing::info!("Searching ServiceNow incidents: {}", search_url);
for expanded_query in expanded_queries.iter().take(MAX_EXPANDED_QUERIES) { let resp = client
// Search incidents .get(&search_url)
let search_url = format!( .header("Cookie", &cookie_header)
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true", .header("Accept", "application/json")
instance_url.trim_end_matches('/'), .send()
urlencoding::encode(expanded_query), .await
urlencoding::encode(expanded_query) .map_err(|e| format!("ServiceNow incident search failed: {e}"))?;
);
tracing::info!( if !resp.status().is_success() {
"Searching ServiceNow incidents with query: {}", return Ok(Vec::new()); // Don't fail if incident search fails
expanded_query }
);
let resp = client let json: serde_json::Value = resp
.get(&search_url) .json()
.header("Cookie", &cookie_header) .await
.header("Accept", "application/json") .map_err(|_| "Failed to parse incident response".to_string())?;
.send()
.await
.map_err(|e| format!("ServiceNow incident search failed: {e}"))?;
if !resp.status().is_success() { let mut results = Vec::new();
continue; // Don't fail if incident search fails
}
let json: serde_json::Value = resp if let Some(result_array) = json["result"].as_array() {
.json() for item in result_array.iter() {
.await let number = item["number"].as_str().unwrap_or("Unknown");
.map_err(|_| "Failed to parse incident response".to_string())?; let title = format!(
"Incident {}: {}",
number,
item["short_description"].as_str().unwrap_or("No title")
);
if let Some(result_array) = json["result"].as_array() { let sys_id = item["sys_id"].as_str().unwrap_or("");
for item in result_array.iter() { let url = format!(
let number = item["number"].as_str().unwrap_or("Unknown"); "{}/incident.do?sys_id={}",
let title = format!( instance_url.trim_end_matches('/'),
"Incident {}: {}", sys_id
number, );
item["short_description"].as_str().unwrap_or("No title")
);
let sys_id = item["sys_id"].as_str().unwrap_or(""); let description = item["description"].as_str().unwrap_or("").to_string();
let url = format!(
"{}/incident.do?sys_id={}",
instance_url.trim_end_matches('/'),
sys_id
);
let description = item["description"].as_str().unwrap_or("").to_string(); let resolution = item["close_notes"].as_str().unwrap_or("").to_string();
let resolution = item["close_notes"].as_str().unwrap_or("").to_string(); let content = format!("Description: {description}\nResolution: {resolution}");
let content = format!("Description: {description}\nResolution: {resolution}"); let excerpt = content.chars().take(200).collect::<String>();
let excerpt = content.chars().take(200).collect::<String>(); results.push(SearchResult {
title,
all_results.push(SearchResult { url,
title, excerpt,
url, content: Some(content),
excerpt, source: "ServiceNow".to_string(),
content: Some(content), });
source: "ServiceNow".to_string(),
});
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); Ok(results)
all_results.dedup_by(|a, b| a.url == b.url);
Ok(all_results)
} }

View File

@ -6,7 +6,6 @@ use serde_json::Value;
use tauri::WebviewWindow; use tauri::WebviewWindow;
use super::confluence_search::SearchResult; use super::confluence_search::SearchResult;
use crate::integrations::query_expansion::expand_query;
/// Execute an HTTP request from within the webview context /// Execute an HTTP request from within the webview context
/// This automatically includes all cookies (including HttpOnly) from the authenticated session /// This automatically includes all cookies (including HttpOnly) from the authenticated session
@ -124,113 +123,106 @@ pub async fn search_confluence_webview<R: tauri::Runtime>(
base_url: &str, base_url: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); // Extract keywords from the query for better search
// Remove common words and extract important terms
let keywords = extract_keywords(query);
let mut all_results = Vec::new(); // Build CQL query with OR logic for keywords
let cql = if keywords.len() > 1 {
// Multiple keywords - search for any of them
let keyword_conditions: Vec<String> =
keywords.iter().map(|k| format!("text ~ \"{k}\"")).collect();
keyword_conditions.join(" OR ")
} else if !keywords.is_empty() {
// Single keyword
let keyword = &keywords[0];
format!("text ~ \"{keyword}\"")
} else {
// Fallback to original query
format!("text ~ \"{query}\"")
};
for expanded_query in expanded_queries.iter().take(3) { let search_url = format!(
// Extract keywords from the query for better search "{}/rest/api/search?cql={}&limit=10",
// Remove common words and extract important terms base_url.trim_end_matches('/'),
let keywords = extract_keywords(expanded_query); urlencoding::encode(&cql)
);
// Build CQL query with OR logic for keywords tracing::info!("Executing Confluence search via webview with CQL: {}", cql);
let cql = if keywords.len() > 1 {
// Multiple keywords - search for any of them
let keyword_conditions: Vec<String> =
keywords.iter().map(|k| format!("text ~ \"{k}\"")).collect();
keyword_conditions.join(" OR ")
} else if !keywords.is_empty() {
// Single keyword
let keyword = &keywords[0];
format!("text ~ \"{keyword}\"")
} else {
// Fallback to expanded query
format!("text ~ \"{expanded_query}\"")
};
let search_url = format!( let response = fetch_from_webview(webview_window, &search_url, "GET", None).await?;
"{}/rest/api/search?cql={}&limit=10",
base_url.trim_end_matches('/'),
urlencoding::encode(&cql)
);
tracing::info!("Executing Confluence search via webview with CQL: {}", cql); let mut results = Vec::new();
let response = fetch_from_webview(webview_window, &search_url, "GET", None).await?; if let Some(results_array) = response.get("results").and_then(|v| v.as_array()) {
for item in results_array.iter().take(5) {
let title = item["title"].as_str().unwrap_or("Untitled").to_string();
let content_id = item["content"]["id"].as_str();
let space_key = item["content"]["space"]["key"].as_str();
if let Some(results_array) = response.get("results").and_then(|v| v.as_array()) { let url = if let (Some(id), Some(space)) = (content_id, space_key) {
for item in results_array.iter().take(5) { format!(
let title = item["title"].as_str().unwrap_or("Untitled").to_string(); "{}/display/{}/{}",
let content_id = item["content"]["id"].as_str(); base_url.trim_end_matches('/'),
let space_key = item["content"]["space"]["key"].as_str(); space,
id
)
} else {
base_url.to_string()
};
let url = if let (Some(id), Some(space)) = (content_id, space_key) { let excerpt = item["excerpt"]
format!( .as_str()
"{}/display/{}/{}", .unwrap_or("")
base_url.trim_end_matches('/'), .replace("<span class=\"highlight\">", "")
space, .replace("</span>", "");
id
)
} else {
base_url.to_string()
};
let excerpt = item["excerpt"] // Fetch full page content
.as_str() let content = if let Some(id) = content_id {
.unwrap_or("") let content_url = format!(
.replace("<span class=\"highlight\">", "") "{}/rest/api/content/{id}?expand=body.storage",
.replace("</span>", ""); base_url.trim_end_matches('/')
);
// Fetch full page content if let Ok(content_resp) =
let content = if let Some(id) = content_id { fetch_from_webview(webview_window, &content_url, "GET", None).await
let content_url = format!( {
"{}/rest/api/content/{id}?expand=body.storage", if let Some(body) = content_resp
base_url.trim_end_matches('/') .get("body")
); .and_then(|b| b.get("storage"))
if let Ok(content_resp) = .and_then(|s| s.get("value"))
fetch_from_webview(webview_window, &content_url, "GET", None).await .and_then(|v| v.as_str())
{ {
if let Some(body) = content_resp let text = strip_html_simple(body);
.get("body") Some(if text.len() > 3000 {
.and_then(|b| b.get("storage")) format!("{}...", &text[..3000])
.and_then(|s| s.get("value"))
.and_then(|v| v.as_str())
{
let text = strip_html_simple(body);
Some(if text.len() > 3000 {
format!("{}...", &text[..3000])
} else {
text
})
} else { } else {
None text
} })
} else { } else {
None None
} }
} else { } else {
None None
}; }
} else {
None
};
all_results.push(SearchResult { results.push(SearchResult {
title, title,
url, url,
excerpt: excerpt.chars().take(300).collect(), excerpt: excerpt.chars().take(300).collect(),
content, content,
source: "Confluence".to_string(), source: "Confluence".to_string(),
}); });
}
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url));
all_results.dedup_by(|a, b| a.url == b.url);
tracing::info!( tracing::info!(
"Confluence webview search returned {} results", "Confluence webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Extract keywords from a search query /// Extract keywords from a search query
@ -304,99 +296,92 @@ pub async fn search_servicenow_webview<R: tauri::Runtime>(
instance_url: &str, instance_url: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); let mut results = Vec::new();
let mut all_results = Vec::new(); // Search knowledge base
let kb_url = format!(
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=3",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
for expanded_query in expanded_queries.iter().take(3) { tracing::info!("Executing ServiceNow KB search via webview");
// Search knowledge base
let kb_url = format!(
"{}/api/now/table/kb_knowledge?sysparm_query=textLIKE{}^ORshort_descriptionLIKE{}&sysparm_limit=3",
instance_url.trim_end_matches('/'),
urlencoding::encode(expanded_query),
urlencoding::encode(expanded_query)
);
tracing::info!("Executing ServiceNow KB search via webview with expanded query"); if let Ok(kb_response) = fetch_from_webview(webview_window, &kb_url, "GET", None).await {
if let Some(kb_array) = kb_response.get("result").and_then(|v| v.as_array()) {
for item in kb_array {
let title = item["short_description"]
.as_str()
.unwrap_or("Untitled")
.to_string();
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/kb_view.do?sysparm_article={sys_id}",
instance_url.trim_end_matches('/')
);
let text = item["text"].as_str().unwrap_or("");
let excerpt = text.chars().take(300).collect();
let content = Some(if text.len() > 3000 {
format!("{}...", &text[..3000])
} else {
text.to_string()
});
if let Ok(kb_response) = fetch_from_webview(webview_window, &kb_url, "GET", None).await { results.push(SearchResult {
if let Some(kb_array) = kb_response.get("result").and_then(|v| v.as_array()) { title,
for item in kb_array { url,
let title = item["short_description"] excerpt,
.as_str() content,
.unwrap_or("Untitled") source: "ServiceNow".to_string(),
.to_string(); });
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/kb_view.do?sysparm_article={sys_id}",
instance_url.trim_end_matches('/')
);
let text = item["text"].as_str().unwrap_or("");
let excerpt = text.chars().take(300).collect();
let content = Some(if text.len() > 3000 {
format!("{}...", &text[..3000])
} else {
text.to_string()
});
all_results.push(SearchResult {
title,
url,
excerpt,
content,
source: "ServiceNow".to_string(),
});
}
}
}
// Search incidents
let inc_url = format!(
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true",
instance_url.trim_end_matches('/'),
urlencoding::encode(expanded_query),
urlencoding::encode(expanded_query)
);
if let Ok(inc_response) = fetch_from_webview(webview_window, &inc_url, "GET", None).await {
if let Some(inc_array) = inc_response.get("result").and_then(|v| v.as_array()) {
for item in inc_array {
let number = item["number"].as_str().unwrap_or("Unknown");
let title = format!(
"Incident {}: {}",
number,
item["short_description"].as_str().unwrap_or("No title")
);
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/incident.do?sys_id={sys_id}",
instance_url.trim_end_matches('/')
);
let description = item["description"].as_str().unwrap_or("");
let resolution = item["close_notes"].as_str().unwrap_or("");
let content = format!("Description: {description}\nResolution: {resolution}");
let excerpt = content.chars().take(200).collect();
all_results.push(SearchResult {
title,
url,
excerpt,
content: Some(content),
source: "ServiceNow".to_string(),
});
}
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url)); // Search incidents
all_results.dedup_by(|a, b| a.url == b.url); let inc_url = format!(
"{}/api/now/table/incident?sysparm_query=short_descriptionLIKE{}^ORdescriptionLIKE{}&sysparm_limit=3&sysparm_display_value=true",
instance_url.trim_end_matches('/'),
urlencoding::encode(query),
urlencoding::encode(query)
);
if let Ok(inc_response) = fetch_from_webview(webview_window, &inc_url, "GET", None).await {
if let Some(inc_array) = inc_response.get("result").and_then(|v| v.as_array()) {
for item in inc_array {
let number = item["number"].as_str().unwrap_or("Unknown");
let title = format!(
"Incident {}: {}",
number,
item["short_description"].as_str().unwrap_or("No title")
);
let sys_id = item["sys_id"].as_str().unwrap_or("");
let url = format!(
"{}/incident.do?sys_id={sys_id}",
instance_url.trim_end_matches('/')
);
let description = item["description"].as_str().unwrap_or("");
let resolution = item["close_notes"].as_str().unwrap_or("");
let content = format!("Description: {description}\nResolution: {resolution}");
let excerpt = content.chars().take(200).collect();
results.push(SearchResult {
title,
url,
excerpt,
content: Some(content),
source: "ServiceNow".to_string(),
});
}
}
}
tracing::info!( tracing::info!(
"ServiceNow webview search returned {} results", "ServiceNow webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Search Azure DevOps wiki using webview fetch /// Search Azure DevOps wiki using webview fetch
@ -406,89 +391,82 @@ pub async fn search_azuredevops_wiki_webview<R: tauri::Runtime>(
project: &str, project: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); // Extract keywords for better search
let keywords = extract_keywords(query);
let mut all_results = Vec::new(); let search_text = if !keywords.is_empty() {
keywords.join(" ")
} else {
query.to_string()
};
for expanded_query in expanded_queries.iter().take(3) { // Azure DevOps wiki search API
// Extract keywords for better search let search_url = format!(
let keywords = extract_keywords(expanded_query); "{}/{}/_apis/wiki/wikis?api-version=7.0",
org_url.trim_end_matches('/'),
urlencoding::encode(project)
);
let search_text = if !keywords.is_empty() { tracing::info!(
keywords.join(" ") "Executing Azure DevOps wiki search via webview for: {}",
} else { search_text
expanded_query.clone() );
};
// Azure DevOps wiki search API // First, get list of wikis
let search_url = format!( let wikis_response = fetch_from_webview(webview_window, &search_url, "GET", None).await?;
"{}/{}/_apis/wiki/wikis?api-version=7.0",
org_url.trim_end_matches('/'),
urlencoding::encode(project)
);
tracing::info!( let mut results = Vec::new();
"Executing Azure DevOps wiki search via webview for: {}",
search_text
);
// First, get list of wikis if let Some(wikis_array) = wikis_response.get("value").and_then(|v| v.as_array()) {
let wikis_response = fetch_from_webview(webview_window, &search_url, "GET", None).await?; // Search each wiki
for wiki in wikis_array.iter().take(3) {
let wiki_id = wiki["id"].as_str().unwrap_or("");
if let Some(wikis_array) = wikis_response.get("value").and_then(|v| v.as_array()) { if wiki_id.is_empty() {
// Search each wiki continue;
for wiki in wikis_array.iter().take(3) { }
let wiki_id = wiki["id"].as_str().unwrap_or("");
if wiki_id.is_empty() { // Search wiki pages
continue; let pages_url = format!(
} "{}/{}/_apis/wiki/wikis/{}/pages?recursionLevel=Full&includeContent=true&api-version=7.0",
org_url.trim_end_matches('/'),
urlencoding::encode(project),
urlencoding::encode(wiki_id)
);
// Search wiki pages if let Ok(pages_response) =
let pages_url = format!( fetch_from_webview(webview_window, &pages_url, "GET", None).await
"{}/{}/_apis/wiki/wikis/{}/pages?recursionLevel=Full&includeContent=true&api-version=7.0", {
org_url.trim_end_matches('/'), // Try to get "page" field, or use the response itself if it's the page object
urlencoding::encode(project), if let Some(page) = pages_response.get("page") {
urlencoding::encode(wiki_id) search_page_recursive(
); page,
&search_text,
if let Ok(pages_response) = org_url,
fetch_from_webview(webview_window, &pages_url, "GET", None).await project,
{ wiki_id,
// Try to get "page" field, or use the response itself if it's the page object &mut results,
if let Some(page) = pages_response.get("page") { );
search_page_recursive( } else {
page, // Response might be the page object itself
&search_text, search_page_recursive(
org_url, &pages_response,
project, &search_text,
wiki_id, org_url,
&mut all_results, project,
); wiki_id,
} else { &mut results,
// Response might be the page object itself );
search_page_recursive(
&pages_response,
&search_text,
org_url,
project,
wiki_id,
&mut all_results,
);
}
} }
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url));
all_results.dedup_by(|a, b| a.url == b.url);
tracing::info!( tracing::info!(
"Azure DevOps wiki webview search returned {} results", "Azure DevOps wiki webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Recursively search through wiki pages for matching content /// Recursively search through wiki pages for matching content
@ -566,124 +544,115 @@ pub async fn search_azuredevops_workitems_webview<R: tauri::Runtime>(
project: &str, project: &str,
query: &str, query: &str,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let expanded_queries = expand_query(query); // Extract keywords
let keywords = extract_keywords(query);
let mut all_results = Vec::new(); // Check if query contains a work item ID (pure number)
let work_item_id: Option<i64> = keywords
.iter()
.filter(|k| k.chars().all(|c| c.is_numeric()))
.filter_map(|k| k.parse::<i64>().ok())
.next();
for expanded_query in expanded_queries.iter().take(3) { // Build WIQL query
// Extract keywords let wiql_query = if let Some(id) = work_item_id {
let keywords = extract_keywords(expanded_query); // Search by specific ID
format!(
// Check if query contains a work item ID (pure number) "SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
let work_item_id: Option<i64> = keywords FROM WorkItems WHERE [System.Id] = {id}"
.iter() )
.filter(|k| k.chars().all(|c| c.is_numeric())) } else {
.filter_map(|k| k.parse::<i64>().ok()) // Search by text in title/description
.next(); let search_terms = if !keywords.is_empty() {
keywords.join(" ")
// Build WIQL query
let wiql_query = if let Some(id) = work_item_id {
// Search by specific ID
format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
FROM WorkItems WHERE [System.Id] = {id}"
)
} else { } else {
// Search by text in title/description query.to_string()
let search_terms = if !keywords.is_empty() {
keywords.join(" ")
} else {
expanded_query.clone()
};
// Use CONTAINS for text search (case-insensitive)
format!(
"SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
FROM WorkItems WHERE [System.TeamProject] = '{project}' \
AND ([System.Title] CONTAINS '{search_terms}' OR [System.Description] CONTAINS '{search_terms}') \
ORDER BY [System.ChangedDate] DESC"
)
}; };
let wiql_url = format!( // Use CONTAINS for text search (case-insensitive)
"{}/{}/_apis/wit/wiql?api-version=7.0", format!(
org_url.trim_end_matches('/'), "SELECT [System.Id], [System.Title], [System.Description], [System.WorkItemType] \
urlencoding::encode(project) FROM WorkItems WHERE [System.TeamProject] = '{project}' \
); AND ([System.Title] CONTAINS '{search_terms}' OR [System.Description] CONTAINS '{search_terms}') \
ORDER BY [System.ChangedDate] DESC"
)
};
let body = serde_json::json!({ let wiql_url = format!(
"query": wiql_query "{}/{}/_apis/wit/wiql?api-version=7.0",
}) org_url.trim_end_matches('/'),
.to_string(); urlencoding::encode(project)
);
tracing::info!("Executing Azure DevOps work item search via webview"); let body = serde_json::json!({
tracing::debug!("WIQL query: {}", wiql_query); "query": wiql_query
tracing::debug!("Request URL: {}", wiql_url); })
.to_string();
let wiql_response = tracing::info!("Executing Azure DevOps work item search via webview");
fetch_from_webview(webview_window, &wiql_url, "POST", Some(&body)).await?; tracing::debug!("WIQL query: {}", wiql_query);
tracing::debug!("Request URL: {}", wiql_url);
if let Some(work_items) = wiql_response.get("workItems").and_then(|v| v.as_array()) { let wiql_response = fetch_from_webview(webview_window, &wiql_url, "POST", Some(&body)).await?;
// Fetch details for first 5 work items
for item in work_items.iter().take(5) {
if let Some(id) = item.get("id").and_then(|i| i.as_i64()) {
let details_url = format!(
"{}/_apis/wit/workitems/{}?api-version=7.0",
org_url.trim_end_matches('/'),
id
);
if let Ok(details) = let mut results = Vec::new();
fetch_from_webview(webview_window, &details_url, "GET", None).await
{
if let Some(fields) = details.get("fields") {
let title = fields
.get("System.Title")
.and_then(|t| t.as_str())
.unwrap_or("Untitled");
let work_item_type = fields
.get("System.WorkItemType")
.and_then(|t| t.as_str())
.unwrap_or("Item");
let description = fields
.get("System.Description")
.and_then(|d| d.as_str())
.unwrap_or("");
let clean_description = strip_html_simple(description); if let Some(work_items) = wiql_response.get("workItems").and_then(|v| v.as_array()) {
let excerpt = clean_description.chars().take(200).collect(); // Fetch details for first 5 work items
for item in work_items.iter().take(5) {
if let Some(id) = item.get("id").and_then(|i| i.as_i64()) {
let details_url = format!(
"{}/_apis/wit/workitems/{}?api-version=7.0",
org_url.trim_end_matches('/'),
id
);
let url = if let Ok(details) =
format!("{}/_workitems/edit/{id}", org_url.trim_end_matches('/')); fetch_from_webview(webview_window, &details_url, "GET", None).await
{
if let Some(fields) = details.get("fields") {
let title = fields
.get("System.Title")
.and_then(|t| t.as_str())
.unwrap_or("Untitled");
let work_item_type = fields
.get("System.WorkItemType")
.and_then(|t| t.as_str())
.unwrap_or("Item");
let description = fields
.get("System.Description")
.and_then(|d| d.as_str())
.unwrap_or("");
let full_content = if clean_description.len() > 3000 { let clean_description = strip_html_simple(description);
format!("{}...", &clean_description[..3000]) let excerpt = clean_description.chars().take(200).collect();
} else {
clean_description.clone()
};
all_results.push(SearchResult { let url = format!("{}/_workitems/edit/{id}", org_url.trim_end_matches('/'));
title: format!("{work_item_type} #{id}: {title}"),
url, let full_content = if clean_description.len() > 3000 {
excerpt, format!("{}...", &clean_description[..3000])
content: Some(full_content), } else {
source: "Azure DevOps".to_string(), clean_description.clone()
}); };
}
results.push(SearchResult {
title: format!("{work_item_type} #{id}: {title}"),
url,
excerpt,
content: Some(full_content),
source: "Azure DevOps".to_string(),
});
} }
} }
} }
} }
} }
all_results.sort_by(|a, b| a.url.cmp(&b.url));
all_results.dedup_by(|a, b| a.url == b.url);
tracing::info!( tracing::info!(
"Azure DevOps work items webview search returned {} results", "Azure DevOps work items webview search returned {} results",
all_results.len() results.len()
); );
Ok(all_results) Ok(results)
} }
/// Add a comment to an Azure DevOps work item /// Add a comment to an Azure DevOps work item

View File

@ -69,19 +69,14 @@ pub fn run() {
commands::db::add_five_why, commands::db::add_five_why,
commands::db::update_five_why, commands::db::update_five_why,
commands::db::add_timeline_event, commands::db::add_timeline_event,
commands::db::get_timeline_events,
// Analysis / PII // Analysis / PII
commands::analysis::upload_log_file, commands::analysis::upload_log_file,
commands::analysis::upload_log_file_by_content,
commands::analysis::detect_pii, commands::analysis::detect_pii,
commands::analysis::apply_redactions, commands::analysis::apply_redactions,
commands::image::upload_image_attachment, commands::image::upload_image_attachment,
commands::image::upload_image_attachment_by_content,
commands::image::list_image_attachments, commands::image::list_image_attachments,
commands::image::delete_image_attachment, commands::image::delete_image_attachment,
commands::image::upload_paste_image, commands::image::upload_paste_image,
commands::image::upload_file_to_datastore,
commands::image::upload_file_to_datastore_any,
// AI // AI
commands::ai::analyze_logs, commands::ai::analyze_logs,
commands::ai::chat_message, commands::ai::chat_message,
@ -121,7 +116,6 @@ pub fn run() {
commands::system::get_settings, commands::system::get_settings,
commands::system::update_settings, commands::system::update_settings,
commands::system::get_audit_log, commands::system::get_audit_log,
commands::system::get_app_version,
]) ])
.run(tauri::generate_context!()) .run(tauri::generate_context!())
.expect("Error running Troubleshooting and RCA Assistant application"); .expect("Error running Troubleshooting and RCA Assistant application");

View File

@ -39,9 +39,6 @@ pub struct ProviderConfig {
/// Optional: User ID for custom REST API cost tracking (CORE ID email) /// Optional: User ID for custom REST API cost tracking (CORE ID email)
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub user_id: Option<String>, pub user_id: Option<String>,
/// Optional: When true, file uploads go to GenAI datastore instead of prompt
#[serde(skip_serializing_if = "Option::is_none")]
pub use_datastore_upload: Option<bool>,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]

View File

@ -1,12 +1,12 @@
{ {
"productName": "Troubleshooting and RCA Assistant", "productName": "Troubleshooting and RCA Assistant",
"version": "0.2.50", "version": "0.2.10",
"identifier": "com.trcaa.app", "identifier": "com.trcaa.app",
"build": { "build": {
"frontendDist": "../dist", "frontendDist": "../dist",
"devUrl": "http://localhost:1420", "devUrl": "http://localhost:1420",
"beforeDevCommand": "npm run dev", "beforeDevCommand": "npm run dev",
"beforeBuildCommand": "npm run version:update && npm run build" "beforeBuildCommand": "npm run build"
}, },
"app": { "app": {
"security": { "security": {
@ -26,7 +26,7 @@
}, },
"bundle": { "bundle": {
"active": true, "active": true,
"targets": ["deb", "rpm", "nsis"], "targets": "all",
"icon": [ "icon": [
"icons/32x32.png", "icons/32x32.png",
"icons/128x128.png", "icons/128x128.png",
@ -42,6 +42,3 @@
"longDescription": "Structured AI-backed assistant for IT troubleshooting, 5-whys root cause analysis, and post-mortem documentation with offline Ollama support." "longDescription": "Structured AI-backed assistant for IT troubleshooting, 5-whys root cause analysis, and post-mortem documentation with offline Ollama support."
} }
} }

View File

@ -1,4 +1,5 @@
import React, { useState, useEffect } from "react"; import React, { useState, useEffect } from "react";
import { getVersion } from "@tauri-apps/api/app";
import { Routes, Route, NavLink, useLocation } from "react-router-dom"; import { Routes, Route, NavLink, useLocation } from "react-router-dom";
import { import {
Home, Home,
@ -14,7 +15,7 @@ import {
Moon, Moon,
} from "lucide-react"; } from "lucide-react";
import { useSettingsStore } from "@/stores/settingsStore"; import { useSettingsStore } from "@/stores/settingsStore";
import { getAppVersionCmd, loadAiProvidersCmd, testProviderConnectionCmd } from "@/lib/tauriCommands"; import { loadAiProvidersCmd, testProviderConnectionCmd } from "@/lib/tauriCommands";
import Dashboard from "@/pages/Dashboard"; import Dashboard from "@/pages/Dashboard";
import NewIssue from "@/pages/NewIssue"; import NewIssue from "@/pages/NewIssue";
@ -46,10 +47,10 @@ export default function App() {
const [collapsed, setCollapsed] = useState(false); const [collapsed, setCollapsed] = useState(false);
const [appVersion, setAppVersion] = useState(""); const [appVersion, setAppVersion] = useState("");
const { theme, setTheme, setProviders, getActiveProvider } = useSettingsStore(); const { theme, setTheme, setProviders, getActiveProvider } = useSettingsStore();
void useLocation(); const location = useLocation();
useEffect(() => { useEffect(() => {
getAppVersionCmd().then(setAppVersion).catch(() => {}); getVersion().then(setAppVersion).catch(() => {});
}, []); }, []);
// Load providers and auto-test active provider on startup // Load providers and auto-test active provider on startup

View File

@ -67,7 +67,7 @@ export function ImageGallery({ images, onDelete, showWarning = true }: ImageGall
)} )}
<div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-5 gap-4"> <div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-5 gap-4">
{images.map((image) => ( {images.map((image, idx) => (
<div key={image.id} className="group relative rounded-lg overflow-hidden bg-gray-100 border border-gray-200"> <div key={image.id} className="group relative rounded-lg overflow-hidden bg-gray-100 border border-gray-200">
<button <button
onClick={() => { onClick={() => {

View File

@ -1,4 +1,4 @@
import React, { HTMLAttributes } from "react"; import React from "react";
import { cva, type VariantProps } from "class-variance-authority"; import { cva, type VariantProps } from "class-variance-authority";
import { clsx, type ClassValue } from "clsx"; import { clsx, type ClassValue } from "clsx";
@ -6,26 +6,6 @@ function cn(...inputs: ClassValue[]) {
return clsx(inputs); return clsx(inputs);
} }
// ─── Separator (ForwardRef) ───────────────────────────────────────────────────
export const Separator = React.forwardRef<
HTMLDivElement,
HTMLAttributes<HTMLDivElement> & { orientation?: "horizontal" | "vertical" }
>(({ className, orientation = "horizontal", ...props }, ref) => (
<div
ref={ref}
role="separator"
aria-orientation={orientation}
className={cn(
"shrink-0 bg-border",
orientation === "horizontal" ? "h-[1px] w-full" : "h-full w-[1px]",
className
)}
{...props}
/>
));
Separator.displayName = "Separator";
// ─── Button ────────────────────────────────────────────────────────────────── // ─── Button ──────────────────────────────────────────────────────────────────
const buttonVariants = cva( const buttonVariants = cva(
@ -128,7 +108,7 @@ CardFooter.displayName = "CardFooter";
// ─── Input ─────────────────────────────────────────────────────────────────── // ─── Input ───────────────────────────────────────────────────────────────────
export type InputProps = React.InputHTMLAttributes<HTMLInputElement> export interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {}
export const Input = React.forwardRef<HTMLInputElement, InputProps>( export const Input = React.forwardRef<HTMLInputElement, InputProps>(
({ className, type, ...props }, ref) => ( ({ className, type, ...props }, ref) => (
@ -147,7 +127,7 @@ Input.displayName = "Input";
// ─── Label ─────────────────────────────────────────────────────────────────── // ─── Label ───────────────────────────────────────────────────────────────────
export type LabelProps = React.LabelHTMLAttributes<HTMLLabelElement> export interface LabelProps extends React.LabelHTMLAttributes<HTMLLabelElement> {}
export const Label = React.forwardRef<HTMLLabelElement, LabelProps>( export const Label = React.forwardRef<HTMLLabelElement, LabelProps>(
({ className, ...props }, ref) => ( ({ className, ...props }, ref) => (
@ -165,7 +145,7 @@ Label.displayName = "Label";
// ─── Textarea ──────────────────────────────────────────────────────────────── // ─── Textarea ────────────────────────────────────────────────────────────────
export type TextareaProps = React.TextareaHTMLAttributes<HTMLTextAreaElement> export interface TextareaProps extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
export const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>( export const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
({ className, ...props }, ref) => ( ({ className, ...props }, ref) => (
@ -340,7 +320,28 @@ export function Progress({ value = 0, max = 100, className, ...props }: Progress
); );
} }
// ─── Separator ───────────────────────────────────────────────────────────────
interface SeparatorProps extends React.HTMLAttributes<HTMLDivElement> {
orientation?: "horizontal" | "vertical";
}
export function Separator({
orientation = "horizontal",
className,
...props
}: SeparatorProps) {
return (
<div
className={cn(
"shrink-0 bg-border",
orientation === "horizontal" ? "h-[1px] w-full" : "h-full w-[1px]",
className
)}
{...props}
/>
);
}
// ─── RadioGroup ────────────────────────────────────────────────────────────── // ─── RadioGroup ──────────────────────────────────────────────────────────────

View File

@ -331,58 +331,6 @@ When analyzing identity and access issues, focus on these key areas:
Always ask about the Keycloak version, realm configuration (external IdP vs local users vs LDAP), SSSD version and configured domains, and whether this is a first-time setup or a regression.`, Always ask about the Keycloak version, realm configuration (external IdP vs local users vs LDAP), SSSD version and configured domains, and whether this is a first-time setup or a regression.`,
}; };
export const INCIDENT_RESPONSE_FRAMEWORK = `
---
## INCIDENT RESPONSE METHODOLOGY
Follow this structured framework for every triage conversation. Each phase must be completed with evidence before advancing.
### Phase 1: Detection & Evidence Gathering
- **Do NOT propose fixes** until the problem is fully understood
- Gather: error messages, timestamps, affected systems, scope of impact, recent changes
- Ask: "What changed? When did it start? Who/what is affected? What has been tried?"
- Record all evidence with UTC timestamps
- Establish a clear problem statement before proceeding
### Phase 2: Diagnosis & Hypothesis Testing
- Apply the scientific method: form hypotheses, test them with evidence
- **The 3-Fix Rule**: If you cannot confidently identify the root cause after 3 hypotheses, STOP and reassess your assumptions you may be looking at the wrong system or the wrong layer
- Check the most common causes first (Occam's Razor): DNS, certificates, disk space, permissions, recent deployments
- Differentiate between symptoms and causes treat causes, not symptoms
- Use binary search to narrow scope: which component, which layer, which change
### Phase 3: Root Cause Analysis with 5-Whys
- Each "Why" must be backed by evidence, not speculation
- If you cannot provide evidence for a "Why", state what investigation is needed to confirm
- Look for systemic issues, not just proximate causes
- The root cause should explain ALL observed symptoms, not just some
- Common root cause categories: configuration drift, capacity exhaustion, dependency failure, race condition, human error in process
### Phase 4: Resolution & Prevention
- **Immediate fix**: What stops the bleeding right now? (rollback, restart, failover)
- **Permanent fix**: What prevents recurrence? (code fix, config change, automation)
- **Runbook update**: Document the fix for future oncall engineers
- Verify the fix resolves ALL symptoms, not just the primary one
- Monitor for regression after applying the fix
### Phase 5: Post-Incident Review
- Calculate incident metrics: MTTD (detect), MTTA (acknowledge), MTTR (resolve)
- Conduct blameless post-mortem focused on systems and processes
- Identify action items with owners and due dates
- Categories: monitoring gaps, process improvements, technical debt, training needs
- Ask: "What would have prevented this? What would have detected it faster? What would have resolved it faster?"
### Communication Practices
- State your current phase explicitly (e.g., "We are in Phase 2: Diagnosis")
- Summarize findings at each phase transition
- Flag assumptions clearly: "ASSUMPTION: ..." vs "CONFIRMED: ..."
- When advancing the Why level, explicitly state the evidence chain
`;
export function getDomainPrompt(domainId: string): string { export function getDomainPrompt(domainId: string): string {
const domainSpecific = domainPrompts[domainId] ?? ""; return domainPrompts[domainId] ?? "";
if (!domainSpecific) return "";
return domainSpecific + INCIDENT_RESPONSE_FRAMEWORK;
} }

View File

@ -16,7 +16,6 @@ export interface ProviderConfig {
api_format?: string; api_format?: string;
session_id?: string; session_id?: string;
user_id?: string; user_id?: string;
use_datastore_upload?: boolean;
} }
export interface Message { export interface Message {
@ -74,11 +73,9 @@ export interface FiveWhyEntry {
export interface TimelineEvent { export interface TimelineEvent {
id: string; id: string;
issue_id: string;
event_type: string; event_type: string;
description: string; description: string;
metadata: string; created_at: number;
created_at: string;
} }
export interface AiConversation { export interface AiConversation {
@ -106,7 +103,6 @@ export interface IssueDetail {
image_attachments: ImageAttachment[]; image_attachments: ImageAttachment[];
resolution_steps: ResolutionStep[]; resolution_steps: ResolutionStep[];
conversations: AiConversation[]; conversations: AiConversation[];
timeline_events: TimelineEvent[];
} }
export interface IssueSummary { export interface IssueSummary {
@ -271,8 +267,8 @@ export interface TriageMessage {
export const analyzeLogsCmd = (issueId: string, logFileIds: string[], providerConfig: ProviderConfig) => export const analyzeLogsCmd = (issueId: string, logFileIds: string[], providerConfig: ProviderConfig) =>
invoke<AnalysisResult>("analyze_logs", { issueId, logFileIds, providerConfig }); invoke<AnalysisResult>("analyze_logs", { issueId, logFileIds, providerConfig });
export const chatMessageCmd = (issueId: string, message: string, providerConfig: ProviderConfig, systemPrompt?: string) => export const chatMessageCmd = (issueId: string, message: string, providerConfig: ProviderConfig) =>
invoke<ChatResponse>("chat_message", { issueId, message, providerConfig, systemPrompt: systemPrompt ?? null }); invoke<ChatResponse>("chat_message", { issueId, message, providerConfig });
export const listProvidersCmd = () => invoke<ProviderInfo[]>("list_providers"); export const listProvidersCmd = () => invoke<ProviderInfo[]>("list_providers");
@ -281,21 +277,9 @@ export const listProvidersCmd = () => invoke<ProviderInfo[]>("list_providers");
export const uploadLogFileCmd = (issueId: string, filePath: string) => export const uploadLogFileCmd = (issueId: string, filePath: string) =>
invoke<LogFile>("upload_log_file", { issueId, filePath }); invoke<LogFile>("upload_log_file", { issueId, filePath });
export const uploadLogFileByContentCmd = (issueId: string, fileName: string, content: string) =>
invoke<LogFile>("upload_log_file_by_content", { issueId, fileName, content });
export const uploadImageAttachmentCmd = (issueId: string, filePath: string) => export const uploadImageAttachmentCmd = (issueId: string, filePath: string) =>
invoke<ImageAttachment>("upload_image_attachment", { issueId, filePath }); invoke<ImageAttachment>("upload_image_attachment", { issueId, filePath });
export const uploadImageAttachmentByContentCmd = (issueId: string, fileName: string, base64Content: string) =>
invoke<ImageAttachment>("upload_image_attachment_by_content", { issueId, fileName, base64Content });
export const uploadFileToDatastoreCmd = (providerConfig: ProviderConfig, filePath: string) =>
invoke<string>("upload_file_to_datastore", { providerConfig, filePath });
export const uploadFileToDatastoreAnyCmd = (providerConfig: ProviderConfig, filePath: string) =>
invoke<string>("upload_file_to_datastore_any", { providerConfig, filePath });
export const uploadPasteImageCmd = (issueId: string, base64Image: string, mimeType: string) => export const uploadPasteImageCmd = (issueId: string, base64Image: string, mimeType: string) =>
invoke<ImageAttachment>("upload_paste_image", { issueId, base64Image, mimeType }); invoke<ImageAttachment>("upload_paste_image", { issueId, base64Image, mimeType });
@ -364,11 +348,8 @@ export const addFiveWhyCmd = (
export const updateFiveWhyCmd = (entryId: string, answer: string) => export const updateFiveWhyCmd = (entryId: string, answer: string) =>
invoke<void>("update_five_why", { entryId, answer }); invoke<void>("update_five_why", { entryId, answer });
export const addTimelineEventCmd = (issueId: string, eventType: string, description: string, metadata?: string) => export const addTimelineEventCmd = (issueId: string, eventType: string, description: string) =>
invoke<TimelineEvent>("add_timeline_event", { issueId, eventType, description, metadata: metadata ?? null }); invoke<TimelineEvent>("add_timeline_event", { issueId, eventType, description });
export const getTimelineEventsCmd = (issueId: string) =>
invoke<TimelineEvent[]>("get_timeline_events", { issueId });
// ─── Document commands ──────────────────────────────────────────────────────── // ─── Document commands ────────────────────────────────────────────────────────
@ -485,15 +466,10 @@ export const getAllIntegrationConfigsCmd = () =>
// ─── AI Provider Configuration ──────────────────────────────────────────────── // ─── AI Provider Configuration ────────────────────────────────────────────────
export const saveAiProviderCmd = (config: ProviderConfig) => export const saveAiProviderCmd = (config: ProviderConfig) =>
invoke<void>("save_ai_provider", { provider: config }); invoke<void>("save_ai_provider", { config });
export const loadAiProvidersCmd = () => export const loadAiProvidersCmd = () =>
invoke<ProviderConfig[]>("load_ai_providers"); invoke<ProviderConfig[]>("load_ai_providers");
export const deleteAiProviderCmd = (name: string) => export const deleteAiProviderCmd = (name: string) =>
invoke<void>("delete_ai_provider", { name }); invoke<void>("delete_ai_provider", { name });
// ─── System / Version ─────────────────────────────────────────────────────────
export const getAppVersionCmd = () =>
invoke<string>("get_app_version");

View File

@ -3,6 +3,8 @@ import { useNavigate } from "react-router-dom";
import { Search, Download, ExternalLink } from "lucide-react"; import { Search, Download, ExternalLink } from "lucide-react";
import { import {
Card, Card,
CardHeader,
CardTitle,
CardContent, CardContent,
Button, Button,
Input, Input,

View File

@ -1,4 +1,4 @@
import React, { useState, useCallback, useEffect } from "react"; import React, { useState, useCallback, useRef, useEffect } from "react";
import { useNavigate, useParams } from "react-router-dom"; import { useNavigate, useParams } from "react-router-dom";
import { Upload, File, Trash2, ShieldCheck, AlertTriangle, Image as ImageIcon } from "lucide-react"; import { Upload, File, Trash2, ShieldCheck, AlertTriangle, Image as ImageIcon } from "lucide-react";
import { Button, Card, CardHeader, CardTitle, CardContent, Badge } from "@/components/ui"; import { Button, Card, CardHeader, CardTitle, CardContent, Badge } from "@/components/ui";
@ -30,6 +30,8 @@ export default function LogUpload() {
const [isDetecting, setIsDetecting] = useState(false); const [isDetecting, setIsDetecting] = useState(false);
const [error, setError] = useState<string | null>(null); const [error, setError] = useState<string | null>(null);
const fileInputRef = useRef<HTMLInputElement>(null);
const handleDrop = useCallback( const handleDrop = useCallback(
(e: React.DragEvent) => { (e: React.DragEvent) => {
e.preventDefault(); e.preventDefault();
@ -58,7 +60,7 @@ export default function LogUpload() {
const uploaded = await Promise.all( const uploaded = await Promise.all(
files.map(async (entry) => { files.map(async (entry) => {
if (entry.uploaded) return entry; if (entry.uploaded) return entry;
void await entry.file.text(); const content = await entry.file.text();
const logFile = await uploadLogFileCmd(id, entry.file.name); const logFile = await uploadLogFileCmd(id, entry.file.name);
return { ...entry, uploaded: logFile }; return { ...entry, uploaded: logFile };
}) })
@ -127,8 +129,8 @@ export default function LogUpload() {
const handlePaste = useCallback( const handlePaste = useCallback(
async (e: React.ClipboardEvent) => { async (e: React.ClipboardEvent) => {
void e.clipboardData?.items; const items = e.clipboardData?.items;
const imageItems = Array.from(e.clipboardData?.items || []).filter((item: DataTransferItem) => item.type.startsWith("image/")); const imageItems = items ? Array.from(items).filter((item: DataTransferItem) => item.type.startsWith("image/")) : [];
for (const item of imageItems) { for (const item of imageItems) {
const file = item.getAsFile(); const file = item.getAsFile();
@ -179,7 +181,14 @@ export default function LogUpload() {
} }
}; };
const fileToBase64 = (file: File): Promise<string> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => resolve(reader.result as string);
reader.onerror = (err) => reject(err);
reader.readAsDataURL(file);
});
};
const allUploaded = files.length > 0 && files.every((f) => f.uploaded); const allUploaded = files.length > 0 && files.every((f) => f.uploaded);

View File

@ -66,7 +66,7 @@ export default function NewIssue() {
useEffect(() => { useEffect(() => {
const hasAcceptedDisclaimer = localStorage.getItem("tftsr-ai-disclaimer-accepted"); const hasAcceptedDisclaimer = localStorage.getItem("tftsr-ai-disclaimer-accepted");
if (!hasAcceptedDisclaimer) { if (!hasAcceptedDisclaimer) {
localStorage.setItem("tftsr-ai-disclaimer-accepted", "true"); setShowDisclaimer(true);
} }
}, []); }, []);

View File

@ -5,7 +5,7 @@ import { DocEditor } from "@/components/DocEditor";
import { useSettingsStore } from "@/stores/settingsStore"; import { useSettingsStore } from "@/stores/settingsStore";
import { import {
generatePostmortemCmd, generatePostmortemCmd,
addTimelineEventCmd,
updateDocumentCmd, updateDocumentCmd,
exportDocumentCmd, exportDocumentCmd,
type Document_, type Document_,
@ -13,7 +13,7 @@ import {
export default function Postmortem() { export default function Postmortem() {
const { id } = useParams<{ id: string }>(); const { id } = useParams<{ id: string }>();
void useSettingsStore((s) => s.getActiveProvider); const getActiveProvider = useSettingsStore((s) => s.getActiveProvider);
const [doc, setDoc] = useState<Document_ | null>(null); const [doc, setDoc] = useState<Document_ | null>(null);
const [content, setContent] = useState(""); const [content, setContent] = useState("");
@ -28,7 +28,6 @@ export default function Postmortem() {
const generated = await generatePostmortemCmd(id); const generated = await generatePostmortemCmd(id);
setDoc(generated); setDoc(generated);
setContent(generated.content_md); setContent(generated.content_md);
addTimelineEventCmd(id, "postmortem_generated", "Post-mortem document generated").catch(() => {});
} catch (err) { } catch (err) {
setError(String(err)); setError(String(err));
} finally { } finally {
@ -55,7 +54,6 @@ export default function Postmortem() {
try { try {
const path = await exportDocumentCmd(doc.id, doc.title, content, format, ""); const path = await exportDocumentCmd(doc.id, doc.title, content, format, "");
setError(`Document exported to: ${path}`); setError(`Document exported to: ${path}`);
addTimelineEventCmd(id!, "document_exported", `Post-mortem exported as ${format}`).catch(() => {});
setTimeout(() => setError(null), 5000); setTimeout(() => setError(null), 5000);
} catch (err) { } catch (err) {
setError(`Export failed: ${String(err)}`); setError(`Export failed: ${String(err)}`);

View File

@ -8,14 +8,13 @@ import {
generateRcaCmd, generateRcaCmd,
updateDocumentCmd, updateDocumentCmd,
exportDocumentCmd, exportDocumentCmd,
addTimelineEventCmd,
type Document_, type Document_,
} from "@/lib/tauriCommands"; } from "@/lib/tauriCommands";
export default function RCA() { export default function RCA() {
const { id } = useParams<{ id: string }>(); const { id } = useParams<{ id: string }>();
const navigate = useNavigate(); const navigate = useNavigate();
void useSettingsStore((s) => s.getActiveProvider); const getActiveProvider = useSettingsStore((s) => s.getActiveProvider);
const [doc, setDoc] = useState<Document_ | null>(null); const [doc, setDoc] = useState<Document_ | null>(null);
const [content, setContent] = useState(""); const [content, setContent] = useState("");
@ -30,7 +29,6 @@ export default function RCA() {
const generated = await generateRcaCmd(id); const generated = await generateRcaCmd(id);
setDoc(generated); setDoc(generated);
setContent(generated.content_md); setContent(generated.content_md);
addTimelineEventCmd(id, "rca_generated", "RCA document generated").catch(() => {});
} catch (err) { } catch (err) {
setError(String(err)); setError(String(err));
} finally { } finally {
@ -57,7 +55,6 @@ export default function RCA() {
try { try {
const path = await exportDocumentCmd(doc.id, doc.title, content, format, ""); const path = await exportDocumentCmd(doc.id, doc.title, content, format, "");
setError(`Document exported to: ${path}`); setError(`Document exported to: ${path}`);
addTimelineEventCmd(id!, "document_exported", `RCA exported as ${format}`).catch(() => {});
setTimeout(() => setError(null), 5000); setTimeout(() => setError(null), 5000);
} catch (err) { } catch (err) {
setError(`Export failed: ${String(err)}`); setError(`Export failed: ${String(err)}`);

View File

@ -6,6 +6,7 @@ import {
CardTitle, CardTitle,
CardContent, CardContent,
Badge, Badge,
Separator,
} from "@/components/ui"; } from "@/components/ui";
import { getAuditLogCmd, type AuditEntry } from "@/lib/tauriCommands"; import { getAuditLogCmd, type AuditEntry } from "@/lib/tauriCommands";
import { useSettingsStore } from "@/stores/settingsStore"; import { useSettingsStore } from "@/stores/settingsStore";

View File

@ -15,7 +15,6 @@ import {
updateIssueCmd, updateIssueCmd,
addFiveWhyCmd, addFiveWhyCmd,
} from "@/lib/tauriCommands"; } from "@/lib/tauriCommands";
import { getDomainPrompt } from "@/lib/domainPrompts";
import type { TriageMessage } from "@/lib/tauriCommands"; import type { TriageMessage } from "@/lib/tauriCommands";
const CLOSE_PATTERNS = [ const CLOSE_PATTERNS = [
@ -168,8 +167,7 @@ export default function Triage() {
setPendingFiles([]); setPendingFiles([]);
try { try {
const systemPrompt = currentIssue ? getDomainPrompt(currentIssue.category) : undefined; const response = await chatMessageCmd(id, aiMessage, provider);
const response = await chatMessageCmd(id, aiMessage, provider, systemPrompt);
const assistantMsg: TriageMessage = { const assistantMsg: TriageMessage = {
id: `asst-${Date.now()}`, id: `asst-${Date.now()}`,
issue_id: id, issue_id: id,

View File

@ -1,4 +1,4 @@
import { waitForApp } from "../helpers/app"; import { waitForApp, clickByText } from "../helpers/app";
describe("Log Upload Flow", () => { describe("Log Upload Flow", () => {
before(async () => { before(async () => {

View File

@ -1,5 +1,5 @@
import { join } from "path"; import { join } from "path";
import { spawn } from "child_process"; import { spawn, spawnSync } from "child_process";
import type { Options } from "@wdio/types"; import type { Options } from "@wdio/types";
// Path to the tauri-driver binary // Path to the tauri-driver binary

View File

@ -1,5 +1,5 @@
import { describe, it, expect, beforeEach, vi } from "vitest"; import { describe, it, expect, beforeEach, vi } from "vitest";
import { render, screen } from "@testing-library/react"; import { render, screen, fireEvent } from "@testing-library/react";
import Security from "@/pages/Settings/Security"; import Security from "@/pages/Settings/Security";
import * as tauriCommands from "@/lib/tauriCommands"; import * as tauriCommands from "@/lib/tauriCommands";
@ -42,8 +42,11 @@ describe("Audit Log", () => {
it("displays audit entries", async () => { it("displays audit entries", async () => {
render(<Security />); render(<Security />);
// Wait for table to appear after async audit data loads // Wait for audit log to load
const table = await screen.findByRole("table"); await screen.findByText("Audit Log");
// Check that the table has rows (header + data rows)
const table = screen.getByRole("table");
expect(table).toBeInTheDocument(); expect(table).toBeInTheDocument();
const rows = screen.getAllByRole("row"); const rows = screen.getAllByRole("row");
@ -53,7 +56,9 @@ describe("Audit Log", () => {
it("provides way to view transmitted data details", async () => { it("provides way to view transmitted data details", async () => {
render(<Security />); render(<Security />);
// Wait for async data to load and render the table await screen.findByText("Audit Log");
// Should have View/Hide buttons for expanding details
const viewButtons = await screen.findAllByRole("button", { name: /View/i }); const viewButtons = await screen.findAllByRole("button", { name: /View/i });
expect(viewButtons.length).toBeGreaterThan(0); expect(viewButtons.length).toBeGreaterThan(0);
}); });
@ -61,13 +66,14 @@ describe("Audit Log", () => {
it("details column or button exists for viewing data", async () => { it("details column or button exists for viewing data", async () => {
render(<Security />); render(<Security />);
// Wait for async data to load and render the table await screen.findByText("Audit Log");
await screen.findByRole("table");
// The audit log should have a Details column header
const detailsHeader = screen.getByText("Details"); const detailsHeader = screen.getByText("Details");
expect(detailsHeader).toBeInTheDocument(); expect(detailsHeader).toBeInTheDocument();
const viewButtons = screen.getAllByRole("button", { name: /View/i }); // Should have view buttons
const viewButtons = await screen.findAllByRole("button", { name: /View/i });
expect(viewButtons.length).toBe(2); // One for each mock entry expect(viewButtons.length).toBe(2); // One for each mock entry
}); });
}); });

View File

@ -129,12 +129,8 @@ describe("build-images.yml workflow", () => {
expect(wf).toContain("trcaa-linux-arm64:rust1.88-node22"); expect(wf).toContain("trcaa-linux-arm64:rust1.88-node22");
}); });
it("uses alpine:latest with docker-cli (not docker:24-cli which triggers duplicate socket mount in act_runner)", () => { it("uses docker:24-cli image for build jobs", () => {
// act_runner v0.3.1 special-cases docker:* images and adds the socket bind; expect(wf).toContain("docker:24-cli");
// combined with its global socket bind this causes a 'Duplicate mount point' error.
expect(wf).toContain("alpine:latest");
expect(wf).toContain("docker-cli");
expect(wf).not.toContain("docker:24-cli");
}); });
it("runs all three build jobs on linux-amd64 runner", () => { it("runs all three build jobs on linux-amd64 runner", () => {

View File

@ -1,63 +0,0 @@
import { describe, it, expect } from "vitest";
import { getDomainPrompt, DOMAINS, INCIDENT_RESPONSE_FRAMEWORK } from "@/lib/domainPrompts";
describe("Domain Prompts with Incident Response Framework", () => {
it("exports INCIDENT_RESPONSE_FRAMEWORK constant", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toBeDefined();
expect(typeof INCIDENT_RESPONSE_FRAMEWORK).toBe("string");
expect(INCIDENT_RESPONSE_FRAMEWORK.length).toBeGreaterThan(100);
});
it("framework contains all 5 phases", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 1: Detection & Evidence Gathering");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 2: Diagnosis & Hypothesis Testing");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 3: Root Cause Analysis with 5-Whys");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 4: Resolution & Prevention");
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Phase 5: Post-Incident Review");
});
it("framework contains the 3-Fix Rule", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("3-Fix Rule");
});
it("framework contains communication practices", () => {
expect(INCIDENT_RESPONSE_FRAMEWORK).toContain("Communication Practices");
});
it("all defined domains include incident response methodology", () => {
for (const domain of DOMAINS) {
const prompt = getDomainPrompt(domain.id);
if (prompt) {
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
expect(prompt).toContain("Phase 1:");
expect(prompt).toContain("Phase 5:");
}
}
});
it("returns empty string for unknown domain", () => {
expect(getDomainPrompt("nonexistent_domain")).toBe("");
expect(getDomainPrompt("")).toBe("");
});
it("preserves existing Linux domain content", () => {
const prompt = getDomainPrompt("linux");
expect(prompt).toContain("senior Linux systems engineer");
expect(prompt).toContain("RHEL");
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
});
it("preserves existing Kubernetes domain content", () => {
const prompt = getDomainPrompt("kubernetes");
expect(prompt).toContain("Kubernetes platform engineer");
expect(prompt).toContain("k3s");
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
});
it("preserves existing Network domain content", () => {
const prompt = getDomainPrompt("network");
expect(prompt).toContain("network engineer");
expect(prompt).toContain("Fortigate");
expect(prompt).toContain("INCIDENT RESPONSE METHODOLOGY");
});
});

View File

@ -1,5 +1,5 @@
import { describe, it, expect, beforeEach, vi } from "vitest"; import { describe, it, expect, beforeEach, vi } from "vitest";
import { render, screen } from "@testing-library/react"; import { render, screen, fireEvent } from "@testing-library/react";
import { MemoryRouter } from "react-router-dom"; import { MemoryRouter } from "react-router-dom";
import History from "@/pages/History"; import History from "@/pages/History";
import { useHistoryStore } from "@/stores/historyStore"; import { useHistoryStore } from "@/stores/historyStore";

View File

@ -44,13 +44,11 @@ describe("auto-tag release cross-platform artifact handling", () => {
expect(workflow).toContain("UPLOAD_NAME=\"linux-arm64-$NAME\""); expect(workflow).toContain("UPLOAD_NAME=\"linux-arm64-$NAME\"");
}); });
it("uses pre-baked Ubuntu 22.04 cross-compiler image for arm64", () => { it("uses Ubuntu 22.04 with ports mirror for arm64 cross-compile", () => {
const workflow = readFileSync(autoTagWorkflowPath, "utf-8"); const workflow = readFileSync(autoTagWorkflowPath, "utf-8");
// Multiarch ubuntu:22.04 + ports mirror setup moved to pre-baked image; expect(workflow).toContain("ubuntu:22.04");
// verify workflow references the correct image and cross-compile env vars. expect(workflow).toContain("ports.ubuntu.com/ubuntu-ports");
expect(workflow).toContain("trcaa-linux-arm64:rust1.88-node22"); expect(workflow).toContain("jammy");
expect(workflow).toContain("CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc");
expect(workflow).toContain("aarch64-unknown-linux-gnu");
}); });
}); });

View File

@ -35,7 +35,6 @@ const mockIssueDetail = {
}, },
], ],
conversations: [], conversations: [],
timeline_events: [],
}; };
describe("Resolution Page", () => { describe("Resolution Page", () => {

View File

@ -32,7 +32,6 @@ vi.mock("@tauri-apps/plugin-fs", () => ({
exists: vi.fn(() => Promise.resolve(false)), exists: vi.fn(() => Promise.resolve(false)),
})); }));
// Mock console.error to suppress React warnings
const originalError = console.error; const originalError = console.error;
beforeAll(() => { beforeAll(() => {
console.error = (...args: unknown[]) => { console.error = (...args: unknown[]) => {

View File

@ -1,54 +0,0 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
import { invoke } from "@tauri-apps/api/core";
const mockInvoke = vi.mocked(invoke);
describe("Timeline Event Commands", () => {
beforeEach(() => {
mockInvoke.mockReset();
});
it("addTimelineEventCmd calls invoke with correct params", async () => {
const mockEvent = {
id: "te-1",
issue_id: "issue-1",
event_type: "triage_started",
description: "Started",
metadata: "{}",
created_at: "2025-01-15 10:00:00 UTC",
};
mockInvoke.mockResolvedValueOnce(mockEvent as never);
const { addTimelineEventCmd } = await import("@/lib/tauriCommands");
const result = await addTimelineEventCmd("issue-1", "triage_started", "Started");
expect(mockInvoke).toHaveBeenCalledWith("add_timeline_event", {
issueId: "issue-1",
eventType: "triage_started",
description: "Started",
metadata: null,
});
expect(result).toEqual(mockEvent);
});
it("addTimelineEventCmd passes metadata when provided", async () => {
mockInvoke.mockResolvedValueOnce({} as never);
const { addTimelineEventCmd } = await import("@/lib/tauriCommands");
await addTimelineEventCmd("issue-1", "log_uploaded", "File uploaded", '{"file":"app.log"}');
expect(mockInvoke).toHaveBeenCalledWith("add_timeline_event", {
issueId: "issue-1",
eventType: "log_uploaded",
description: "File uploaded",
metadata: '{"file":"app.log"}',
});
});
it("getTimelineEventsCmd calls invoke with correct params", async () => {
mockInvoke.mockResolvedValueOnce([] as never);
const { getTimelineEventsCmd } = await import("@/lib/tauriCommands");
const result = await getTimelineEventsCmd("issue-1");
expect(mockInvoke).toHaveBeenCalledWith("get_timeline_events", { issueId: "issue-1" });
expect(result).toEqual([]);
});
});

View File

@ -1,74 +0,0 @@
# feat: Automated Changelog via git-cliff
## Description
Introduces automated changelog generation using **git-cliff**, a tool that parses
conventional commits and produces formatted Markdown changelogs.
Previously, every Gitea release body contained only the static text `"Release vX.Y.Z"`.
With this change, releases display a categorised, human-readable list of all commits
since the previous version.
**Root cause / motivation:** No changelog tooling existed. The project follows
Conventional Commits throughout but the information was never surfaced to end-users.
**Files changed:**
- `cliff.toml` (new) — git-cliff configuration; defines commit parsers, ignored tags,
output template, and which commit types appear in the changelog
- `CHANGELOG.md` (new) — bootstrapped from all existing tags; maintained by CI going forward
- `.gitea/workflows/auto-tag.yml` — new `changelog` job that runs after `autotag`
- `docs/wiki/CICD-Pipeline.md` — "Changelog Generation" section added
## Acceptance Criteria
- [ ] `cliff.toml` present at repo root with working Tera template
- [ ] `CHANGELOG.md` present at repo root, bootstrapped from all existing semver tags
- [ ] `changelog` job in `auto-tag.yml` runs after `autotag` (parallel with build jobs)
- [ ] Each Gitea release body shows grouped conventional-commit entries instead of
static `"Release vX.Y.Z"`
- [ ] `CHANGELOG.md` committed to master on every release with `[skip ci]` suffix
(no infinite re-trigger loop)
- [ ] `CHANGELOG.md` uploaded as a downloadable release asset
- [ ] CI/chore/build/test/style commits excluded from changelog output
- [ ] `docs/wiki/CICD-Pipeline.md` documents the changelog generation process
## Work Implemented
### `cliff.toml`
- Tera template with proper whitespace control (`-%}` / `{%- `) for clean output
- Included commit types: `feat`, `fix`, `perf`, `docs`, `refactor`
- Excluded commit types: `ci`, `chore`, `build`, `test`, `style`
- `ignore_tags = "rc|alpha|beta"` — pre-release tags excluded from version boundaries
- `filter_unconventional = true` — non-conventional commits dropped silently
- `sort_commits = "oldest"` — chronological order within each version
### `CHANGELOG.md`
- Bootstrapped locally using git-cliff v2.7.0 (aarch64 musl binary)
- Covers all tagged versions from `v0.1.0` through `v0.2.49` plus `[Unreleased]`
- 267 lines covering the full project history
### `.gitea/workflows/auto-tag.yml``changelog` job
- `needs: autotag` — waits for the new tag to exist before running
- Full history clone: `git fetch --tags --depth=2147483647` so git-cliff can resolve
all version boundaries
- git-cliff v2.7.0 downloaded as a static x86_64 musl binary (~5 MB); no custom
image required
- Generates full `CHANGELOG.md` and per-release notes (`--latest --strip all`)
- PATCHes the Gitea release body via API with JSON-safe escaping (`jq -Rs .`)
- Commits `CHANGELOG.md` to master with `[skip ci]` to prevent workflow re-trigger
- Deletes any existing `CHANGELOG.md` asset before re-uploading (rerun-safe)
- Runs in parallel with all build jobs — no added wall-clock latency
### `docs/wiki/CICD-Pipeline.md`
- Added "Changelog Generation" section before "Known Issues & Fixes"
- Describes the five-step process, cliff.toml settings, and loop prevention mechanism
## Testing Needed
- [ ] Merge this PR to master; verify `changelog` CI job succeeds in Gitea Actions
- [ ] Check Gitea release body for the new version tag — should show grouped commit list
- [ ] Verify `CHANGELOG.md` was committed to master (check git log after CI runs)
- [ ] Verify `CHANGELOG.md` appears as a downloadable asset on the release page
- [ ] Push a subsequent commit to master; confirm the `[skip ci]` CHANGELOG commit does
NOT trigger a second run of `auto-tag.yml`
- [ ] Confirm CI/chore commits are absent from the release body

View File

@ -1,107 +0,0 @@
# CI Runner Speed Optimization via Pre-baked Images + Caching
## Description
Every CI run (both `test.yml` and `auto-tag.yml`) was installing system packages from scratch
on each job invocation: `apt-get update`, Tauri system libs, Node.js via nodesource, and in
the arm64 job — a full `rustup` install. This was the primary cause of slow builds.
The repository already contains pre-baked builder Docker images (`.docker/Dockerfile.*`) and a
`build-images.yml` workflow to push them to the local Gitea registry at `172.0.0.29:3000`.
These images were never referenced by the actual CI jobs — a critical gap. This work closes
that gap and adds `actions/cache@v3` for Cargo and npm.
## Acceptance Criteria
- [ ] `Dockerfile.linux-amd64` includes `rustfmt` and `clippy` components
- [ ] `Dockerfile.linux-arm64` includes `rustfmt` and `clippy` components
- [ ] `test.yml` Rust jobs use `172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22`
- [ ] `test.yml` Rust jobs have no inline `apt-get` or `rustup component add` steps
- [ ] `test.yml` Rust jobs include `actions/cache@v3` for `~/.cargo/registry`
- [ ] `test.yml` frontend jobs include `actions/cache@v3` for `~/.npm`
- [ ] `auto-tag.yml` `build-linux-amd64` uses pre-baked `trcaa-linux-amd64` image
- [ ] `auto-tag.yml` `build-windows-amd64` uses pre-baked `trcaa-windows-cross` image
- [ ] `auto-tag.yml` `build-linux-arm64` uses pre-baked `trcaa-linux-arm64` image
- [ ] All three build jobs have no `Install dependencies` step
- [ ] All three build jobs include `actions/cache@v3` for Cargo and npm
- [ ] `docs/wiki/CICD-Pipeline.md` documents pre-baked images, cache keys, and server prerequisites
- [ ] `build-images.yml` triggered manually before merging to ensure images exist in registry
## Work Implemented
### `.docker/Dockerfile.linux-amd64`
Added `RUN rustup component add rustfmt clippy` after the existing target add line.
The `rust-fmt-check` and `rust-clippy` CI jobs now rely on these being pre-installed
in the image rather than installing them at job runtime.
### `.docker/Dockerfile.linux-arm64`
Added `&& /root/.cargo/bin/rustup component add rustfmt clippy` appended to the
existing `rustup` installation RUN command (chained with `&&` to keep it one layer).
### `.gitea/workflows/test.yml`
- **rust-fmt-check**, **rust-clippy**, **rust-tests**: switched container image from
`rust:1.88-slim``172.0.0.29:3000/sarman/trcaa-linux-amd64:rust1.88-node22`.
Removed `apt-get install git` from Checkout steps (git is pre-installed in image).
Removed `apt-get install libwebkit2gtk-...` steps.
Removed `rustup component add rustfmt` and `rustup component add clippy` steps.
Added `actions/cache@v3` step for `~/.cargo/registry/index`, `~/.cargo/registry/cache`,
`~/.cargo/git/db` keyed on `Cargo.lock` hash.
- **frontend-typecheck**, **frontend-tests**: kept `node:22-alpine` image (no change needed).
Added `actions/cache@v3` step for `~/.npm` keyed on `package-lock.json` hash.
### `.gitea/workflows/auto-tag.yml`
- **build-linux-amd64**: image `rust:1.88-slim``trcaa-linux-amd64:rust1.88-node22`.
Removed Checkout apt-get install git, removed entire Install dependencies step.
Removed `rustup target add x86_64-unknown-linux-gnu` from Build step. Added cargo + npm cache.
- **build-windows-amd64**: image `rust:1.88-slim``trcaa-windows-cross:rust1.88-node22`.
Removed Checkout apt-get install git, removed entire Install dependencies step.
Removed `rustup target add x86_64-pc-windows-gnu` from Build step.
Added cargo (with `-windows-` suffix key to avoid collision) + npm cache.
- **build-linux-arm64**: image `ubuntu:22.04``trcaa-linux-arm64:rust1.88-node22`.
Removed Checkout apt-get install git, removed entire Install dependencies step (~40 lines).
Removed `. "$HOME/.cargo/env"` (PATH already set via `ENV` in Dockerfile).
Removed `rustup target add aarch64-unknown-linux-gnu` from Build step.
Added cargo (with `-arm64-` suffix key) + npm cache.
### `docs/wiki/CICD-Pipeline.md`
Added two new sections before the Test Pipeline section:
- **Pre-baked Builder Images**: table of all three images and their contents, rebuild
triggers, how-to-rebuild instructions, and the insecure-registries Docker daemon
prerequisite for 172.0.0.29.
- **Cargo and npm Caching**: documents the `actions/cache@v3` key patterns in use,
including the per-platform cache key suffixes for cross-compile jobs.
Updated the Test Pipeline section to reference the correct pre-baked image name.
Updated the Release Pipeline job table to show which image each build job uses.
## Testing Needed
1. **Pre-build images** (prerequisite): Trigger `build-images.yml` via `workflow_dispatch`
on Gitea Actions UI. Confirm all 3 images are pushed and visible in the registry.
2. **Server prerequisite**: Confirm `/etc/docker/daemon.json` on `172.0.0.29` contains
`{"insecure-registries":["172.0.0.29:3000"]}` and Docker was restarted after.
3. **PR test suite**: Open a PR with these changes. Verify:
- All 5 test jobs pass (`rust-fmt-check`, `rust-clippy`, `rust-tests`,
`frontend-typecheck`, `frontend-tests`)
- Job logs show no `apt-get` or `rustup component add` output
- Cache hit messages appear on second run
4. **Release build**: Merge to master. Verify `auto-tag.yml` runs and:
- All 3 Linux/Windows build jobs start without Install dependencies step
- Artifacts are produced and uploaded to the Gitea release
- Total release time is significantly reduced (~7 min vs ~25 min before)
5. **Expected time savings after caching warms up**:
| Job | Before | After |
|-----|--------|-------|
| rust-fmt-check | ~2 min | ~20 sec |
| rust-clippy | ~4 min | ~45 sec |
| rust-tests | ~5 min | ~1.5 min |
| frontend-typecheck | ~2 min | ~30 sec |
| frontend-tests | ~3 min | ~40 sec |
| build-linux-amd64 | ~10 min | ~3 min |
| build-windows-amd64 | ~12 min | ~4 min |
| build-linux-arm64 | ~15 min | ~4 min |
| PR test total (parallel) | ~5 min | ~1.5 min |
| Release total | ~25 min | ~7 min |