// src/lib/tauriCommands.ts
//
// Type-safe TypeScript wrappers for the Tauri IPC commands exposed by the
// Rust backend, plus interfaces mirroring the backend's model types.
// Part of the TFTSR IT Triage & RCA application (React 18 + TypeScript + Vite
// frontend, Tauri 2.x backend).
import { invoke } from "@tauri-apps/api/core";
// ─── Types matching Rust backend models ───────────────────────────────────────
export interface ProviderConfig {
provider_type?: string;
max_tokens?: number;
temperature?: number;
name: string;
api_url: string;
api_key: string;
model: string;
custom_endpoint_path?: string;
custom_auth_header?: string;
custom_auth_prefix?: string;
api_format?: string;
session_id?: string;
user_id?: string;
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
}
/** A single chat message exchanged with an AI provider. */
export interface Message {
  // Chat role — presumably "system" | "user" | "assistant"; confirm backend enum.
  role: string;
  content: string;
}

/** Token accounting reported by providers that return usage data. */
export interface TokenUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

/** One completed chat turn from the backend. */
export interface ChatResponse {
  content: string;
  /** Model that actually served the request. */
  model: string;
  /** Present only when the provider reports token usage. */
  usage?: TokenUsage;
}

/** Structured output of the AI log-analysis command. */
export interface AnalysisResult {
  summary: string;
  key_findings: string[];
  /** Suggested first "why" question to seed the 5-Whys workflow. */
  suggested_why1: string;
  severity_assessment: string;
}

/** Static metadata about an available AI provider. */
export interface ProviderInfo {
  name: string;
  supports_streaming: boolean;
  models: string[];
}
/** Full issue row as stored by the backend. */
export interface Issue {
  id: string;
  title: string;
  description: string;
  /** Priority label — "P3" is the default applied by createIssueCmd. */
  severity: string;
  status: string;
  /** IT domain; createIssueCmd maps NewIssue.domain onto this field. */
  category: string;
  source: string;
  created_at: string;
  updated_at: string;
  /** Set once the issue is resolved. */
  resolved_at?: string;
  assigned_to: string;
  // Tags as a single string — presumably a serialized list; confirm backend format.
  tags: string;
}

/** One entry in an issue's 5-Whys chain. */
export interface FiveWhyEntry {
  id: string;
  /** 1-based position in the chain. */
  why_number: number;
  question: string;
  answer?: string;
  // NOTE(review): numeric timestamp, unlike Issue's string timestamps — confirm serialization.
  created_at: number;
}

/** One event on an issue's timeline. */
export interface TimelineEvent {
  id: string;
  event_type: string;
  description: string;
  // NOTE(review): numeric timestamp, unlike Issue's string timestamps — confirm serialization.
  created_at: number;
}

/** Header of a stored AI conversation attached to an issue. */
export interface AiConversation {
  id: string;
  issue_id: string;
  provider: string;
  model: string;
  created_at: string;
  title: string;
}

/** One step in an issue's resolution (5-Whys) record. */
export interface ResolutionStep {
  id: string;
  issue_id: string;
  /** Ordering key within the issue's step list. */
  step_order: number;
  why_question: string;
  answer: string;
  evidence: string;
  created_at: string;
}

/** Aggregate returned by get_issue: the issue plus its related records. */
export interface IssueDetail {
  issue: Issue;
  log_files: LogFile[];
  resolution_steps: ResolutionStep[];
  conversations: AiConversation[];
}

/** Lightweight row for issue list / history views. */
export interface IssueSummary {
  id: string;
  title: string;
  severity: string;
  status: string;
  category: string;
  created_at: string;
  updated_at: string;
  domain?: string;
  log_count: number;
  step_count: number;
}

/** Filter/paging options for list_issues; all fields optional. */
export interface IssueListQuery {
  status?: string;
  domain?: string;
  severity?: string;
  search?: string;
  limit?: number;
  offset?: number;
}

/** Input shape for creating an issue from the New Issue page. */
export interface NewIssue {
  title: string;
  /** IT domain — becomes Issue.category on the backend. */
  domain: string;
  description?: string;
  severity?: string;
}
/** Metadata for a log file uploaded against an issue. */
export interface LogFile {
  id: string;
  issue_id: string;
  file_name: string;
  file_path: string;
  file_size: number;
  mime_type: string;
  /** Content hash — presumably SHA-256; confirm with backend. */
  content_hash: string;
  uploaded_at: string;
  /** True once PII redactions have been applied to this file. */
  redacted: boolean;
}

/** One detected PII occurrence within a log file. */
export interface PiiSpan {
  id: string;
  /** Category of PII matched by the backend's detection patterns. */
  pii_type: string;
  /** Character offsets of the match — inclusive/exclusive convention not visible here; confirm. */
  start: number;
  end: number;
  /** The matched original text. */
  original: string;
  /** Proposed redaction replacement text. */
  replacement: string;
}

/** Result of running PII detection over one log file. */
export interface PiiDetectionResult {
  log_file_id: string;
  detections: PiiSpan[];
  total_pii_found: number;
}

/** A redacted copy of a log file produced by apply_redactions. */
export interface RedactedLogFile {
  id: string;
  /** The LogFile this redacted copy was derived from. */
  original_file_id: string;
  file_name: string;
  file_hash: string;
  /** Number of redactions applied. */
  redaction_count: number;
}
/**
 * A generated document (RCA / post-mortem) stored as Markdown.
 * Trailing underscore avoids clashing with the DOM `Document` type.
 */
export interface Document_ {
  id: string;
  issue_id: string;
  /** Document kind discriminator (RCA vs post-mortem). */
  doc_type: string;
  title: string;
  /** Full Markdown body. */
  content_md: string;
  // NOTE(review): numeric timestamps, unlike Issue's string timestamps — confirm serialization.
  created_at: number;
  updated_at: number;
}

/** Host hardware probe result, used for Ollama model recommendations. */
export interface HardwareInfo {
  total_ram_gb: number;
  cpu_arch: string;
  gpu_vendor?: string;
  gpu_vram_gb?: number;
}

/** One local-model suggestion derived from detected hardware. */
export interface ModelRecommendation {
  name: string;
  size: string;
  min_ram_gb: number;
  description: string;
  /** True when this model fits the detected hardware. */
  recommended: boolean;
}

/** A model present in the local Ollama instance. */
export interface OllamaModel {
  name: string;
  /** Size in bytes — presumably; confirm units with backend. */
  size: number;
  modified: string;
}

/** Installation/run state of the local Ollama service. */
export interface OllamaStatus {
  installed: boolean;
  version?: string;
  running: boolean;
}

/** Platform-specific Ollama installation instructions. */
export interface InstallGuide {
  platform: string;
  steps: string[];
  url: string;
}

/** One row of the audit log. */
export interface AuditEntry {
  id: string;
  timestamp: string;
  action: string;
  entity_type: string;
  entity_id: string;
  user_id: string;
  details: string;
}

/** Filter options for reading the audit log; all fields optional. */
export interface AuditFilter {
  action?: string;
  entity_type?: string;
  entity_id?: string;
  limit?: number;
}

/** Persisted application settings. */
export interface AppSettings {
  theme: string;
  ai_providers: ProviderConfig[];
  /** Currently selected provider, if any; falls back to default_provider. */
  active_provider?: string;
  default_provider: string;
  default_model: string;
  ollama_url: string;
}

// ─── TriageMessage (for UI store, not a DB type) ──────────────────────────────
/** Chat message shape used by the triage UI store (not persisted as-is). */
export interface TriageMessage {
  id: string;
  issue_id: string;
  role: string;
  content: string;
  /** 5-Whys level this message belongs to, when applicable. */
  why_level?: number;
  created_at: number;
}
// ─── AI commands ──────────────────────────────────────────────────────────────

/** Run AI analysis over the given log files for an issue. */
export function analyzeLogsCmd(
  issueId: string,
  logFileIds: string[],
  providerConfig: ProviderConfig,
): Promise<AnalysisResult> {
  return invoke<AnalysisResult>("analyze_logs", { issueId, logFileIds, providerConfig });
}

/** Send one chat message in the context of an issue. */
export function chatMessageCmd(
  issueId: string,
  message: string,
  providerConfig: ProviderConfig,
): Promise<ChatResponse> {
  return invoke<ChatResponse>("chat_message", { issueId, message, providerConfig });
}

/** List the AI providers the backend knows about. */
export function listProvidersCmd(): Promise<ProviderInfo[]> {
  return invoke<ProviderInfo[]>("list_providers");
}

// ─── Analysis / PII commands ──────────────────────────────────────────────────

/** Register a log file on disk against an issue. */
export function uploadLogFileCmd(issueId: string, filePath: string): Promise<LogFile> {
  return invoke<LogFile>("upload_log_file", { issueId, filePath });
}

/** Run PII detection over an uploaded log file. */
export function detectPiiCmd(logFileId: string): Promise<PiiDetectionResult> {
  return invoke<PiiDetectionResult>("detect_pii", { logFileId });
}

/** Apply the user-approved subset of detected PII redactions. */
export function applyRedactionsCmd(
  logFileId: string,
  approvedSpanIds: string[],
): Promise<RedactedLogFile> {
  return invoke<RedactedLogFile>("apply_redactions", { logFileId, approvedSpanIds });
}
// ─── Issue CRUD ───────────────────────────────────────────────────────────────
export const testProviderConnectionCmd = (providerConfig: ProviderConfig) =>
invoke<ChatResponse>("test_provider_connection", { providerConfig });
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
export const createIssueCmd = (newIssue: NewIssue) =>
invoke<Issue>("create_issue", {
title: newIssue.title,
description: newIssue.description ?? "",
severity: newIssue.severity ?? "P3",
category: newIssue.domain,
});
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
export const getIssueCmd = (issueId: string) =>
invoke<IssueDetail>("get_issue", { issueId });
export const listIssuesCmd = (query: IssueListQuery) =>
invoke<IssueSummary[]>("list_issues", { filter: query });
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
export const updateIssueCmd = (
issueId: string,
updates: { title?: string; status?: string; severity?: string; description?: string; domain?: string }
) => invoke<Issue>("update_issue", { issueId, updates });
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
export const deleteIssueCmd = (issueId: string) =>
invoke<void>("delete_issue", { issueId });
export const searchIssuesCmd = (query: string) =>
invoke<IssueSummary[]>("search_issues", { query });
export interface IssueMessage {
id: string;
conversation_id: string;
role: string;
content: string;
token_count: number;
created_at: string;
}
export const getIssueMessagesCmd = (issueId: string) =>
invoke<IssueMessage[]>("get_issue_messages", { issueId });
export const addFiveWhyCmd = (
issueId: string,
stepOrder: number,
whyQuestion: string,
answer: string,
evidence: string
) => invoke<ResolutionStep>("add_five_why", { issueId, stepOrder, whyQuestion, answer, evidence });
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
export const updateFiveWhyCmd = (entryId: string, answer: string) =>
invoke<void>("update_five_why", { entryId, answer });
export const addTimelineEventCmd = (issueId: string, eventType: string, description: string) =>
invoke<TimelineEvent>("add_timeline_event", { issueId, eventType, description });
// ─── Document commands ────────────────────────────────────────────────────────
export const generateRcaCmd = (issueId: string) => invoke<Document_>("generate_rca", { issueId });
export const generatePostmortemCmd = (issueId: string) =>
invoke<Document_>("generate_postmortem", { issueId });
export const updateDocumentCmd = (docId: string, contentMd: string) =>
invoke<Document_>("update_document", { docId, contentMd });
export const exportDocumentCmd = (docId: string, title: string, contentMd: string, format: string, outputDir: string) =>
invoke<string>("export_document", { title, contentMd, format, outputDir });
feat: initial implementation of TFTSR IT Triage & RCA application Implements Phases 1-8 of the TFTSR implementation plan. Rust backend (Tauri 2.x, src-tauri/): - Multi-provider AI: OpenAI-compatible, Anthropic, Gemini, Mistral, Ollama - PII detection engine: 11 regex patterns with overlap resolution - SQLCipher AES-256 encrypted database with 10 versioned migrations - 28 Tauri IPC commands for triage, analysis, document, and system ops - Ollama: hardware probe, model recommendations, pull/delete with events - RCA and blameless post-mortem Markdown document generators - PDF export via printpdf - Audit log: SHA-256 hash of every external data send - Integration stubs for Confluence, ServiceNow, Azure DevOps (v0.2) Frontend (React 18 + TypeScript + Vite, src/): - 9 pages: full triage workflow NewIssue→LogUpload→Triage→Resolution→RCA→Postmortem→History+Settings - 7 components: ChatWindow, TriageProgress, PiiDiffViewer, DocEditor, HardwareReport, ModelSelector, UI primitives - 3 Zustand stores: session, settings (persisted), history - Type-safe tauriCommands.ts matching Rust backend types exactly - 8 IT domain system prompts (Linux, Windows, Network, K8s, DB, Virt, HW, Obs) DevOps: - .woodpecker/test.yml: rustfmt, clippy, cargo test, tsc, vitest on every push - .woodpecker/release.yml: linux/amd64 + linux/arm64 builds, Gogs release upload Verified: - cargo check: zero errors - tsc --noEmit: zero errors - vitest run: 13/13 unit tests passing Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 03:36:25 +00:00
// ─── Ollama & System ──────────────────────────────────────────────────────────

/** Probe the local Ollama installation (installed / version / running). */
export function checkOllamaInstalledCmd(): Promise<OllamaStatus> {
  return invoke<OllamaStatus>("check_ollama_installed");
}

/** Fetch platform-specific Ollama installation instructions. */
export function getOllamaInstallGuideCmd(platform: string): Promise<InstallGuide> {
  return invoke<InstallGuide>("get_ollama_install_guide", { platform });
}

/** List models currently available in the local Ollama instance. */
export function listOllamaModelsCmd(): Promise<OllamaModel[]> {
  return invoke<OllamaModel[]>("list_ollama_models");
}

/** Start pulling a model into the local Ollama instance. */
export function pullOllamaModelCmd(modelName: string): Promise<void> {
  return invoke<void>("pull_ollama_model", { modelName });
}

/** Remove a model from the local Ollama instance. */
export function deleteOllamaModelCmd(modelName: string): Promise<void> {
  return invoke<void>("delete_ollama_model", { modelName });
}

/** Probe the host hardware (RAM, CPU architecture, GPU). */
export function detectHardwareCmd(): Promise<HardwareInfo> {
  return invoke<HardwareInfo>("detect_hardware");
}

/** Get local-model recommendations derived from detected hardware. */
export function recommendModelsCmd(): Promise<ModelRecommendation[]> {
  return invoke<ModelRecommendation[]>("recommend_models");
}

// ─── Settings & Audit ─────────────────────────────────────────────────────────

/** Load the persisted application settings. */
export function getSettingsCmd(): Promise<AppSettings> {
  return invoke<AppSettings>("get_settings");
}

/** Merge a partial settings object into the stored settings. */
export function updateSettingsCmd(partialSettings: Partial<AppSettings>): Promise<AppSettings> {
  return invoke<AppSettings>("update_settings", { partialSettings });
}

/** Read audit-log entries matching the given filter. */
export function getAuditLogCmd(filter: AuditFilter): Promise<AuditEntry[]> {
  return invoke<AuditEntry[]>("get_audit_log", { filter });
}
feat: add OAuth2 frontend UI and complete integration flow Phase 2.2: OAuth2 flow - FRONTEND COMPLETE ✅ Implemented: - TypeScript command wrappers in tauriCommands.ts * initiateOauthCmd(service) -> OAuthInitResponse * handleOauthCallbackCmd(service, code, stateKey) * test*ConnectionCmd() for all services * OAuthInitResponse and ConnectionResult types - Complete Settings/Integrations UI * Three integration cards: Confluence, ServiceNow, ADO * Connect with OAuth2 buttons (Confluence, ADO) * Basic auth note for ServiceNow * Configuration inputs: baseUrl, username, projectName, spaceKey * Test connection buttons with loading states * Success/error feedback with color-coded messages * OAuth2 flow instructions for users - OAuth2 flow in browser * Opens auth URL in default browser via shell plugin * User authenticates with service * Redirected to localhost:8765/callback * Callback server handles token exchange automatically * Success message shown to user - CSP updates in tauri.conf.json * Added http://localhost:8765 (callback server) * Added https://auth.atlassian.com (Confluence OAuth) * Added https://*.atlassian.net (Confluence API) * Added https://login.microsoftonline.com (ADO OAuth) * Added https://dev.azure.com (ADO API) - UI improvements * Fixed Cancel button variant (ghost instead of secondary) * Loading spinners with Loader2 icon * Check/X icons for success/error states * Disabled states when not configured * Optimistic UI updates on connect Frontend + Backend = COMPLETE END-TO-END OAUTH2 FLOW: 1. User goes to Settings → Integrations 2. Enters base URL and config 3. Clicks 'Connect with OAuth2' 4. Browser opens with service auth page 5. User logs in and authorizes 6. Redirected to localhost:8765/callback 7. Token exchanged and encrypted automatically 8. Stored in SQLite credentials table 9. 
Ready for API calls to external services ✅ TypeScript: All types checked, no errors Frontend build: ✅ Built in 2.26s Total lines: ~400 lines of new UI code Next: Phase 2.3 - Integration API clients (Confluence REST, ServiceNow REST, ADO REST)
2026-04-03 20:04:12 +00:00
// ─── OAuth & Integrations ─────────────────────────────────────────────────────
export interface OAuthInitResponse {
auth_url: string;
state: string;
}
export interface ConnectionResult {
success: boolean;
message: string;
}
export const initiateOauthCmd = (service: string) =>
invoke<OAuthInitResponse>("initiate_oauth", { service });
export const handleOauthCallbackCmd = (service: string, code: string, stateKey: string) =>
invoke<void>("handle_oauth_callback", { service, code, stateKey });
export const testConfluenceConnectionCmd = (baseUrl: string, credentials: Record<string, unknown>) =>
invoke<ConnectionResult>("test_confluence_connection", { baseUrl, credentials });
export const testServiceNowConnectionCmd = (instanceUrl: string, credentials: Record<string, unknown>) =>
invoke<ConnectionResult>("test_servicenow_connection", { instanceUrl, credentials });
export const testAzureDevOpsConnectionCmd = (orgUrl: string, credentials: Record<string, unknown>) =>
invoke<ConnectionResult>("test_azuredevops_connection", { orgUrl, credentials });
// ─── Webview & Token Authentication ──────────────────────────────────────────

/** Result of opening an interactive authentication webview. */
export interface WebviewAuthResponse {
  success: boolean;
  message: string;
  webview_id: string;
}

/** Payload for saving a manually supplied API token. */
export interface TokenAuthRequest {
  service: string;
  token: string;
  token_type: string;
  base_url: string;
}

/** Open an embedded webview so the user can log in to the given service. */
export function authenticateWithWebviewCmd(
  service: string,
  baseUrl: string,
): Promise<WebviewAuthResponse> {
  return invoke<WebviewAuthResponse>("authenticate_with_webview", { service, baseUrl });
}

/** Extract session cookies from a previously opened auth webview. */
export function extractCookiesFromWebviewCmd(
  service: string,
  webviewId: string,
): Promise<ConnectionResult> {
  return invoke<ConnectionResult>("extract_cookies_from_webview", { service, webviewId });
}

/** Store a manually entered token for a service. */
export function saveManualTokenCmd(request: TokenAuthRequest): Promise<ConnectionResult> {
  return invoke<ConnectionResult>("save_manual_token", { request });
}