tftsr-devops_investigation/src-tauri/src/ai/ollama.rs
Shaun Arman ccbfcad5d0 feat(ai): add tool-calling and integration search as AI data source
This commit implements two major features:

1. Integration Search as Primary AI Data Source
   - Confluence, ServiceNow, and Azure DevOps searches execute before AI queries
   - Search results injected as system context for AI providers
   - Parallel search execution for performance (see the sketch after the Key Files list)
   - Webview-based fetch for HttpOnly cookie support
   - Persistent browser windows maintain authenticated sessions

2. AI Tool-Calling (Function Calling)
   - Allows AI to automatically execute functions during conversation
   - Implemented for OpenAI-compatible providers and MSI GenAI
   - Created add_ado_comment tool for updating Azure DevOps tickets
   - Iterative tool-calling loop supports multi-step workflows
   - Extensible architecture for adding new tools
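
A rough sketch of what a tool definition along these lines might look like. The real Tool type lives in src-tauri/src/ai/tools.rs, which is not shown on this page, so the field names and the add_ado_comment schema below are assumptions modeled on the common OpenAI function-calling format, not the actual code:

// Hypothetical sketch only; field names mirror the OpenAI function-calling
// schema and may differ from the real definitions in src-tauri/src/ai/tools.rs.
use serde::Serialize;

#[derive(Serialize, Clone)]
pub struct Tool {
    // OpenAI-compatible APIs expect the literal string "function" here.
    pub r#type: String,
    pub function: ToolFunction,
}

#[derive(Serialize, Clone)]
pub struct ToolFunction {
    pub name: String,
    pub description: String,
    // JSON Schema describing the arguments the model should supply.
    pub parameters: serde_json::Value,
}

// Example: a tool the model can call to append a comment to an Azure DevOps work item.
pub fn add_ado_comment_tool() -> Tool {
    Tool {
        r#type: "function".to_string(),
        function: ToolFunction {
            name: "add_ado_comment".to_string(),
            description: "Add a comment to an Azure DevOps work item".to_string(),
            parameters: serde_json::json!({
                "type": "object",
                "properties": {
                    "work_item_id": { "type": "integer" },
                    "comment": { "type": "string" }
                },
                "required": ["work_item_id", "comment"]
            }),
        },
    }
}

On the provider side, the iterative loop presumably re-sends the conversation with each tool result appended as a new message until the model replies without requesting further calls.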

Key Files:
- src-tauri/src/ai/tools.rs (NEW) - Tool definitions
- src-tauri/src/integrations/*_search.rs (NEW) - Integration search modules
- src-tauri/src/integrations/webview_fetch.rs (NEW) - HttpOnly cookie workaround
- src-tauri/src/commands/ai.rs - Tool execution and integration search
- src-tauri/src/ai/openai.rs - Tool-calling for OpenAI and MSI GenAI
- All providers updated with tools parameter support
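
For the integration-search flow described in item 1 above, here is a minimal sketch of how the fan-out and context injection in src-tauri/src/commands/ai.rs might be structured. The search function names, their signatures, and the Message fields used here are assumptions for illustration only (Message is assumed to have just role and content string fields, matching its usage in ollama.rs below):

// Sketch only: the real search functions live in the *_search.rs modules listed
// above and go through the webview-based fetch so HttpOnly cookies are available.
use crate::ai::Message;

async fn search_confluence(_query: &str) -> anyhow::Result<String> {
    Ok(String::new()) // stub for illustration
}
async fn search_servicenow(_query: &str) -> anyhow::Result<String> {
    Ok(String::new()) // stub for illustration
}
async fn search_ado(_query: &str) -> anyhow::Result<String> {
    Ok(String::new()) // stub for illustration
}

// Run all three integration searches concurrently, then prepend whatever came
// back as a system message so the AI provider sees it as context.
async fn build_context(query: &str, mut messages: Vec<Message>) -> Vec<Message> {
    let (confluence, servicenow, ado) = tokio::join!(
        search_confluence(query),
        search_servicenow(query),
        search_ado(query),
    );

    let mut context = String::new();
    for (label, result) in [
        ("Confluence", confluence),
        ("ServiceNow", servicenow),
        ("Azure DevOps", ado),
    ] {
        if let Ok(text) = result {
            if !text.is_empty() {
                context.push_str(&format!("## {label} results\n{text}\n\n"));
            }
        }
    }

    if !context.is_empty() {
        messages.insert(
            0,
            Message {
                role: "system".to_string(),
                content: context,
            },
        );
    }
    messages
}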

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-04-07 09:35:34 -05:00

107 lines
3.1 KiB
Rust

use async_trait::async_trait;
use std::time::Duration;
use crate::ai::provider::Provider;
use crate::ai::{ChatResponse, Message, ProviderInfo, TokenUsage};
use crate::state::ProviderConfig;
pub struct OllamaProvider;

#[async_trait]
impl Provider for OllamaProvider {
    fn name(&self) -> &str {
        "ollama"
    }

    fn info(&self) -> ProviderInfo {
        ProviderInfo {
            name: "Ollama (Local)".to_string(),
            supports_streaming: true,
            models: vec![
                "llama3.1".to_string(),
                "llama3".to_string(),
                "mistral".to_string(),
                "codellama".to_string(),
                "phi3".to_string(),
            ],
        }
    }

    async fn chat(
        &self,
        messages: Vec<Message>,
        config: &ProviderConfig,
        _tools: Option<Vec<crate::ai::Tool>>,
    ) -> anyhow::Result<ChatResponse> {
        let client = reqwest::Client::builder()
            .timeout(Duration::from_secs(60))
            .build()?;

        let base_url = if config.api_url.is_empty() {
            "http://localhost:11434".to_string()
        } else {
            config.api_url.trim_end_matches('/').to_string()
        };
        let url = format!("{base_url}/api/chat");

        // Ollama expects {model, messages: [{role, content}], stream: false}
        let api_messages: Vec<serde_json::Value> = messages
            .iter()
            .map(|m| {
                serde_json::json!({
                    "role": m.role,
                    "content": m.content,
                })
            })
            .collect();

        let body = serde_json::json!({
            "model": config.model,
            "messages": api_messages,
            "stream": false,
        });

        let resp = client
            .post(&url)
            .header("Content-Type", "application/json")
            .json(&body)
            .send()
            .await?;

        if !resp.status().is_success() {
            let status = resp.status();
            let text = resp.text().await?;
            anyhow::bail!("Ollama API error {status}: {text}");
        }

        let json: serde_json::Value = resp.json().await?;

        // Parse response.message.content
        let content = json["message"]["content"]
            .as_str()
            .ok_or_else(|| anyhow::anyhow!("No content in Ollama response"))?
            .to_string();

        // Ollama provides eval_count / prompt_eval_count
        let usage = {
            let prompt_tokens = json["prompt_eval_count"].as_u64().unwrap_or(0) as u32;
            let completion_tokens = json["eval_count"].as_u64().unwrap_or(0) as u32;
            if prompt_tokens > 0 || completion_tokens > 0 {
                Some(TokenUsage {
                    prompt_tokens,
                    completion_tokens,
                    total_tokens: prompt_tokens + completion_tokens,
                })
            } else {
                None
            }
        };

        Ok(ChatResponse {
            content,
            model: config.model.clone(),
            usage,
            tool_calls: None,
        })
    }
}