From 2c5e04a6ce6d411a0348703a5df758069b6d8763 Mon Sep 17 00:00:00 2001 From: Shaun Arman Date: Fri, 3 Apr 2026 17:08:34 -0500 Subject: [PATCH] feat: add temperature and max_tokens support for Custom REST providers (v0.2.9) - Added max_tokens and temperature fields to ProviderConfig - Custom REST providers now send modelConfig with temperature and max_tokens - OpenAI-compatible providers now use configured max_tokens/temperature - Both formats fall back to defaults if not specified - Bumped version to 0.2.9 This allows users to configure response length and randomness for all AI providers, including Custom REST providers which require modelConfig format. --- src-tauri/src/ai/openai.rs | 23 +++++++++++++++++++++-- src-tauri/src/state.rs | 6 ++++++ src-tauri/tauri.conf.json | 2 +- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/src-tauri/src/ai/openai.rs b/src-tauri/src/ai/openai.rs index a5b85eb0..5307d2f8 100644 --- a/src-tauri/src/ai/openai.rs +++ b/src-tauri/src/ai/openai.rs @@ -56,12 +56,19 @@ impl OpenAiProvider { .unwrap_or("/chat/completions"); let url = format!("{}{}", config.api_url.trim_end_matches('/'), endpoint_path); - let body = serde_json::json!({ + let mut body = serde_json::json!({ "model": config.model, "messages": messages, - "max_tokens": 4096, }); + // Add max_tokens if provided, otherwise use default 4096 + body["max_tokens"] = serde_json::Value::from(config.max_tokens.unwrap_or(4096)); + + // Add temperature if provided + if let Some(temp) = config.temperature { + body["temperature"] = serde_json::Value::from(temp); + } + // Use custom auth header and prefix if provided let auth_header = config.custom_auth_header.as_deref().unwrap_or("Authorization"); let auth_prefix = config.custom_auth_prefix.as_deref().unwrap_or("Bearer "); @@ -149,6 +156,18 @@ impl OpenAiProvider { body["sessionId"] = serde_json::Value::String(session_id.clone()); } + // Add modelConfig with temperature and max_tokens if provided + let mut model_config = 
serde_json::json!({}); + if let Some(temp) = config.temperature { + model_config["temperature"] = serde_json::Value::from(temp); + } + if let Some(max_tokens) = config.max_tokens { + model_config["max_tokens"] = serde_json::Value::from(max_tokens); + } + if !model_config.is_null() && model_config.as_object().map_or(false, |obj| !obj.is_empty()) { + body["modelConfig"] = model_config; + } + // Use custom auth header and prefix (no prefix for MSI GenAI) let auth_header = config .custom_auth_header diff --git a/src-tauri/src/state.rs b/src-tauri/src/state.rs index 4aee86a0..9907e7a4 100644 --- a/src-tauri/src/state.rs +++ b/src-tauri/src/state.rs @@ -10,6 +10,12 @@ pub struct ProviderConfig { pub api_url: String, pub api_key: String, pub model: String, + /// Optional: Maximum tokens for response + #[serde(skip_serializing_if = "Option::is_none")] + pub max_tokens: Option<u32>, + /// Optional: Temperature (0.0-2.0) - controls randomness + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option<f32>, /// Optional: Custom endpoint path (e.g., "" for no path, "/v1/chat" for custom path) /// If None, defaults to "/chat/completions" for OpenAI compatibility #[serde(skip_serializing_if = "Option::is_none")] diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index e4ea5c39..a69fa85f 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -1,6 +1,6 @@ { "productName": "TFTSR", - "version": "0.2.8", + "version": "0.2.9", "identifier": "com.tftsr.devops", "build": { "frontendDist": "../dist",