feat: add temperature and max_tokens support for Custom REST providers (v0.2.9)
Some checks failed
Release / build-linux-amd64 (push) Has been cancelled
Release / build-windows-amd64 (push) Has been cancelled
Release / build-macos-arm64 (push) Has been cancelled
Release / build-linux-arm64 (push) Has been cancelled

- Added max_tokens and temperature fields to ProviderConfig
- Custom REST providers now send modelConfig with temperature and max_tokens
- OpenAI-compatible providers now use configured max_tokens/temperature
- OpenAI-compatible format falls back to max_tokens=4096 when unset; temperature and modelConfig fields are omitted entirely when not specified
- Bumped version to 0.2.9

This allows users to configure response length and randomness for all
AI providers, including Custom REST providers which require modelConfig format.
This commit is contained in:
Shaun Arman 2026-04-03 17:08:34 -05:00
parent 1d40dfb15b
commit 2c5e04a6ce
3 changed files with 28 additions and 3 deletions

View File

@@ -56,12 +56,19 @@ impl OpenAiProvider {
.unwrap_or("/chat/completions");
let url = format!("{}{}", config.api_url.trim_end_matches('/'), endpoint_path);
let body = serde_json::json!({
let mut body = serde_json::json!({
"model": config.model,
"messages": messages,
"max_tokens": 4096,
});
// Add max_tokens if provided, otherwise use default 4096
body["max_tokens"] = serde_json::Value::from(config.max_tokens.unwrap_or(4096));
// Add temperature if provided
if let Some(temp) = config.temperature {
body["temperature"] = serde_json::Value::from(temp);
}
// Use custom auth header and prefix if provided
let auth_header = config.custom_auth_header.as_deref().unwrap_or("Authorization");
let auth_prefix = config.custom_auth_prefix.as_deref().unwrap_or("Bearer ");
@@ -149,6 +156,18 @@ impl OpenAiProvider {
body["sessionId"] = serde_json::Value::String(session_id.clone());
}
// Add modelConfig with temperature and max_tokens if provided
let mut model_config = serde_json::json!({});
if let Some(temp) = config.temperature {
model_config["temperature"] = serde_json::Value::from(temp);
}
if let Some(max_tokens) = config.max_tokens {
model_config["max_tokens"] = serde_json::Value::from(max_tokens);
}
if !model_config.is_null() && model_config.as_object().map_or(false, |obj| !obj.is_empty()) {
body["modelConfig"] = model_config;
}
// Use custom auth header and prefix (no prefix for MSI GenAI)
let auth_header = config
.custom_auth_header

View File

@@ -10,6 +10,12 @@ pub struct ProviderConfig {
pub api_url: String,
pub api_key: String,
pub model: String,
/// Optional: Maximum tokens for response
#[serde(skip_serializing_if = "Option::is_none")]
pub max_tokens: Option<u32>,
/// Optional: Temperature (0.0-2.0) - controls randomness
#[serde(skip_serializing_if = "Option::is_none")]
pub temperature: Option<f64>,
/// Optional: Custom endpoint path (e.g., "" for no path, "/v1/chat" for custom path)
/// If None, defaults to "/chat/completions" for OpenAI compatibility
#[serde(skip_serializing_if = "Option::is_none")]

View File

@@ -1,6 +1,6 @@
{
"productName": "TFTSR",
"version": "0.2.8",
"version": "0.2.9",
"identifier": "com.tftsr.devops",
"build": {
"frontendDist": "../dist",