diff --git a/nvidia/txt2kg/assets/frontend/components/llm-selector-compact.tsx b/nvidia/txt2kg/assets/frontend/components/llm-selector-compact.tsx
new file mode 100644
index 0000000..ffaaeba
--- /dev/null
+++ b/nvidia/txt2kg/assets/frontend/components/llm-selector-compact.tsx
@@ -0,0 +1,160 @@
+"use client"
+
+import { useState, useEffect } from "react"
+import { ChevronDown, Cpu } from "lucide-react"
+import { OllamaIcon } from "@/components/ui/ollama-icon"
+
+interface LLMModel {
+  id: string
+  name: string
+  model: string
+  provider: string
+  description?: string
+}
+
+// Default models
+const DEFAULT_MODELS: LLMModel[] = [
+  {
+    id: "ollama-llama3.1:8b",
+    name: "Llama 3.1 8B",
+    model: "llama3.1:8b",
+    provider: "ollama",
+    description: "Local Ollama model"
+  },
+  {
+    id: "nvidia-nemotron",
+    name: "Nemotron 70B",
+    model: "nvdev/nvidia/llama-3.1-nemotron-70b-instruct",
+    provider: "nvidia",
+    description: "NVIDIA API (requires key)"
+  },
+]
+
+export function LLMSelectorCompact() {
+  const [models, setModels] = useState(DEFAULT_MODELS)
+  const [selectedModel, setSelectedModel] = useState(DEFAULT_MODELS[0])
+  const [isOpen, setIsOpen] = useState(false)
+
+  // Load Ollama models from settings
+  useEffect(() => {
+    try {
+      const selectedOllamaModels = localStorage.getItem("selected_ollama_models")
+      if (selectedOllamaModels) {
+        const modelNames: string[] = JSON.parse(selectedOllamaModels)
+        const ollamaModels: LLMModel[] = modelNames.map(name => ({
+          id: `ollama-${name}`,
+          name: name,
+          model: name,
+          provider: "ollama",
+          description: "Local Ollama model"
+        }))
+
+        // Combine with default models, avoiding duplicates
+        const defaultOllamaIds = DEFAULT_MODELS
+          .filter(m => m.provider === "ollama")
+          .map(m => m.model)
+        const uniqueOllamaModels = ollamaModels.filter(
+          m => !defaultOllamaIds.includes(m.model)
+        )
+
+        const allModels = [...DEFAULT_MODELS, ...uniqueOllamaModels]
+        setModels(allModels)
+      }
+    } catch (error) {
+      console.error("Error loading Ollama models:", error)
+    }
+  }, [])
+
+  // Load selected model from localStorage
+  useEffect(() => {
+    try {
+      const saved = localStorage.getItem("selectedModelForRAG")
+      if (saved) {
+        const savedModel: LLMModel = JSON.parse(saved)
+        setSelectedModel(savedModel)
+      }
+    } catch (error) {
+      console.error("Error loading selected model:", error)
+    }
+  }, [])
+
+  // Save selected model to localStorage and dispatch event
+  const handleSelectModel = (model: LLMModel) => {
+    setSelectedModel(model)
+    setIsOpen(false)
+    localStorage.setItem("selectedModelForRAG", JSON.stringify(model))
+
+    // Dispatch event for other components
+    window.dispatchEvent(new CustomEvent('ragModelSelected', {
+      detail: { model }
+    }))
+  }
+
+  const getModelIcon = (provider: string) => {
+    if (provider === "ollama") {
+      return <OllamaIcon className="h-4 w-4" />
+    }
+    return <Cpu className="h-4 w-4" />
+  }
+
+  return (
+    <div className="relative">
+      {/* Trigger button */}
+      <button
+        className="flex items-center gap-1.5 rounded-md border px-2 py-1 text-sm"
+        onClick={() => setIsOpen(!isOpen)}
+      >
+        {getModelIcon(selectedModel.provider)}
+        <span>{selectedModel.name}</span>
+        <ChevronDown className="h-3 w-3" />
+      </button>
+
+      {isOpen && (
+        <>
+          {/* Backdrop */}
+          <div
+            className="fixed inset-0 z-40"
+            onClick={() => setIsOpen(false)}
+          />
+
+          {/* Dropdown */}
+          <div className="absolute right-0 z-50 mt-1 w-64 rounded-md border bg-background shadow-md">
+            <div className="border-b px-3 py-2">
+              <span className="text-xs font-medium">
+                Select LLM for Answer Generation
+              </span>
+            </div>
+
+            <div className="max-h-64 overflow-y-auto p-1">
+              {models.map((model) => (
+                <button
+                  key={model.id}
+                  className="flex w-full items-center gap-2 rounded-sm px-2 py-1.5 text-left text-sm hover:bg-accent"
+                  onClick={() => handleSelectModel(model)}
+                >
+                  {getModelIcon(model.provider)}
+                  <div>
+                    <div>{model.name}</div>
+                    <div className="text-xs text-muted-foreground">{model.description}</div>
+                  </div>
+                </button>
+              ))}
+            </div>
+          </div>
+        </>
+      )}
+    </div>
+  )
+}
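
The selector persists the choice under the `selectedModelForRAG` localStorage key and broadcasts a `ragModelSelected` `CustomEvent`, so sibling components can react to the selection without prop drilling. A minimal consumer sketch under those assumptions follows; the hook name `useSelectedRagModel` is illustrative and not part of this diff.

```tsx
"use client"

import { useEffect, useState } from "react"

// Same shape the selector stores; duplicated here so the sketch is self-contained.
interface LLMModel {
  id: string
  name: string
  model: string
  provider: string
  description?: string
}

// Illustrative hook (not part of the diff): tracks the model chosen in LLMSelectorCompact.
export function useSelectedRagModel(): LLMModel | null {
  const [model, setModel] = useState<LLMModel | null>(null)

  useEffect(() => {
    // Pick up a previously saved selection from the same key the selector writes.
    try {
      const saved = localStorage.getItem("selectedModelForRAG")
      if (saved) setModel(JSON.parse(saved) as LLMModel)
    } catch (error) {
      console.error("Error reading selected model:", error)
    }

    // React to new selections; the selector dispatches the model in `detail.model`.
    const onSelect = (event: Event) => {
      setModel((event as CustomEvent<{ model: LLMModel }>).detail.model)
    }
    window.addEventListener("ragModelSelected", onSelect)
    return () => window.removeEventListener("ragModelSelected", onSelect)
  }, [])

  return model
}
```

An answer-generation component could then call `useSelectedRagModel()` and fall back to its own default model while the return value is still `null`.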