import React from "react"
import { Database, FileText, Cpu, Sparkles, Zap } from "lucide-react"
import { ModelSelector } from "@/components/model-selector"
import { EmbeddingsGenerator } from "@/components/embeddings-generator"
import { useDocuments } from "@/contexts/document-context"
import { useState, useEffect } from "react"
import { OllamaIcon } from "@/components/ui/ollama-icon"
/**
 * ConfigureTab — configuration panel that displays the currently selected
 * triple-extraction model and the active embedding model, and hosts the
 * model selector / document-processing UI.
 *
 * State is hydrated from localStorage and kept in sync via the browser
 * `storage` event plus three custom window events:
 *   - 'modelSelected'                (detail.model: { id, name })
 *   - 'langChainToggled'             (detail.useLangChain: boolean)
 *   - 'embeddings-settings-changed'  (no detail; re-reads localStorage)
 *
 * NOTE(review): several JSX expressions appear to be missing from this
 * copy of the file (bare `icon:` in the state initializer, bare `return`
 * statements in getModelIcon, an empty ternary, and the JSX tree of the
 * main return). Documented as-is below — restore the missing elements
 * from the original source before shipping.
 */
export function ConfigureTab() {
// Use state from the parent component
// Display info (name + icon element) for the selected triple-extraction model.
const [selectedModelInfo, setSelectedModelInfo] = useState({
name: "Ollama Qwen3 1.7B",
// NOTE(review): icon value missing — JSX element appears stripped here.
icon:
})
// Local sentence-transformers embedding model name (used when provider is "local").
const [embeddingModelInfo, setEmbeddingModelInfo] = useState("all-MiniLM-L6-v2")
// Embeddings provider key read from localStorage: "local" or "nvidia".
const [embeddingsProvider, setEmbeddingsProvider] = useState("local")
// NVIDIA embedding model id (used when provider is "nvidia").
const [nvidiaEmbeddingsModel, setNvidiaEmbeddingsModel] = useState("nvidia/llama-3.2-nv-embedqa-1b-v2")
// Update model info when component mounts and when localStorage changes
useEffect(() => {
// Initial load from localStorage
const updateModelInfo = () => {
try {
const savedModel = localStorage.getItem("selectedModel")
if (savedModel) {
// NOTE(review): parse result is used unvalidated — assumes the stored
// value has { id, name }; confirm against whatever writes "selectedModel".
const model = JSON.parse(savedModel)
setSelectedModelInfo({
name: model.name,
icon: getModelIcon(model.id)
})
}
// Load embedding settings
const provider = localStorage.getItem("embeddings_provider") || "local"
setEmbeddingsProvider(provider)
// Load NVIDIA model if using NVIDIA
if (provider === "nvidia") {
const model = localStorage.getItem("nvidia_embeddings_model") || "nvidia/llama-3.2-nv-embedqa-1b-v2"
setNvidiaEmbeddingsModel(model)
}
} catch (e) {
// Malformed JSON in localStorage must not crash the tab; log and keep defaults.
console.error("Error loading model info:", e)
}
}
// Update on load
updateModelInfo()
// Set up event listener for storage changes
// ('storage' fires for changes made in other tabs/windows; same-tab
// updates are covered by the custom events registered below.)
window.addEventListener('storage', updateModelInfo)
// Custom event for when model selection changes
const handleModelChange = (e: CustomEvent) => {
if (e.detail?.model) {
setSelectedModelInfo({
name: e.detail.model.name,
icon: getModelIcon(e.detail.model.id)
})
}
}
// Listen for LangChain toggle changes
const handleLangChainToggle = (e: CustomEvent) => {
if (e.detail?.useLangChain !== undefined) {
// When LangChain is enabled, use GTE-large model, otherwise use default model
// (only relevant for the local provider; the NVIDIA model is unaffected)
if (embeddingsProvider === "local") {
setEmbeddingModelInfo(e.detail.useLangChain ? "Alibaba-NLP/gte-modernbert-base" : "all-MiniLM-L6-v2")
}
}
}
// Listen for embeddings settings changes
const handleEmbeddingsSettingsChanged = () => {
const provider = localStorage.getItem("embeddings_provider") || "local"
setEmbeddingsProvider(provider)
if (provider === "nvidia") {
const model = localStorage.getItem("nvidia_embeddings_model") || "nvidia/llama-3.2-nv-embedqa-1b-v2"
setNvidiaEmbeddingsModel(model)
} else {
// Local provider - use sentence transformers
const useLangChain = localStorage.getItem("useLangChain") === "true"
setEmbeddingModelInfo(useLangChain ? "Alibaba-NLP/gte-modernbert-base" : "all-MiniLM-L6-v2")
}
}
// Listen for custom model change events
window.addEventListener('modelSelected', handleModelChange as EventListener)
window.addEventListener('langChainToggled', handleLangChainToggle as EventListener)
window.addEventListener('embeddings-settings-changed', handleEmbeddingsSettingsChanged as EventListener)
// Cleanup removes every listener; because the effect depends on
// embeddingsProvider, it re-runs on provider change, which also keeps
// handleLangChainToggle's closure over embeddingsProvider fresh.
return () => {
window.removeEventListener('storage', updateModelInfo)
window.removeEventListener('modelSelected', handleModelChange as EventListener)
window.removeEventListener('langChainToggled', handleLangChainToggle as EventListener)
window.removeEventListener('embeddings-settings-changed', handleEmbeddingsSettingsChanged as EventListener)
}
}, [embeddingsProvider])
// Function to get the appropriate icon based on model ID
// Branches on the model id prefix ("nvidia-" / "ollama-" / other).
// NOTE(review): the returned icon elements are missing in this copy —
// each `return` below appears to have had its JSX expression stripped
// (as written, ASI makes every branch return undefined).
const getModelIcon = (modelId: string) => {
if (modelId?.startsWith("nvidia-")) {
return
} else if (modelId?.startsWith("ollama-")) {
return
}
return
}
// Calculate which model to display
const displayEmbeddingModel = embeddingsProvider === "nvidia" ? nvidiaEmbeddingsModel : embeddingModelInfo
// NOTE(review): both branches of this ternary (provider icon elements) are missing.
const embeddingProviderIcon = embeddingsProvider === "nvidia" ?
:
const embeddingTooltip = embeddingsProvider === "nvidia" ?
"Using NVIDIA API embedding model" :
"Using sentence-transformers service model"
// NOTE(review): the JSX tree of this return block is missing; only the
// text nodes, JSX comments, and embedded expressions remain.
return (
{/* Left column: Model selection */}
Current Configuration
Selected Model
{selectedModelInfo.icon}
{selectedModelInfo.name}
Embedding Model
{embeddingProviderIcon}
{displayEmbeddingModel}
Select Triple Extraction Model
{/* Right column: Document Processing */}
Process Documents
Extract structured knowledge triples from documents for knowledge graph construction
)
}
// Summary component that reads the document context to report how many documents are ready for processing
function ProcessingSummary() {
const { documents } = useDocuments()
// Count documents with "New" status that are ready for processing
const docsReadyCount = documents.filter(doc => doc.status === "New").length
return (
Documents Ready
{docsReadyCount === 1
? '1 document ready for processing'
: `${docsReadyCount} documents ready for processing`}