"use client";

import { useState, useEffect } from 'react';
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card';
import { Badge } from '@/components/ui/badge';
import { Button } from '@/components/ui/button';
import { Progress } from '@/components/ui/progress';
import {
  CheckCircle,
  AlertCircle,
  Clock,
  RefreshCw,
  Zap,
  Cpu,
  Activity,
  TrendingUp,
} from 'lucide-react';
import {
  LineChart,
  Line,
  XAxis,
  YAxis,
  CartesianGrid,
  Tooltip,
  ResponsiveContainer,
  BarChart,
  Bar,
} from 'recharts';

/** Aggregate fleet-level numbers shown in the summary metric cards. */
interface HealthMetrics {
  total_models: number;
  healthy_models: number;
  unhealthy_models: number;
  unknown_models: number;
  avg_latency: number;          // milliseconds, averaged across all models
  uptime_percentage: number;    // 0–100
  last_updated: string;         // ISO-8601 timestamp
}

/** Per-model health record rendered in the "Individual Model Status" list. */
interface ModelHealth {
  model_id: string;
  name: string;
  provider: string;
  health_status: 'healthy' | 'unhealthy' | 'unknown';
  latency_ms: number;
  success_rate: number;         // 0–100
  last_check: string;           // ISO-8601 timestamp
  error_message?: string;       // present only when a check failed
  uptime_24h: number;           // 0–100
}

// Mock data for charts — replace with live metrics once the API is wired up.
const latencyData = [
  { time: '00:00', groq: 120, bge_m3: 45 },
  { time: '04:00', groq: 135, bge_m3: 52 },
  { time: '08:00', groq: 180, bge_m3: 67 },
  { time: '12:00', groq: 220, bge_m3: 89 },
  { time: '16:00', groq: 195, bge_m3: 71 },
  { time: '20:00', groq: 165, bge_m3: 58 },
];

const requestVolumeData = [
  { hour: '00', requests: 120 },
  { hour: '04', requests: 89 },
  { hour: '08', requests: 456 },
  { hour: '12', requests: 892 },
  { hour: '16', requests: 743 },
  { hour: '20', requests: 567 },
];

/** Allowed Badge variants for the status pill (keys of the lookup below). */
type BadgeVariant = 'default' | 'destructive' | 'secondary';

/**
 * Dashboard page showing fleet-wide model health: summary cards, 24h
 * latency/volume charts, and a per-model status list. Currently backed
 * entirely by mock data (see loadHealthData).
 */
export default function ModelHealthDashboard() {
  // FIX: the original `useState(null)` / `useState([])` carried no type
  // arguments, so under `strict` mode `metrics` was typed `null` and
  // `modelHealth` was `never[]` — the setters rejected the mock payloads.
  const [metrics, setMetrics] = useState<HealthMetrics | null>(null);
  const [modelHealth, setModelHealth] = useState<ModelHealth[]>([]);
  const [loading, setLoading] = useState(true);
  const [refreshing, setRefreshing] = useState(false);

  useEffect(() => {
    loadHealthData();
    // eslint-disable-next-line react-hooks/exhaustive-deps -- run once on mount
  }, []);

  /** Loads health data. Mock today: sleeps 1s, then installs fixed fixtures. */
  const loadHealthData = async () => {
    setLoading(true);
    // Mock data - replace with API calls
    await new Promise(resolve => setTimeout(resolve, 1000));

    const mockMetrics: HealthMetrics = {
      total_models: 20,
      healthy_models: 18,
      unhealthy_models: 1,
      unknown_models: 1,
      avg_latency: 167,
      uptime_percentage: 99.2,
      last_updated: new Date().toISOString(),
    };

    const mockModelHealth: ModelHealth[] = [
      {
        model_id: "llama-3.3-70b-versatile",
        name: "Llama 3.3 70B Versatile",
        provider: "groq",
        health_status: "healthy",
        latency_ms: 234,
        success_rate: 99.8,
        last_check: new Date(Date.now() - 30000).toISOString(),
        uptime_24h: 99.9,
      },
      {
        model_id: "bge-m3",
        name: "BGE-M3 Embeddings",
        provider: "external",
        health_status: "healthy",
        latency_ms: 67,
        success_rate: 100.0,
        last_check: new Date(Date.now() - 15000).toISOString(),
        uptime_24h: 99.5,
      },
      {
        model_id: "whisper-large-v3",
        name: "Whisper Large v3",
        provider: "groq",
        health_status: "unhealthy",
        latency_ms: 0,
        success_rate: 87.2,
        last_check: new Date(Date.now() - 120000).toISOString(),
        error_message: "API rate limit exceeded",
        uptime_24h: 87.2,
      },
      {
        model_id: "llama-3.1-405b-reasoning",
        name: "Llama 3.1 405B Reasoning",
        provider: "groq",
        health_status: "unknown",
        latency_ms: 0,
        success_rate: 0,
        last_check: new Date(Date.now() - 300000).toISOString(),
        uptime_24h: 0,
      },
    ];

    setMetrics(mockMetrics);
    setModelHealth(mockModelHealth);
    setLoading(false);
  };

  // NOTE(review): loadHealthData flips the page-level `loading` flag, so
  // during a refresh the whole dashboard is replaced by the loading state
  // and `refreshing` never visibly spins — consider a silent reload path.
  const handleRefresh = async () => {
    setRefreshing(true);
    await loadHealthData();
    setRefreshing(false);
  };

  /** Icon for a health status; unknown statuses fall back to a clock. */
  const getStatusIcon = (status: string) => {
    switch (status) {
      case 'healthy':
        return <CheckCircle className="h-4 w-4 text-green-600" />;
      case 'unhealthy':
        return <AlertCircle className="h-4 w-4 text-red-600" />;
      default:
        return <Clock className="h-4 w-4 text-yellow-600" />;
    }
  };

  /** Status pill combining the status icon with a variant-colored Badge. */
  const getStatusBadge = (status: string) => {
    // FIX: the original `Record` had no type arguments (invalid TS).
    const variants: Record<string, BadgeVariant> = {
      healthy: "default",
      unhealthy: "destructive",
      unknown: "secondary",
    };
    return (
      <Badge variant={variants[status] ?? "secondary"} className="flex items-center gap-1 capitalize">
        {getStatusIcon(status)}
        {status}
      </Badge>
    );
  };

  /** Traffic-light text color for an uptime percentage. */
  const getUptimeColor = (uptime: number) => {
    if (uptime >= 99) return "text-green-600";
    if (uptime >= 95) return "text-yellow-600";
    return "text-red-600";
  };

  // NOTE(review): all JSX below was reconstructed — the original markup was
  // lost in this file; verify layout/classes against the design before merge.
  if (loading) {
    return (
      <div className="flex items-center justify-center p-12 text-muted-foreground">
        <RefreshCw className="mr-2 h-4 w-4 animate-spin" />
        Loading health data...
      </div>
    );
  }

  return (
    <div className="space-y-6">
      {/* Header with Refresh */}
      <div className="flex items-center justify-between">
        <div>
          <h2 className="text-2xl font-bold">Model Health Overview</h2>
          <p className="text-sm text-muted-foreground">
            Last updated: {metrics?.last_updated ? new Date(metrics.last_updated).toLocaleString() : 'Never'}
          </p>
        </div>
        <Button onClick={handleRefresh} disabled={refreshing} variant="outline">
          <RefreshCw className={`mr-2 h-4 w-4 ${refreshing ? 'animate-spin' : ''}`} />
          Refresh
        </Button>
      </div>

      {/* Metrics Cards */}
      <div className="grid gap-4 md:grid-cols-2 lg:grid-cols-4">
        <Card>
          <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
            <CardTitle className="text-sm font-medium">Total Models</CardTitle>
            <Cpu className="h-4 w-4 text-muted-foreground" />
          </CardHeader>
          <CardContent>
            <div className="text-2xl font-bold">{metrics?.total_models}</div>
            <p className="text-xs text-muted-foreground">
              {metrics?.healthy_models} healthy, {metrics?.unhealthy_models} unhealthy
            </p>
          </CardContent>
        </Card>

        <Card>
          <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
            <CardTitle className="text-sm font-medium">System Uptime</CardTitle>
            <Activity className="h-4 w-4 text-muted-foreground" />
          </CardHeader>
          <CardContent>
            <div className={`text-2xl font-bold ${getUptimeColor(metrics?.uptime_percentage ?? 0)}`}>
              {metrics?.uptime_percentage}%
            </div>
            <Progress value={metrics?.uptime_percentage ?? 0} className="mt-2" />
          </CardContent>
        </Card>

        <Card>
          <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
            <CardTitle className="text-sm font-medium">Avg Latency</CardTitle>
            <Zap className="h-4 w-4 text-muted-foreground" />
          </CardHeader>
          <CardContent>
            <div className="text-2xl font-bold">{metrics?.avg_latency}ms</div>
            <p className="text-xs text-muted-foreground">Across all models</p>
          </CardContent>
        </Card>

        <Card>
          <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
            <CardTitle className="text-sm font-medium">Health Score</CardTitle>
            <TrendingUp className="h-4 w-4 text-muted-foreground" />
          </CardHeader>
          <CardContent>
            <div className="text-2xl font-bold">
              {metrics ? Math.round((metrics.healthy_models / metrics.total_models) * 100) : 0}%
            </div>
            <p className="text-xs text-muted-foreground">Models responding</p>
          </CardContent>
        </Card>
      </div>

      {/* Charts */}
      <div className="grid gap-4 md:grid-cols-2">
        <Card>
          <CardHeader>
            <CardTitle>Latency Trends (24h)</CardTitle>
            <CardDescription>Response times by provider</CardDescription>
          </CardHeader>
          <CardContent>
            <ResponsiveContainer width="100%" height={250}>
              <LineChart data={latencyData}>
                <CartesianGrid strokeDasharray="3 3" />
                <XAxis dataKey="time" />
                <YAxis />
                <Tooltip />
                <Line type="monotone" dataKey="groq" stroke="#8884d8" name="Groq" />
                <Line type="monotone" dataKey="bge_m3" stroke="#82ca9d" name="BGE-M3" />
              </LineChart>
            </ResponsiveContainer>
          </CardContent>
        </Card>

        <Card>
          <CardHeader>
            <CardTitle>Request Volume (24h)</CardTitle>
            <CardDescription>Total requests per hour</CardDescription>
          </CardHeader>
          <CardContent>
            <ResponsiveContainer width="100%" height={250}>
              <BarChart data={requestVolumeData}>
                <CartesianGrid strokeDasharray="3 3" />
                <XAxis dataKey="hour" />
                <YAxis />
                <Tooltip />
                <Bar dataKey="requests" fill="#8884d8" />
              </BarChart>
            </ResponsiveContainer>
          </CardContent>
        </Card>
      </div>

      {/* Individual Model Health */}
      <Card>
        <CardHeader>
          <CardTitle>Individual Model Status</CardTitle>
          <CardDescription>Detailed health information for each model</CardDescription>
        </CardHeader>
        <CardContent>
          <div className="space-y-4">
            {modelHealth.map((model) => (
              <div
                key={model.model_id}
                className="flex items-center justify-between rounded-lg border p-4"
              >
                <div className="space-y-1">
                  <div className="flex items-center gap-2">
                    <span className="font-medium">{model.name}</span>
                    <Badge variant="outline">{model.provider}</Badge>
                  </div>
                  <p className="text-xs text-muted-foreground">{model.model_id}</p>
                  {model.error_message && (
                    <p className="text-xs text-red-600">{model.error_message}</p>
                  )}
                </div>
                <div className="flex items-center gap-6 text-right">
                  <div>
                    <div className="text-sm font-medium">{model.latency_ms}ms</div>
                    <p className="text-xs text-muted-foreground">Latency</p>
                  </div>
                  <div>
                    <div className="text-sm font-medium">{model.success_rate}%</div>
                    <p className="text-xs text-muted-foreground">Success Rate</p>
                  </div>
                  <div>
                    <div className={`text-sm font-medium ${getUptimeColor(model.uptime_24h)}`}>
                      {model.uptime_24h}%
                    </div>
                    <p className="text-xs text-muted-foreground">24h Uptime</p>
                  </div>
                  {getStatusBadge(model.health_status)}
                </div>
              </div>
            ))}
          </div>
        </CardContent>
      </Card>
    </div>
  );
}