import { useEffect, useState } from "react";
import { MdGraphicEq, MdSignalWifi4Bar, MdError, MdMic } from "react-icons/md";
import { LuActivity, LuClock, LuHardDrive, LuSettings } from "react-icons/lu";

import { AudioLevelMeter } from "@components/AudioLevelMeter";
import { cx } from "@/cva.config";
import { useMicrophone } from "@/hooks/useMicrophone";
import { useAudioLevel } from "@/hooks/useAudioLevel";
import { useAudioEvents } from "@/hooks/useAudioEvents";
import api from "@/api";

interface AudioMetrics {
  frames_received: number;
  frames_dropped: number;
  bytes_processed: number;
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
}

interface MicrophoneMetrics {
  frames_sent: number;
  frames_dropped: number;
  bytes_processed: number;
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
}

interface AudioConfig {
  Quality: number;
  Bitrate: number;
  SampleRate: number;
  Channels: number;
  FrameSize: string;
}

const qualityLabels = {
  0: "Low",
  1: "Medium",
  2: "High",
  3: "Ultra",
};

export default function AudioMetricsDashboard() {
  // Use WebSocket-based audio events for real-time updates
  const {
    audioMetrics,
    microphoneMetrics: wsMicrophoneMetrics,
    isConnected: wsConnected,
  } = useAudioEvents();

  // Fallback state for when the WebSocket is not connected
  const [fallbackMetrics, setFallbackMetrics] = useState<AudioMetrics | null>(null);
  const [fallbackMicrophoneMetrics, setFallbackMicrophoneMetrics] =
    useState<MicrophoneMetrics | null>(null);
  const [fallbackConnected, setFallbackConnected] = useState(false);

  // Configuration state (these don't change frequently, so we can load them once)
  const [config, setConfig] = useState<AudioConfig | null>(null);
  const [microphoneConfig, setMicrophoneConfig] = useState<AudioConfig | null>(null);
  const [lastUpdate, setLastUpdate] = useState(new Date());

  // Use WebSocket data when available, fall back to polled data otherwise
  const metrics = wsConnected && audioMetrics !== null ? audioMetrics : fallbackMetrics;
  const microphoneMetrics =
    wsConnected && wsMicrophoneMetrics !== null
      ? wsMicrophoneMetrics
      : fallbackMicrophoneMetrics;
  const isConnected = wsConnected ? wsConnected : fallbackConnected;

  // Microphone state for audio level monitoring
  const { isMicrophoneActive, isMicrophoneMuted, microphoneStream } = useMicrophone();
  const { audioLevel, isAnalyzing } = useAudioLevel(
    isMicrophoneActive ? microphoneStream : null,
    {
      enabled: isMicrophoneActive,
      updateInterval: 120,
    },
  );

  useEffect(() => {
    // Load initial configuration (only once)
    loadAudioConfig();

    // Set up fallback polling only when the WebSocket is not connected
    if (!wsConnected) {
      loadAudioData();
      const interval = setInterval(loadAudioData, 1000);
      return () => clearInterval(interval);
    }
  }, [wsConnected]);

  const loadAudioConfig = async () => {
    try {
      // Load audio output config
      const configResp = await api.GET("/audio/quality");
      if (configResp.ok) {
        const configData = await configResp.json();
        setConfig(configData.current);
      }

      // Load microphone config
      try {
        const micConfigResp = await api.GET("/microphone/quality");
        if (micConfigResp.ok) {
          const micConfigData = await micConfigResp.json();
          setMicrophoneConfig(micConfigData.current);
        }
      } catch (micConfigError) {
        console.debug("Microphone config not available:", micConfigError);
      }
    } catch (error) {
      console.error("Failed to load audio config:", error);
    }
  };

  const loadAudioData = async () => {
    try {
      // Load audio output metrics
      const metricsResp = await api.GET("/audio/metrics");
      if (metricsResp.ok) {
        const metricsData = await metricsResp.json();
        setFallbackMetrics(metricsData);
        // Consider connected if the API call succeeds, regardless of frame count
        setFallbackConnected(true);
        setLastUpdate(new Date());
      } else {
        setFallbackConnected(false);
      }

      // Load microphone metrics
      try {
        const micResp = await api.GET("/microphone/metrics");
        if (micResp.ok) {
          const micData = await micResp.json();
          setFallbackMicrophoneMetrics(micData);
        }
      } catch (micError) {
        // Microphone metrics might not be available, that's okay
        console.debug("Microphone metrics not available:", micError);
      }
    } catch (error) {
      console.error("Failed to load audio data:", error);
      setFallbackConnected(false);
    }
  };

  const formatBytes = (bytes: number) => {
    if (bytes === 0) return "0 B";
    const k = 1024;
    const sizes = ["B", "KB", "MB", "GB"];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i];
  };

  const formatNumber = (num: number) => {
    return new Intl.NumberFormat().format(num);
  };

  const getDropRate = () => {
    if (!metrics || metrics.frames_received === 0) return 0;
    return (metrics.frames_dropped / metrics.frames_received) * 100;
  };

  const getQualityColor = (quality: number) => {
    switch (quality) {
      case 0:
        return "text-yellow-600 dark:text-yellow-400";
      case 1:
        return "text-blue-600 dark:text-blue-400";
      case 2:
        return "text-green-600 dark:text-green-400";
      case 3:
        return "text-purple-600 dark:text-purple-400";
      default:
        return "text-slate-600 dark:text-slate-400";
    }
  };

  return (
    <div className="space-y-4">
      {/* Header */}
      <div className="flex items-center justify-between">
        <div className="flex items-center gap-2">
          <MdGraphicEq className="h-5 w-5 text-blue-600 dark:text-blue-400" />
          <span className="font-semibold text-slate-900 dark:text-slate-100">Audio Metrics</span>
        </div>
        <div className="flex items-center gap-1 text-sm text-slate-600 dark:text-slate-400">
          {isConnected ? (
            <MdSignalWifi4Bar className="h-4 w-4 text-green-500" />
          ) : (
            <MdError className="h-4 w-4 text-red-500" />
          )}
          <span>{isConnected ? "Active" : "Inactive"}</span>
        </div>
      </div>

      {/* Current Configuration */}
      <div className="grid grid-cols-1 gap-3 md:grid-cols-2">
        {config && (
          <div className="rounded border p-3">
            <div className="mb-2 flex items-center gap-2 font-medium">
              <LuSettings className="h-4 w-4" />
              <span>Audio Output Config</span>
            </div>
            <div className="space-y-1 text-sm text-slate-600 dark:text-slate-400">
              <div>
                Quality:{" "}
                <span className={getQualityColor(config.Quality)}>
                  {qualityLabels[config.Quality as keyof typeof qualityLabels]}
                </span>
              </div>
              <div>Bitrate: {config.Bitrate}kbps</div>
              <div>Sample Rate: {config.SampleRate}Hz</div>
              <div>Channels: {config.Channels}</div>
            </div>
          </div>
        )}
        {microphoneConfig && (
          <div className="rounded border p-3">
            <div className="mb-2 flex items-center gap-2 font-medium">
              <MdMic className="h-4 w-4" />
              <span>Microphone Input Config</span>
            </div>
            <div className="space-y-1 text-sm text-slate-600 dark:text-slate-400">
              <div>
                Quality:{" "}
                <span className={getQualityColor(microphoneConfig.Quality)}>
                  {qualityLabels[microphoneConfig.Quality as keyof typeof qualityLabels]}
                </span>
              </div>
              <div>Bitrate: {microphoneConfig.Bitrate}kbps</div>
              <div>Sample Rate: {microphoneConfig.SampleRate}Hz</div>
              <div>Channels: {microphoneConfig.Channels}</div>
            </div>
          </div>
        )}
      </div>

      {/* Performance Metrics */}
      {metrics && (
        <div className="space-y-3">
          {/* Audio Output Frames */}
          <div className="rounded border p-3">
            <div className="mb-2 flex items-center gap-2 font-medium">
              <LuActivity className="h-4 w-4" />
              <span>Audio Output</span>
            </div>
            <div className="grid grid-cols-2 gap-3 text-center">
              <div>
                <div className="text-lg font-semibold">{formatNumber(metrics.frames_received)}</div>
                <div className="text-xs text-slate-500">Frames Received</div>
              </div>
              <div>
                <div className={cx("text-lg font-semibold", metrics.frames_dropped > 0 ? "text-red-600 dark:text-red-400" : "text-green-600 dark:text-green-400")}>
                  {formatNumber(metrics.frames_dropped)}
                </div>
                <div className="text-xs text-slate-500">Frames Dropped</div>
              </div>
            </div>

            {/* Drop Rate */}
            <div className="mt-3">
              <div className="flex items-center justify-between text-sm">
                <span>Drop Rate</span>
                <span className={cx("font-medium", getDropRate() > 5 ? "text-red-600 dark:text-red-400" : getDropRate() > 1 ? "text-yellow-600 dark:text-yellow-400" : "text-green-600 dark:text-green-400")}>
                  {getDropRate().toFixed(2)}%
                </span>
              </div>
              <div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-700">
                <div
                  className={cx("h-2 rounded-full", getDropRate() > 5 ? "bg-red-500" : getDropRate() > 1 ? "bg-yellow-500" : "bg-green-500")}
                  style={{ width: `${Math.min(getDropRate(), 100)}%` }}
                />
              </div>
            </div>
          </div>

          {/* Microphone Input Metrics */}
          {microphoneMetrics && (
            <div className="rounded border p-3">
              <div className="mb-2 flex items-center gap-2 font-medium">
                <MdMic className="h-4 w-4" />
                <span>Microphone Input</span>
              </div>
              <div className="grid grid-cols-2 gap-3 text-center">
                <div>
                  <div className="text-lg font-semibold">{formatNumber(microphoneMetrics.frames_sent)}</div>
                  <div className="text-xs text-slate-500">Frames Sent</div>
                </div>
                <div>
                  <div className={cx("text-lg font-semibold", microphoneMetrics.frames_dropped > 0 ? "text-red-600 dark:text-red-400" : "text-green-600 dark:text-green-400")}>
                    {formatNumber(microphoneMetrics.frames_dropped)}
                  </div>
                  <div className="text-xs text-slate-500">Frames Dropped</div>
                </div>
              </div>

              {/* Microphone Drop Rate */}
              <div className="mt-3">
                <div className="flex items-center justify-between text-sm">
                  <span>Drop Rate</span>
                  <span className={cx("font-medium", (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 5 ? "text-red-600 dark:text-red-400" : (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 1 ? "text-yellow-600 dark:text-yellow-400" : "text-green-600 dark:text-green-400")}>
                    {microphoneMetrics.frames_sent > 0 ? ((microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100).toFixed(2) : "0.00"}%
                  </span>
                </div>
                <div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-700">
                  <div
                    className={cx("h-2 rounded-full", (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 5 ? "bg-red-500" : (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 1 ? "bg-yellow-500" : "bg-green-500")}
                    style={{ width: `${Math.min(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0, 100)}%` }}
                  />
                </div>
              </div>

              {/* Microphone Audio Level */}
              {isMicrophoneActive && (
                <div className="mt-3">
                  <AudioLevelMeter audioLevel={audioLevel} isAnalyzing={isAnalyzing} isMuted={isMicrophoneMuted} />
                </div>
              )}
            </div>
          )}

          {/* Data Transfer */}
          <div className="rounded border p-3">
            <div className="mb-2 flex items-center gap-2 font-medium">
              <LuHardDrive className="h-4 w-4" />
              <span>Data Transfer</span>
            </div>
            <div className="text-center">
              <div className="text-lg font-semibold">{formatBytes(metrics.bytes_processed)}</div>
              <div className="text-xs text-slate-500">Total Processed</div>
            </div>
          </div>

          {/* Connection Health */}
          <div className="rounded border p-3">
            <div className="mb-2 flex items-center gap-2 font-medium">
              <MdSignalWifi4Bar className="h-4 w-4" />
              <span>Connection Health</span>
            </div>
            <div className="space-y-1 text-sm text-slate-600 dark:text-slate-400">
              <div>
                Connection Drops:{" "}
                <span className={cx("font-medium", metrics.connection_drops > 0 ? "text-red-600 dark:text-red-400" : "text-green-600 dark:text-green-400")}>
                  {formatNumber(metrics.connection_drops)}
                </span>
              </div>
              {metrics.average_latency && (
                <div>Avg Latency: {metrics.average_latency}</div>
              )}
            </div>
          </div>
        </div>
      )}

      {/* Last Update */}
      <div className="flex items-center gap-1 text-xs text-slate-500">
        <LuClock className="h-3 w-3" />
        <span>Last updated: {lastUpdate.toLocaleTimeString()}</span>
      </div>

      {/* No Data State */}
      {!metrics && (
        <div className="flex flex-col items-center py-8 text-center">
          <MdError className="h-8 w-8 text-slate-400" />
          <p className="mt-2 font-medium">No Audio Data</p>
          <p className="text-sm text-slate-500">
            Audio metrics will appear when audio streaming is active.
          </p>
        </div>
      )}
    </div>
  );
}
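// The REST endpoints polled above serve as a fallback when the audio-events
// WebSocket is unavailable. The response shapes sketched below are inferred from
// the AudioMetrics/AudioConfig interfaces and the `.current` access in
// loadAudioConfig(); the concrete field values are illustrative only, not real
// device output:
//
//   GET /audio/metrics  -> { "frames_received": 150000, "frames_dropped": 3,
//                            "bytes_processed": 28400000, "last_frame_time": "...",
//                            "connection_drops": 0, "average_latency": "11ms" }
//   GET /audio/quality  -> { "current": { "Quality": 2, "Bitrate": 128,
//                            "SampleRate": 48000, "Channels": 2, "FrameSize": "20ms" } }
//
// The /microphone/* endpoints are assumed to mirror these shapes, with
// frames_sent in place of frames_received.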