feat(audio): add system memory endpoint and process metrics monitoring

- Add new /system/memory endpoint to expose total system memory
- Implement process metrics collection for audio and microphone processes
- Update UI to display real-time process metrics with charts
- Replace environment variable check with CLI flag for audio input server
- Improve audio metrics broadcasting with 1-second intervals
- Cap CPU percentage metrics at 100% to handle multi-core usage
This commit is contained in:
Alex P 2025-08-23 11:41:03 +00:00
parent 0e1c896aa2
commit 5e28a6c429
13 changed files with 504 additions and 134 deletions

View File

@ -231,6 +231,23 @@ systemctl restart jetkvm
cd ui && npm run lint cd ui && npm run lint
``` ```
### Local Code Quality Tools
The project includes several Makefile targets for local code quality checks that mirror the GitHub Actions workflows:
```bash
# Run Go linting (mirrors .github/workflows/lint.yml)
make lint
# Run Go linting with auto-fix
make lint-fix
# Run UI linting (mirrors .github/workflows/ui-lint.yml)
make ui-lint
```
**Note:** The `lint` and `lint-fix` targets require audio dependencies. Run `make dev_env` first if you haven't already.
### API Testing ### API Testing
```bash ```bash

View File

@ -1,5 +1,5 @@
# --- JetKVM Audio/Toolchain Dev Environment Setup --- # --- JetKVM Audio/Toolchain Dev Environment Setup ---
.PHONY: setup_toolchain build_audio_deps dev_env .PHONY: setup_toolchain build_audio_deps dev_env lint lint-fix ui-lint
# Clone the rv1106-system toolchain to $HOME/.jetkvm/rv1106-system # Clone the rv1106-system toolchain to $HOME/.jetkvm/rv1106-system
setup_toolchain: setup_toolchain:
@ -126,3 +126,27 @@ release:
@shasum -a 256 bin/jetkvm_app | cut -d ' ' -f 1 > bin/jetkvm_app.sha256 @shasum -a 256 bin/jetkvm_app | cut -d ' ' -f 1 > bin/jetkvm_app.sha256
rclone copyto bin/jetkvm_app r2://jetkvm-update/app/$(VERSION)/jetkvm_app rclone copyto bin/jetkvm_app r2://jetkvm-update/app/$(VERSION)/jetkvm_app
rclone copyto bin/jetkvm_app.sha256 r2://jetkvm-update/app/$(VERSION)/jetkvm_app.sha256 rclone copyto bin/jetkvm_app.sha256 r2://jetkvm-update/app/$(VERSION)/jetkvm_app.sha256
# Run golangci-lint locally with the same configuration as CI
# NOTE(review): the CGO_CFLAGS/CGO_LDFLAGS below should stay in lockstep with
# .github/workflows/lint.yml so local results match CI — confirm when editing.
# The empty static/.gitkeep is created so the build does not require a full
# UI build first — presumably for a go:embed of the static dir; verify.
lint: build_audio_deps
@echo "Running golangci-lint..."
@mkdir -p static && touch static/.gitkeep
CGO_ENABLED=1 \
CGO_CFLAGS="-I$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/celt" \
CGO_LDFLAGS="-L$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/src/.libs -lasound -L$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/.libs -lopus -lm -ldl -static" \
golangci-lint run --verbose
# Run golangci-lint with auto-fix
# Same environment as `lint`, but passes --fix so golangci-lint rewrites
# auto-fixable findings in place.
lint-fix: build_audio_deps
@echo "Running golangci-lint with auto-fix..."
@mkdir -p static && touch static/.gitkeep
CGO_ENABLED=1 \
CGO_CFLAGS="-I$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/celt" \
CGO_LDFLAGS="-L$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/src/.libs -lasound -L$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/.libs -lopus -lm -ldl -static" \
golangci-lint run --fix --verbose
# Run UI linting locally (mirrors GitHub workflow ui-lint.yml)
# npm ci gives a clean, lockfile-exact install before linting, same as CI.
ui-lint:
@echo "Running UI lint..."
@cd ui && npm ci
@cd ui && npm run lint

View File

@ -12,6 +12,7 @@ func main() {
versionPtr := flag.Bool("version", false, "print version and exit") versionPtr := flag.Bool("version", false, "print version and exit")
versionJsonPtr := flag.Bool("version-json", false, "print version as json and exit") versionJsonPtr := flag.Bool("version-json", false, "print version as json and exit")
audioServerPtr := flag.Bool("audio-server", false, "Run as audio server subprocess") audioServerPtr := flag.Bool("audio-server", false, "Run as audio server subprocess")
audioInputServerPtr := flag.Bool("audio-input-server", false, "Run as audio input server subprocess")
flag.Parse() flag.Parse()
if *versionPtr || *versionJsonPtr { if *versionPtr || *versionJsonPtr {
@ -24,5 +25,5 @@ func main() {
return return
} }
kvm.Main(*audioServerPtr) kvm.Main(*audioServerPtr, *audioInputServerPtr)
} }

View File

@ -3,6 +3,13 @@ package audio
import ( import (
"os" "os"
"strings" "strings"
"sync/atomic"
"unsafe"
)
var (
// Global audio output supervisor instance
globalOutputSupervisor unsafe.Pointer // *AudioServerSupervisor
) )
// isAudioServerProcess detects if we're running as the audio server subprocess // isAudioServerProcess detects if we're running as the audio server subprocess
@ -49,3 +56,17 @@ func StartNonBlockingAudioStreaming(send func([]byte)) error {
func StopNonBlockingAudioStreaming() { func StopNonBlockingAudioStreaming() {
StopAudioOutputStreaming() StopAudioOutputStreaming()
} }
// SetAudioOutputSupervisor atomically publishes supervisor as the
// process-wide audio output supervisor so that other goroutines can
// retrieve it via GetAudioOutputSupervisor. Passing nil clears the
// global reference.
func SetAudioOutputSupervisor(supervisor *AudioServerSupervisor) {
	p := unsafe.Pointer(supervisor)
	atomic.StorePointer(&globalOutputSupervisor, p)
}
// GetAudioOutputSupervisor returns the supervisor last stored by
// SetAudioOutputSupervisor, or nil when none has been set.
func GetAudioOutputSupervisor() *AudioServerSupervisor {
	// Converting a nil unsafe.Pointer yields a nil *AudioServerSupervisor,
	// so the load-and-convert needs no explicit nil branch.
	return (*AudioServerSupervisor)(atomic.LoadPointer(&globalOutputSupervisor))
}

View File

@ -157,9 +157,20 @@ func GetMicrophoneConfig() AudioConfig {
// GetAudioMetrics returns current audio metrics // GetAudioMetrics returns current audio metrics
func GetAudioMetrics() AudioMetrics { func GetAudioMetrics() AudioMetrics {
// Get base metrics
framesReceived := atomic.LoadInt64(&metrics.FramesReceived)
framesDropped := atomic.LoadInt64(&metrics.FramesDropped)
// If audio relay is running, use relay stats instead
if IsAudioRelayRunning() {
relayReceived, relayDropped := GetAudioRelayStats()
framesReceived = relayReceived
framesDropped = relayDropped
}
return AudioMetrics{ return AudioMetrics{
FramesReceived: atomic.LoadInt64(&metrics.FramesReceived), FramesReceived: framesReceived,
FramesDropped: atomic.LoadInt64(&metrics.FramesDropped), FramesDropped: framesDropped,
BytesProcessed: atomic.LoadInt64(&metrics.BytesProcessed), BytesProcessed: atomic.LoadInt64(&metrics.BytesProcessed),
LastFrameTime: metrics.LastFrameTime, LastFrameTime: metrics.LastFrameTime,
ConnectionDrops: atomic.LoadInt64(&metrics.ConnectionDrops), ConnectionDrops: atomic.LoadInt64(&metrics.ConnectionDrops),

View File

@ -21,6 +21,8 @@ const (
AudioEventMetricsUpdate AudioEventType = "audio-metrics-update" AudioEventMetricsUpdate AudioEventType = "audio-metrics-update"
AudioEventMicrophoneState AudioEventType = "microphone-state-changed" AudioEventMicrophoneState AudioEventType = "microphone-state-changed"
AudioEventMicrophoneMetrics AudioEventType = "microphone-metrics-update" AudioEventMicrophoneMetrics AudioEventType = "microphone-metrics-update"
AudioEventProcessMetrics AudioEventType = "audio-process-metrics"
AudioEventMicProcessMetrics AudioEventType = "microphone-process-metrics"
) )
// AudioEvent represents a WebSocket audio event // AudioEvent represents a WebSocket audio event
@ -60,6 +62,17 @@ type MicrophoneMetricsData struct {
AverageLatency string `json:"average_latency"` AverageLatency string `json:"average_latency"`
} }
// ProcessMetricsData represents process metrics data for WebSocket events.
// It is the wire form of a supervisor's process metrics, broadcast under the
// AudioEventProcessMetrics / AudioEventMicProcessMetrics event types.
type ProcessMetricsData struct {
// OS process ID of the monitored subprocess.
PID int `json:"pid"`
// CPU usage percentage (capped at 100 — see ProcessMonitor.collectMetrics).
CPUPercent float64 `json:"cpu_percent"`
// Resident set size in bytes.
MemoryRSS int64 `json:"memory_rss"`
// Virtual memory size in bytes.
MemoryVMS int64 `json:"memory_vms"`
// Memory usage as a percentage of total system memory.
MemoryPercent float64 `json:"memory_percent"`
// Whether the supervisor reports the subprocess as currently running.
Running bool `json:"running"`
// Human-readable name of the subprocess the metrics belong to.
ProcessName string `json:"process_name"`
}
// AudioEventSubscriber represents a WebSocket connection subscribed to audio events // AudioEventSubscriber represents a WebSocket connection subscribed to audio events
type AudioEventSubscriber struct { type AudioEventSubscriber struct {
conn *websocket.Conn conn *websocket.Conn
@ -220,6 +233,25 @@ func (aeb *AudioEventBroadcaster) sendCurrentMetrics(subscriber *AudioEventSubsc
} }
aeb.sendToSubscriber(subscriber, audioMetricsEvent) aeb.sendToSubscriber(subscriber, audioMetricsEvent)
// Send audio process metrics
if outputSupervisor := GetAudioOutputSupervisor(); outputSupervisor != nil {
if processMetrics := outputSupervisor.GetProcessMetrics(); processMetrics != nil {
audioProcessEvent := AudioEvent{
Type: AudioEventProcessMetrics,
Data: ProcessMetricsData{
PID: processMetrics.PID,
CPUPercent: processMetrics.CPUPercent,
MemoryRSS: processMetrics.MemoryRSS,
MemoryVMS: processMetrics.MemoryVMS,
MemoryPercent: processMetrics.MemoryPercent,
Running: outputSupervisor.IsRunning(),
ProcessName: processMetrics.ProcessName,
},
}
aeb.sendToSubscriber(subscriber, audioProcessEvent)
}
}
// Send microphone metrics using session provider // Send microphone metrics using session provider
sessionProvider := GetSessionProvider() sessionProvider := GetSessionProvider()
if sessionProvider.IsSessionActive() { if sessionProvider.IsSessionActive() {
@ -239,12 +271,31 @@ func (aeb *AudioEventBroadcaster) sendCurrentMetrics(subscriber *AudioEventSubsc
aeb.sendToSubscriber(subscriber, micMetricsEvent) aeb.sendToSubscriber(subscriber, micMetricsEvent)
} }
} }
// Send microphone process metrics
if inputSupervisor := GetAudioInputIPCSupervisor(); inputSupervisor != nil {
if processMetrics := inputSupervisor.GetProcessMetrics(); processMetrics != nil {
micProcessEvent := AudioEvent{
Type: AudioEventMicProcessMetrics,
Data: ProcessMetricsData{
PID: processMetrics.PID,
CPUPercent: processMetrics.CPUPercent,
MemoryRSS: processMetrics.MemoryRSS,
MemoryVMS: processMetrics.MemoryVMS,
MemoryPercent: processMetrics.MemoryPercent,
Running: inputSupervisor.IsRunning(),
ProcessName: processMetrics.ProcessName,
},
}
aeb.sendToSubscriber(subscriber, micProcessEvent)
}
}
} }
// startMetricsBroadcasting starts a goroutine that periodically broadcasts metrics // startMetricsBroadcasting starts a goroutine that periodically broadcasts metrics
func (aeb *AudioEventBroadcaster) startMetricsBroadcasting() { func (aeb *AudioEventBroadcaster) startMetricsBroadcasting() {
// Use 5-second interval instead of 2 seconds for constrained environments // Use 1-second interval to match Connection Stats sidebar frequency for smooth histogram progression
ticker := time.NewTicker(5 * time.Second) ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop() defer ticker.Stop()
for range ticker.C { for range ticker.C {
@ -311,6 +362,44 @@ func (aeb *AudioEventBroadcaster) startMetricsBroadcasting() {
aeb.broadcast(micMetricsEvent) aeb.broadcast(micMetricsEvent)
} }
} }
// Broadcast audio process metrics
if outputSupervisor := GetAudioOutputSupervisor(); outputSupervisor != nil {
if processMetrics := outputSupervisor.GetProcessMetrics(); processMetrics != nil {
audioProcessEvent := AudioEvent{
Type: AudioEventProcessMetrics,
Data: ProcessMetricsData{
PID: processMetrics.PID,
CPUPercent: processMetrics.CPUPercent,
MemoryRSS: processMetrics.MemoryRSS,
MemoryVMS: processMetrics.MemoryVMS,
MemoryPercent: processMetrics.MemoryPercent,
Running: outputSupervisor.IsRunning(),
ProcessName: processMetrics.ProcessName,
},
}
aeb.broadcast(audioProcessEvent)
}
}
// Broadcast microphone process metrics
if inputSupervisor := GetAudioInputIPCSupervisor(); inputSupervisor != nil {
if processMetrics := inputSupervisor.GetProcessMetrics(); processMetrics != nil {
micProcessEvent := AudioEvent{
Type: AudioEventMicProcessMetrics,
Data: ProcessMetricsData{
PID: processMetrics.PID,
CPUPercent: processMetrics.CPUPercent,
MemoryRSS: processMetrics.MemoryRSS,
MemoryVMS: processMetrics.MemoryVMS,
MemoryPercent: processMetrics.MemoryPercent,
Running: inputSupervisor.IsRunning(),
ProcessName: processMetrics.ProcessName,
},
}
aeb.broadcast(micProcessEvent)
}
}
} }
} }

View File

@ -10,11 +10,6 @@ import (
"github.com/jetkvm/kvm/internal/logging" "github.com/jetkvm/kvm/internal/logging"
) )
// IsAudioInputServerProcess detects if we're running as the audio input server subprocess
func IsAudioInputServerProcess() bool {
return os.Getenv("JETKVM_AUDIO_INPUT_SERVER") == "true"
}
// RunAudioInputServer runs the audio input server subprocess // RunAudioInputServer runs the audio input server subprocess
// This should be called from main() when the subprocess is detected // This should be called from main() when the subprocess is detected
func RunAudioInputServer() error { func RunAudioInputServer() error {

View File

@ -53,10 +53,9 @@ func (ais *AudioInputSupervisor) Start() error {
} }
// Create command for audio input server subprocess // Create command for audio input server subprocess
cmd := exec.CommandContext(ctx, execPath) cmd := exec.CommandContext(ctx, execPath, "--audio-input-server")
cmd.Env = append(os.Environ(), cmd.Env = append(os.Environ(),
"JETKVM_AUDIO_INPUT_SERVER=true", // Flag to indicate this is the input server process "JETKVM_AUDIO_INPUT_IPC=true", // Enable IPC mode
"JETKVM_AUDIO_INPUT_IPC=true", // Enable IPC mode
) )
// Set process group to allow clean termination // Set process group to allow clean termination

View File

@ -208,6 +208,10 @@ func (pm *ProcessMonitor) collectMetrics(pid int, state *processState) (ProcessM
if timeDelta > 0 { if timeDelta > 0 {
metric.CPUPercent = (cpuSeconds / timeDelta) * 100.0 metric.CPUPercent = (cpuSeconds / timeDelta) * 100.0
// Cap CPU percentage at 100% to handle multi-core usage
if metric.CPUPercent > 100.0 {
metric.CPUPercent = 100.0
}
} }
} }
@ -249,6 +253,11 @@ func (pm *ProcessMonitor) getTotalMemory() int64 {
return 0 return 0
} }
// GetTotalMemory returns total system memory in bytes (public method).
// It is the exported wrapper around the unexported getTotalMemory, which
// returns 0 when the total cannot be determined — callers should treat 0
// as "unknown" rather than as a real memory size.
func (pm *ProcessMonitor) GetTotalMemory() int64 {
return pm.getTotalMemory()
}
// Global process monitor instance // Global process monitor instance
var globalProcessMonitor *ProcessMonitor var globalProcessMonitor *ProcessMonitor
var processMonitorOnce sync.Once var processMonitorOnce sync.Once

19
main.go
View File

@ -63,6 +63,9 @@ func startAudioSubprocess() error {
// Create audio server supervisor // Create audio server supervisor
audioSupervisor = audio.NewAudioServerSupervisor() audioSupervisor = audio.NewAudioServerSupervisor()
// Set the global supervisor for access from audio package
audio.SetAudioOutputSupervisor(audioSupervisor)
// Set up callbacks for process lifecycle events // Set up callbacks for process lifecycle events
audioSupervisor.SetCallbacks( audioSupervisor.SetCallbacks(
// onProcessStart // onProcessStart
@ -112,7 +115,7 @@ func startAudioSubprocess() error {
return nil return nil
} }
func Main(audioServer bool) { func Main(audioServer bool, audioInputServer bool) {
// Initialize channel and set audio server flag // Initialize channel and set audio server flag
isAudioServer = audioServer isAudioServer = audioServer
audioProcessDone = make(chan struct{}) audioProcessDone = make(chan struct{})
@ -124,7 +127,7 @@ func Main(audioServer bool) {
} }
// If running as audio input server, only initialize audio input processing // If running as audio input server, only initialize audio input processing
if audio.IsAudioInputServerProcess() { if audioInputServer {
err := audio.RunAudioInputServer() err := audio.RunAudioInputServer()
if err != nil { if err != nil {
logger.Error().Err(err).Msg("audio input server failed") logger.Error().Err(err).Msg("audio input server failed")
@ -209,6 +212,14 @@ func Main(audioServer bool) {
audio.InitializeAudioEventBroadcaster() audio.InitializeAudioEventBroadcaster()
logger.Info().Msg("audio event broadcaster initialized") logger.Info().Msg("audio event broadcaster initialized")
// Start audio input system for microphone processing
err = audio.StartAudioInput()
if err != nil {
logger.Warn().Err(err).Msg("failed to start audio input system")
} else {
logger.Info().Msg("audio input system started")
}
if err := setInitialVirtualMediaState(); err != nil { if err := setInitialVirtualMediaState(); err != nil {
logger.Warn().Err(err).Msg("failed to set initial virtual media state") logger.Warn().Err(err).Msg("failed to set initial virtual media state")
} }
@ -261,6 +272,10 @@ func Main(audioServer bool) {
// Stop audio subprocess and wait for cleanup // Stop audio subprocess and wait for cleanup
if !isAudioServer { if !isAudioServer {
// Stop audio input system
logger.Info().Msg("stopping audio input system")
audio.StopAudioInput()
if audioSupervisor != nil { if audioSupervisor != nil {
logger.Info().Msg("stopping audio supervisor") logger.Info().Msg("stopping audio supervisor")
if err := audioSupervisor.Stop(); err != nil { if err := audioSupervisor.Stop(); err != nil {

View File

@ -3,6 +3,7 @@ import { MdGraphicEq, MdSignalWifi4Bar, MdError, MdMic } from "react-icons/md";
import { LuActivity, LuClock, LuHardDrive, LuSettings, LuCpu, LuMemoryStick } from "react-icons/lu"; import { LuActivity, LuClock, LuHardDrive, LuSettings, LuCpu, LuMemoryStick } from "react-icons/lu";
import { AudioLevelMeter } from "@components/AudioLevelMeter"; import { AudioLevelMeter } from "@components/AudioLevelMeter";
import StatChart from "@components/StatChart";
import { cx } from "@/cva.config"; import { cx } from "@/cva.config";
import { useMicrophone } from "@/hooks/useMicrophone"; import { useMicrophone } from "@/hooks/useMicrophone";
import { useAudioLevel } from "@/hooks/useAudioLevel"; import { useAudioLevel } from "@/hooks/useAudioLevel";
@ -50,28 +51,165 @@ const qualityLabels = {
3: "Ultra" 3: "Ultra"
}; };
// Format percentage values to 2 decimal places.
// null, undefined and NaN all collapse to the neutral "0.00%" placeholder.
function formatPercentage(value: number | null | undefined): string {
  const missing = value === null || value === undefined || isNaN(value);
  return missing ? "0.00%" : `${value.toFixed(2)}%`;
}
// Convert a byte count (typically an RSS value) to a "N.NN MB" string.
// null, undefined and NaN inputs render as the "0.00 MB" placeholder.
function formatMemoryMB(rssBytes: number | null | undefined): string {
  const missing = rssBytes === null || rssBytes === undefined || isNaN(rssBytes);
  if (missing) {
    return "0.00 MB";
  }
  return `${(rssBytes / (1024 * 1024)).toFixed(2)} MB`;
}
// Default system memory estimate in MB (will be replaced by actual value from backend)
const DEFAULT_SYSTEM_MEMORY_MB = 4096; // 4GB default
// Create chart array similar to connectionStats.tsx.
// Converts a time-keyed Map of samples into a dense, chronologically ordered
// series covering (at least) the last 120 seconds; seconds with no recorded
// sample get a null stat so the chart renders a gap instead of interpolating.
function createChartArray<T, K extends keyof T>(
  stream: Map<number, T>,
  metric: K,
): { date: number; stat: T[K] | null }[] {
  // Current time in seconds since the Unix epoch
  const now = Math.floor(Date.now() / 1000);

  // Earliest sample timestamp, found without sorting a copy of the keys.
  // Compare against undefined (not truthiness) so a timestamp of 0 is honored.
  let earliest: number | undefined;
  for (const date of stream.keys()) {
    if (earliest === undefined || date < earliest) earliest = date;
  }

  // Start the chart at the earlier of: the oldest sample, or 120s ago
  const firstChartDate =
    earliest !== undefined ? Math.min(earliest, now - 120) : now - 120;

  // Dense series via O(1) Map lookups per second (the previous linear
  // find() per point made this O(n^2) over the window)
  return Array.from({ length: now - firstChartDate }, (_, i) => {
    const currentDate = firstChartDate + i;
    return {
      date: currentDate,
      // No sample recorded for this second -> null
      stat: stream.get(currentDate)?.[metric] ?? null,
    };
  });
}
export default function AudioMetricsDashboard() { export default function AudioMetricsDashboard() {
// System memory state
const [systemMemoryMB, setSystemMemoryMB] = useState(DEFAULT_SYSTEM_MEMORY_MB);
// Use WebSocket-based audio events for real-time updates // Use WebSocket-based audio events for real-time updates
const { const {
audioMetrics, audioMetrics,
microphoneMetrics: wsMicrophoneMetrics, microphoneMetrics: wsMicrophoneMetrics,
audioProcessMetrics: wsAudioProcessMetrics,
microphoneProcessMetrics: wsMicrophoneProcessMetrics,
isConnected: wsConnected isConnected: wsConnected
} = useAudioEvents(); } = useAudioEvents();
// Fetch system memory information on component mount
useEffect(() => {
const fetchSystemMemory = async () => {
try {
const response = await api.GET('/system/memory');
const data = await response.json();
setSystemMemoryMB(data.total_memory_mb);
} catch (error) {
console.warn('Failed to fetch system memory, using default:', error);
}
};
fetchSystemMemory();
}, []);
// Update historical data when WebSocket process metrics are received
useEffect(() => {
if (wsConnected && wsAudioProcessMetrics && wsAudioProcessMetrics.running) {
const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
// Validate that now is a valid number
if (isNaN(now)) return;
const cpuStat = isNaN(wsAudioProcessMetrics.cpu_percent) ? null : wsAudioProcessMetrics.cpu_percent;
setAudioCpuStats(prev => {
const newMap = new Map(prev);
newMap.set(now, { cpu_percent: cpuStat });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
});
setAudioMemoryStats(prev => {
const newMap = new Map(prev);
const memoryRss = isNaN(wsAudioProcessMetrics.memory_rss) ? null : wsAudioProcessMetrics.memory_rss;
newMap.set(now, { memory_rss: memoryRss });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
});
}
}, [wsConnected, wsAudioProcessMetrics]);
useEffect(() => {
if (wsConnected && wsMicrophoneProcessMetrics) {
const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
// Validate that now is a valid number
if (isNaN(now)) return;
const cpuStat = isNaN(wsMicrophoneProcessMetrics.cpu_percent) ? null : wsMicrophoneProcessMetrics.cpu_percent;
setMicCpuStats(prev => {
const newMap = new Map(prev);
newMap.set(now, { cpu_percent: cpuStat });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
});
setMicMemoryStats(prev => {
const newMap = new Map(prev);
const memoryRss = isNaN(wsMicrophoneProcessMetrics.memory_rss) ? null : wsMicrophoneProcessMetrics.memory_rss;
newMap.set(now, { memory_rss: memoryRss });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
});
}
}, [wsConnected, wsMicrophoneProcessMetrics]);
// Fallback state for when WebSocket is not connected // Fallback state for when WebSocket is not connected
const [fallbackMetrics, setFallbackMetrics] = useState<AudioMetrics | null>(null); const [fallbackMetrics, setFallbackMetrics] = useState<AudioMetrics | null>(null);
const [fallbackMicrophoneMetrics, setFallbackMicrophoneMetrics] = useState<MicrophoneMetrics | null>(null); const [fallbackMicrophoneMetrics, setFallbackMicrophoneMetrics] = useState<MicrophoneMetrics | null>(null);
const [fallbackConnected, setFallbackConnected] = useState(false); const [fallbackConnected, setFallbackConnected] = useState(false);
// Process metrics state // Process metrics state (fallback for when WebSocket is not connected)
const [audioProcessMetrics, setAudioProcessMetrics] = useState<ProcessMetrics | null>(null); const [fallbackAudioProcessMetrics, setFallbackAudioProcessMetrics] = useState<ProcessMetrics | null>(null);
const [microphoneProcessMetrics, setMicrophoneProcessMetrics] = useState<ProcessMetrics | null>(null); const [fallbackMicrophoneProcessMetrics, setFallbackMicrophoneProcessMetrics] = useState<ProcessMetrics | null>(null);
// Historical data for histograms (last 60 data points, ~1 minute at 1s intervals) // Historical data for charts using Maps for better memory management
const [audioCpuHistory, setAudioCpuHistory] = useState<number[]>([]); const [audioCpuStats, setAudioCpuStats] = useState<Map<number, { cpu_percent: number | null }>>(new Map());
const [audioMemoryHistory, setAudioMemoryHistory] = useState<number[]>([]); const [audioMemoryStats, setAudioMemoryStats] = useState<Map<number, { memory_rss: number | null }>>(new Map());
const [micCpuHistory, setMicCpuHistory] = useState<number[]>([]); const [micCpuStats, setMicCpuStats] = useState<Map<number, { cpu_percent: number | null }>>(new Map());
const [micMemoryHistory, setMicMemoryHistory] = useState<number[]>([]); const [micMemoryStats, setMicMemoryStats] = useState<Map<number, { memory_rss: number | null }>>(new Map());
// Configuration state (these don't change frequently, so we can load them once) // Configuration state (these don't change frequently, so we can load them once)
const [config, setConfig] = useState<AudioConfig | null>(null); const [config, setConfig] = useState<AudioConfig | null>(null);
@ -81,6 +219,8 @@ export default function AudioMetricsDashboard() {
// Use WebSocket data when available, fallback to polling data otherwise // Use WebSocket data when available, fallback to polling data otherwise
const metrics = wsConnected && audioMetrics !== null ? audioMetrics : fallbackMetrics; const metrics = wsConnected && audioMetrics !== null ? audioMetrics : fallbackMetrics;
const microphoneMetrics = wsConnected && wsMicrophoneMetrics !== null ? wsMicrophoneMetrics : fallbackMicrophoneMetrics; const microphoneMetrics = wsConnected && wsMicrophoneMetrics !== null ? wsMicrophoneMetrics : fallbackMicrophoneMetrics;
const audioProcessMetrics = wsConnected && wsAudioProcessMetrics !== null ? wsAudioProcessMetrics : fallbackAudioProcessMetrics;
const microphoneProcessMetrics = wsConnected && wsMicrophoneProcessMetrics !== null ? wsMicrophoneProcessMetrics : fallbackMicrophoneProcessMetrics;
const isConnected = wsConnected ? wsConnected : fallbackConnected; const isConnected = wsConnected ? wsConnected : fallbackConnected;
// Microphone state for audio level monitoring // Microphone state for audio level monitoring
@ -147,17 +287,37 @@ export default function AudioMetricsDashboard() {
const audioProcessResp = await api.GET("/audio/process-metrics"); const audioProcessResp = await api.GET("/audio/process-metrics");
if (audioProcessResp.ok) { if (audioProcessResp.ok) {
const audioProcessData = await audioProcessResp.json(); const audioProcessData = await audioProcessResp.json();
setAudioProcessMetrics(audioProcessData); setFallbackAudioProcessMetrics(audioProcessData);
// Update historical data for histograms (keep last 60 points) // Update historical data for charts (keep last 120 seconds)
if (audioProcessData.running) { if (audioProcessData.running) {
setAudioCpuHistory(prev => { const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
const newHistory = [...prev, audioProcessData.cpu_percent]; // Validate that now is a valid number
return newHistory.slice(-60); // Keep last 60 data points if (isNaN(now)) return;
const cpuStat = isNaN(audioProcessData.cpu_percent) ? null : audioProcessData.cpu_percent;
const memoryRss = isNaN(audioProcessData.memory_rss) ? null : audioProcessData.memory_rss;
setAudioCpuStats(prev => {
const newMap = new Map(prev);
newMap.set(now, { cpu_percent: cpuStat });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
}); });
setAudioMemoryHistory(prev => {
const newHistory = [...prev, audioProcessData.memory_percent]; setAudioMemoryStats(prev => {
return newHistory.slice(-60); const newMap = new Map(prev);
newMap.set(now, { memory_rss: memoryRss });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
}); });
} }
} }
@ -182,19 +342,37 @@ export default function AudioMetricsDashboard() {
const micProcessResp = await api.GET("/microphone/process-metrics"); const micProcessResp = await api.GET("/microphone/process-metrics");
if (micProcessResp.ok) { if (micProcessResp.ok) {
const micProcessData = await micProcessResp.json(); const micProcessData = await micProcessResp.json();
setMicrophoneProcessMetrics(micProcessData); setFallbackMicrophoneProcessMetrics(micProcessData);
// Update historical data for histograms (keep last 60 points) // Update historical data for charts (keep last 120 seconds)
if (micProcessData.running) { const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
setMicCpuHistory(prev => { // Validate that now is a valid number
const newHistory = [...prev, micProcessData.cpu_percent]; if (isNaN(now)) return;
return newHistory.slice(-60); // Keep last 60 data points
}); const cpuStat = isNaN(micProcessData.cpu_percent) ? null : micProcessData.cpu_percent;
setMicMemoryHistory(prev => { const memoryRss = isNaN(micProcessData.memory_rss) ? null : micProcessData.memory_rss;
const newHistory = [...prev, micProcessData.memory_percent];
return newHistory.slice(-60); setMicCpuStats(prev => {
}); const newMap = new Map(prev);
} newMap.set(now, { cpu_percent: cpuStat });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
});
setMicMemoryStats(prev => {
const newMap = new Map(prev);
newMap.set(now, { memory_rss: memoryRss });
// Keep only last 120 seconds of data for memory management
const cutoff = now - 120;
for (const [key] of newMap) {
if (key < cutoff) newMap.delete(key);
}
return newMap;
});
} }
} catch (micProcessError) { } catch (micProcessError) {
console.debug("Microphone process metrics not available:", micProcessError); console.debug("Microphone process metrics not available:", micProcessError);
@ -222,15 +400,7 @@ export default function AudioMetricsDashboard() {
return ((metrics.frames_dropped / metrics.frames_received) * 100); return ((metrics.frames_dropped / metrics.frames_received) * 100);
}; };
const formatMemory = (bytes: number) => {
if (bytes === 0) return "0 MB";
const mb = bytes / (1024 * 1024);
if (mb < 1024) {
return `${mb.toFixed(1)} MB`;
}
const gb = mb / 1024;
return `${gb.toFixed(2)} GB`;
};
@ -244,53 +414,6 @@ export default function AudioMetricsDashboard() {
} }
}; };
// Histogram component for displaying historical data
const Histogram = ({ data, title, unit, color }: {
data: number[],
title: string,
unit: string,
color: string
}) => {
if (data.length === 0) return null;
const maxValue = Math.max(...data, 1); // Avoid division by zero
const minValue = Math.min(...data);
const range = maxValue - minValue;
return (
<div className="space-y-2">
<div className="flex items-center justify-between">
<span className="text-sm font-medium text-slate-700 dark:text-slate-300">
{title}
</span>
<span className="text-xs text-slate-500 dark:text-slate-400">
{data.length > 0 ? `${data[data.length - 1].toFixed(1)}${unit}` : `0${unit}`}
</span>
</div>
<div className="flex items-end gap-0.5 h-16 bg-slate-50 dark:bg-slate-800 rounded p-2">
{data.slice(-30).map((value, index) => { // Show last 30 points
const height = range > 0 ? ((value - minValue) / range) * 100 : 0;
return (
<div
key={index}
className={cx(
"flex-1 rounded-sm transition-all duration-200",
color
)}
style={{ height: `${Math.max(height, 2)}%` }}
title={`${value.toFixed(1)}${unit}`}
/>
);
})}
</div>
<div className="flex justify-between text-xs text-slate-400 dark:text-slate-500">
<span>{minValue.toFixed(1)}{unit}</span>
<span>{maxValue.toFixed(1)}{unit}</span>
</div>
</div>
);
};
return ( return (
<div className="space-y-4"> <div className="space-y-4">
{/* Header */} {/* Header */}
@ -405,30 +528,41 @@ export default function AudioMetricsDashboard() {
)} /> )} />
</div> </div>
<div className="space-y-4"> <div className="space-y-4">
<Histogram <div>
data={audioCpuHistory} <h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">CPU Usage</h4>
title="CPU Usage" <div className="h-24">
unit="%" <StatChart
color="bg-blue-500 dark:bg-blue-400" data={createChartArray(audioCpuStats, 'cpu_percent')}
/> unit="%"
<Histogram domain={[0, 100]}
data={audioMemoryHistory} />
title="Memory Usage" </div>
unit="%" </div>
color="bg-purple-500 dark:bg-purple-400" <div>
/> <h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">Memory Usage</h4>
<div className="h-24">
<StatChart
data={createChartArray(audioMemoryStats, 'memory_rss').map(item => ({
date: item.date,
stat: item.stat ? item.stat / (1024 * 1024) : null // Convert bytes to MB
}))}
unit="MB"
domain={[0, systemMemoryMB]}
/>
</div>
</div>
<div className="grid grid-cols-2 gap-2 text-xs"> <div className="grid grid-cols-2 gap-2 text-xs">
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded"> <div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
<div className="font-medium text-slate-900 dark:text-slate-100"> <div className="font-medium text-slate-900 dark:text-slate-100">
{formatMemory(audioProcessMetrics.memory_rss)} {formatPercentage(audioProcessMetrics.cpu_percent)}
</div> </div>
<div className="text-slate-500 dark:text-slate-400">RSS</div> <div className="text-slate-500 dark:text-slate-400">CPU</div>
</div> </div>
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded"> <div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
<div className="font-medium text-slate-900 dark:text-slate-100"> <div className="font-medium text-slate-900 dark:text-slate-100">
{formatMemory(audioProcessMetrics.memory_vms)} {formatMemoryMB(audioProcessMetrics.memory_rss)}
</div> </div>
<div className="text-slate-500 dark:text-slate-400">VMS</div> <div className="text-slate-500 dark:text-slate-400">Memory</div>
</div> </div>
</div> </div>
</div> </div>
@ -449,30 +583,41 @@ export default function AudioMetricsDashboard() {
)} /> )} />
</div> </div>
<div className="space-y-4"> <div className="space-y-4">
<Histogram <div>
data={micCpuHistory} <h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">CPU Usage</h4>
title="CPU Usage" <div className="h-24">
unit="%" <StatChart
color="bg-green-500 dark:bg-green-400" data={createChartArray(micCpuStats, 'cpu_percent')}
/> unit="%"
<Histogram domain={[0, 100]}
data={micMemoryHistory} />
title="Memory Usage" </div>
unit="%" </div>
color="bg-orange-500 dark:bg-orange-400" <div>
/> <h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">Memory Usage</h4>
<div className="h-24">
<StatChart
data={createChartArray(micMemoryStats, 'memory_rss').map(item => ({
date: item.date,
stat: item.stat ? item.stat / (1024 * 1024) : null // Convert bytes to MB
}))}
unit="MB"
domain={[0, systemMemoryMB]}
/>
</div>
</div>
<div className="grid grid-cols-2 gap-2 text-xs"> <div className="grid grid-cols-2 gap-2 text-xs">
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded"> <div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
<div className="font-medium text-slate-900 dark:text-slate-100"> <div className="font-medium text-slate-900 dark:text-slate-100">
{formatMemory(microphoneProcessMetrics.memory_rss)} {formatPercentage(microphoneProcessMetrics.cpu_percent)}
</div> </div>
<div className="text-slate-500 dark:text-slate-400">RSS</div> <div className="text-slate-500 dark:text-slate-400">CPU</div>
</div> </div>
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded"> <div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
<div className="font-medium text-slate-900 dark:text-slate-100"> <div className="font-medium text-slate-900 dark:text-slate-100">
{formatMemory(microphoneProcessMetrics.memory_vms)} {formatMemoryMB(microphoneProcessMetrics.memory_rss)}
</div> </div>
<div className="text-slate-500 dark:text-slate-400">VMS</div> <div className="text-slate-500 dark:text-slate-400">Memory</div>
</div> </div>
</div> </div>
</div> </div>

View File

@ -6,7 +6,9 @@ export type AudioEventType =
| 'audio-mute-changed' | 'audio-mute-changed'
| 'audio-metrics-update' | 'audio-metrics-update'
| 'microphone-state-changed' | 'microphone-state-changed'
| 'microphone-metrics-update'; | 'microphone-metrics-update'
| 'audio-process-metrics'
| 'microphone-process-metrics';
// Audio event data interfaces // Audio event data interfaces
export interface AudioMuteData { export interface AudioMuteData {
@ -36,10 +38,20 @@ export interface MicrophoneMetricsData {
average_latency: string; average_latency: string;
} }
// Per-process resource usage snapshot broadcast by the backend for the
// audio output and microphone input subprocesses
// ('audio-process-metrics' / 'microphone-process-metrics' events).
export interface ProcessMetricsData {
  pid: number;            // OS process id
  cpu_percent: number;    // CPU usage, percent
  memory_rss: number;     // resident set size, bytes
  memory_vms: number;     // virtual memory size, bytes
  memory_percent: number; // presumably RSS as a share of total system memory — confirm against backend
  running: boolean;       // whether the monitored process is currently alive
  process_name: string;   // human-readable process label
}
// Audio event structure // Audio event structure
export interface AudioEvent { export interface AudioEvent {
type: AudioEventType; type: AudioEventType;
data: AudioMuteData | AudioMetricsData | MicrophoneStateData | MicrophoneMetricsData; data: AudioMuteData | AudioMetricsData | MicrophoneStateData | MicrophoneMetricsData | ProcessMetricsData;
} }
// Hook return type // Hook return type
@ -56,6 +68,10 @@ export interface UseAudioEventsReturn {
microphoneState: MicrophoneStateData | null; microphoneState: MicrophoneStateData | null;
microphoneMetrics: MicrophoneMetricsData | null; microphoneMetrics: MicrophoneMetricsData | null;
// Process metrics
audioProcessMetrics: ProcessMetricsData | null;
microphoneProcessMetrics: ProcessMetricsData | null;
// Manual subscription control // Manual subscription control
subscribe: () => void; subscribe: () => void;
unsubscribe: () => void; unsubscribe: () => void;
@ -74,6 +90,8 @@ export function useAudioEvents(): UseAudioEventsReturn {
const [audioMetrics, setAudioMetrics] = useState<AudioMetricsData | null>(null); const [audioMetrics, setAudioMetrics] = useState<AudioMetricsData | null>(null);
const [microphoneState, setMicrophoneState] = useState<MicrophoneStateData | null>(null); const [microphoneState, setMicrophoneState] = useState<MicrophoneStateData | null>(null);
const [microphoneMetrics, setMicrophoneMetricsData] = useState<MicrophoneMetricsData | null>(null); const [microphoneMetrics, setMicrophoneMetricsData] = useState<MicrophoneMetricsData | null>(null);
const [audioProcessMetrics, setAudioProcessMetrics] = useState<ProcessMetricsData | null>(null);
const [microphoneProcessMetrics, setMicrophoneProcessMetrics] = useState<ProcessMetricsData | null>(null);
// Local subscription state // Local subscription state
const [isLocallySubscribed, setIsLocallySubscribed] = useState(false); const [isLocallySubscribed, setIsLocallySubscribed] = useState(false);
@ -214,6 +232,18 @@ export function useAudioEvents(): UseAudioEventsReturn {
break; break;
} }
case 'audio-process-metrics': {
const audioProcessData = audioEvent.data as ProcessMetricsData;
setAudioProcessMetrics(audioProcessData);
break;
}
case 'microphone-process-metrics': {
const micProcessData = audioEvent.data as ProcessMetricsData;
setMicrophoneProcessMetrics(micProcessData);
break;
}
default: default:
// Ignore other message types (WebRTC signaling, etc.) // Ignore other message types (WebRTC signaling, etc.)
break; break;
@ -275,6 +305,10 @@ export function useAudioEvents(): UseAudioEventsReturn {
microphoneState, microphoneState,
microphoneMetrics: microphoneMetrics, microphoneMetrics: microphoneMetrics,
// Process metrics
audioProcessMetrics,
microphoneProcessMetrics,
// Manual subscription control // Manual subscription control
subscribe, subscribe,
unsubscribe, unsubscribe,

10
web.go
View File

@ -503,6 +503,16 @@ func setupRouter() *gin.Engine {
}) })
}) })
	// System memory information endpoint.
	// Exposes the device's total physical memory (as reported by the audio
	// process monitor) in raw bytes and whole megabytes; the dashboard uses
	// the MB figure to bound its process-memory chart domains.
	protected.GET("/system/memory", func(c *gin.Context) {
		processMonitor := audio.GetProcessMonitor()
		totalMemory := processMonitor.GetTotalMemory()
		c.JSON(200, gin.H{
			"total_memory_bytes": totalMemory,
			"total_memory_mb":    totalMemory / (1024 * 1024), // integer division: floors to whole MB
		})
	})
protected.POST("/microphone/reset", func(c *gin.Context) { protected.POST("/microphone/reset", func(c *gin.Context) {
if currentSession == nil { if currentSession == nil {
c.JSON(400, gin.H{"error": "no active session"}) c.JSON(400, gin.H{"error": "no active session"})