mirror of https://github.com/jetkvm/kvm.git
Compare commits
No commits in common. "94ca3fa3f4a0f67c2fb07f3320c52a8673be4119" and "3158ca59f78e39db6ba0eb1f4359db3f2b0de0b2" have entirely different histories.
94ca3fa3f4...3158ca59f7
@ -1,316 +0,0 @@
-package audio
-
-import (
-	"context"
-	"sync"
-	"time"
-
-	"github.com/coder/websocket"
-	"github.com/coder/websocket/wsjson"
-
-	"github.com/jetkvm/kvm/internal/logging"
-	"github.com/rs/zerolog"
-)
-
-// AudioEventType represents different types of audio events
-type AudioEventType string
-
-const (
-	AudioEventMuteChanged       AudioEventType = "audio-mute-changed"
-	AudioEventMetricsUpdate     AudioEventType = "audio-metrics-update"
-	AudioEventMicrophoneState   AudioEventType = "microphone-state-changed"
-	AudioEventMicrophoneMetrics AudioEventType = "microphone-metrics-update"
-)
-
-// AudioEvent represents a WebSocket audio event
-type AudioEvent struct {
-	Type AudioEventType `json:"type"`
-	Data interface{}    `json:"data"`
-}
-
-// AudioMuteData represents audio mute state change data
-type AudioMuteData struct {
-	Muted bool `json:"muted"`
-}
-
-// AudioMetricsData represents audio metrics data
-type AudioMetricsData struct {
-	FramesReceived  int64  `json:"frames_received"`
-	FramesDropped   int64  `json:"frames_dropped"`
-	BytesProcessed  int64  `json:"bytes_processed"`
-	LastFrameTime   string `json:"last_frame_time"`
-	ConnectionDrops int64  `json:"connection_drops"`
-	AverageLatency  string `json:"average_latency"`
-}
-
-// MicrophoneStateData represents microphone state data
-type MicrophoneStateData struct {
-	Running       bool `json:"running"`
-	SessionActive bool `json:"session_active"`
-}
-
-// MicrophoneMetricsData represents microphone metrics data
-type MicrophoneMetricsData struct {
-	FramesSent      int64  `json:"frames_sent"`
-	FramesDropped   int64  `json:"frames_dropped"`
-	BytesProcessed  int64  `json:"bytes_processed"`
-	LastFrameTime   string `json:"last_frame_time"`
-	ConnectionDrops int64  `json:"connection_drops"`
-	AverageLatency  string `json:"average_latency"`
-}
-
-// AudioEventSubscriber represents a WebSocket connection subscribed to audio events
-type AudioEventSubscriber struct {
-	conn   *websocket.Conn
-	ctx    context.Context
-	logger *zerolog.Logger
-}
-
-// AudioEventBroadcaster manages audio event subscriptions and broadcasting
-type AudioEventBroadcaster struct {
-	subscribers map[string]*AudioEventSubscriber
-	mutex       sync.RWMutex
-	logger      *zerolog.Logger
-}
-
-var (
-	audioEventBroadcaster *AudioEventBroadcaster
-	audioEventOnce        sync.Once
-)
-
-// InitializeAudioEventBroadcaster initializes the global audio event broadcaster
-func InitializeAudioEventBroadcaster() {
-	audioEventOnce.Do(func() {
-		l := logging.GetDefaultLogger().With().Str("component", "audio-events").Logger()
-		audioEventBroadcaster = &AudioEventBroadcaster{
-			subscribers: make(map[string]*AudioEventSubscriber),
-			logger:      &l,
-		}
-
-		// Start metrics broadcasting goroutine
-		go audioEventBroadcaster.startMetricsBroadcasting()
-	})
-}
-
-// GetAudioEventBroadcaster returns the singleton audio event broadcaster
-func GetAudioEventBroadcaster() *AudioEventBroadcaster {
-	audioEventOnce.Do(func() {
-		l := logging.GetDefaultLogger().With().Str("component", "audio-events").Logger()
-		audioEventBroadcaster = &AudioEventBroadcaster{
-			subscribers: make(map[string]*AudioEventSubscriber),
-			logger:      &l,
-		}
-
-		// Start metrics broadcasting goroutine
-		go audioEventBroadcaster.startMetricsBroadcasting()
-	})
-	return audioEventBroadcaster
-}
-
-// Subscribe adds a WebSocket connection to receive audio events
-func (aeb *AudioEventBroadcaster) Subscribe(connectionID string, conn *websocket.Conn, ctx context.Context, logger *zerolog.Logger) {
-	aeb.mutex.Lock()
-	defer aeb.mutex.Unlock()
-
-	aeb.subscribers[connectionID] = &AudioEventSubscriber{
-		conn:   conn,
-		ctx:    ctx,
-		logger: logger,
-	}
-
-	aeb.logger.Info().Str("connectionID", connectionID).Msg("audio events subscription added")
-
-	// Send initial state to new subscriber
-	go aeb.sendInitialState(connectionID)
-}
-
-// Unsubscribe removes a WebSocket connection from audio events
-func (aeb *AudioEventBroadcaster) Unsubscribe(connectionID string) {
-	aeb.mutex.Lock()
-	defer aeb.mutex.Unlock()
-
-	delete(aeb.subscribers, connectionID)
-	aeb.logger.Info().Str("connectionID", connectionID).Msg("audio events subscription removed")
-}
-
-// BroadcastAudioMuteChanged broadcasts audio mute state changes
-func (aeb *AudioEventBroadcaster) BroadcastAudioMuteChanged(muted bool) {
-	event := AudioEvent{
-		Type: AudioEventMuteChanged,
-		Data: AudioMuteData{Muted: muted},
-	}
-	aeb.broadcast(event)
-}
-
-// BroadcastMicrophoneStateChanged broadcasts microphone state changes
-func (aeb *AudioEventBroadcaster) BroadcastMicrophoneStateChanged(running, sessionActive bool) {
-	event := AudioEvent{
-		Type: AudioEventMicrophoneState,
-		Data: MicrophoneStateData{
-			Running:       running,
-			SessionActive: sessionActive,
-		},
-	}
-	aeb.broadcast(event)
-}
-
-// sendInitialState sends current audio state to a new subscriber
-func (aeb *AudioEventBroadcaster) sendInitialState(connectionID string) {
-	aeb.mutex.RLock()
-	subscriber, exists := aeb.subscribers[connectionID]
-	aeb.mutex.RUnlock()
-
-	if !exists {
-		return
-	}
-
-	// Send current audio mute state
-	muteEvent := AudioEvent{
-		Type: AudioEventMuteChanged,
-		Data: AudioMuteData{Muted: IsAudioMuted()},
-	}
-	aeb.sendToSubscriber(subscriber, muteEvent)
-
-	// Send current microphone state using session provider
-	sessionProvider := GetSessionProvider()
-	sessionActive := sessionProvider.IsSessionActive()
-	var running bool
-	if sessionActive {
-		if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
-			running = inputManager.IsRunning()
-		}
-	}
-
-	micStateEvent := AudioEvent{
-		Type: AudioEventMicrophoneState,
-		Data: MicrophoneStateData{
-			Running:       running,
-			SessionActive: sessionActive,
-		},
-	}
-	aeb.sendToSubscriber(subscriber, micStateEvent)
-
-	// Send current metrics
-	aeb.sendCurrentMetrics(subscriber)
-}
-
-// sendCurrentMetrics sends current audio and microphone metrics to a subscriber
-func (aeb *AudioEventBroadcaster) sendCurrentMetrics(subscriber *AudioEventSubscriber) {
-	// Send audio metrics
-	audioMetrics := GetAudioMetrics()
-	audioMetricsEvent := AudioEvent{
-		Type: AudioEventMetricsUpdate,
-		Data: AudioMetricsData{
-			FramesReceived:  audioMetrics.FramesReceived,
-			FramesDropped:   audioMetrics.FramesDropped,
-			BytesProcessed:  audioMetrics.BytesProcessed,
-			LastFrameTime:   audioMetrics.LastFrameTime.Format("2006-01-02T15:04:05.000Z"),
-			ConnectionDrops: audioMetrics.ConnectionDrops,
-			AverageLatency:  audioMetrics.AverageLatency.String(),
-		},
-	}
-	aeb.sendToSubscriber(subscriber, audioMetricsEvent)
-
-	// Send microphone metrics using session provider
-	sessionProvider := GetSessionProvider()
-	if sessionProvider.IsSessionActive() {
-		if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
-			micMetrics := inputManager.GetMetrics()
-			micMetricsEvent := AudioEvent{
-				Type: AudioEventMicrophoneMetrics,
-				Data: MicrophoneMetricsData{
-					FramesSent:      micMetrics.FramesSent,
-					FramesDropped:   micMetrics.FramesDropped,
-					BytesProcessed:  micMetrics.BytesProcessed,
-					LastFrameTime:   micMetrics.LastFrameTime.Format("2006-01-02T15:04:05.000Z"),
-					ConnectionDrops: micMetrics.ConnectionDrops,
-					AverageLatency:  micMetrics.AverageLatency.String(),
-				},
-			}
-			aeb.sendToSubscriber(subscriber, micMetricsEvent)
-		}
-	}
-}
-
-// startMetricsBroadcasting starts a goroutine that periodically broadcasts metrics
-func (aeb *AudioEventBroadcaster) startMetricsBroadcasting() {
-	ticker := time.NewTicker(2 * time.Second) // Same interval as current polling
-	defer ticker.Stop()
-
-	for range ticker.C {
-		aeb.mutex.RLock()
-		subscriberCount := len(aeb.subscribers)
-		aeb.mutex.RUnlock()
-
-		// Only broadcast if there are subscribers
-		if subscriberCount == 0 {
-			continue
-		}
-
-		// Broadcast audio metrics
-		audioMetrics := GetAudioMetrics()
-		audioMetricsEvent := AudioEvent{
-			Type: AudioEventMetricsUpdate,
-			Data: AudioMetricsData{
-				FramesReceived:  audioMetrics.FramesReceived,
-				FramesDropped:   audioMetrics.FramesDropped,
-				BytesProcessed:  audioMetrics.BytesProcessed,
-				LastFrameTime:   audioMetrics.LastFrameTime.Format("2006-01-02T15:04:05.000Z"),
-				ConnectionDrops: audioMetrics.ConnectionDrops,
-				AverageLatency:  audioMetrics.AverageLatency.String(),
-			},
-		}
-		aeb.broadcast(audioMetricsEvent)
-
-		// Broadcast microphone metrics if available using session provider
-		sessionProvider := GetSessionProvider()
-		if sessionProvider.IsSessionActive() {
-			if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
-				micMetrics := inputManager.GetMetrics()
-				micMetricsEvent := AudioEvent{
-					Type: AudioEventMicrophoneMetrics,
-					Data: MicrophoneMetricsData{
-						FramesSent:      micMetrics.FramesSent,
-						FramesDropped:   micMetrics.FramesDropped,
-						BytesProcessed:  micMetrics.BytesProcessed,
-						LastFrameTime:   micMetrics.LastFrameTime.Format("2006-01-02T15:04:05.000Z"),
-						ConnectionDrops: micMetrics.ConnectionDrops,
-						AverageLatency:  micMetrics.AverageLatency.String(),
-					},
-				}
-				aeb.broadcast(micMetricsEvent)
-			}
-		}
-	}
-}
-
-// broadcast sends an event to all subscribers
-func (aeb *AudioEventBroadcaster) broadcast(event AudioEvent) {
-	aeb.mutex.RLock()
-	defer aeb.mutex.RUnlock()
-
-	for connectionID, subscriber := range aeb.subscribers {
-		go func(id string, sub *AudioEventSubscriber) {
-			if !aeb.sendToSubscriber(sub, event) {
-				// Remove failed subscriber
-				aeb.mutex.Lock()
-				delete(aeb.subscribers, id)
-				aeb.mutex.Unlock()
-				aeb.logger.Warn().Str("connectionID", id).Msg("removed failed audio events subscriber")
-			}
-		}(connectionID, subscriber)
-	}
-}
-
-// sendToSubscriber sends an event to a specific subscriber
-func (aeb *AudioEventBroadcaster) sendToSubscriber(subscriber *AudioEventSubscriber, event AudioEvent) bool {
-	ctx, cancel := context.WithTimeout(subscriber.ctx, 5*time.Second)
-	defer cancel()
-
-	err := wsjson.Write(ctx, subscriber.conn, event)
-	if err != nil {
-		subscriber.logger.Warn().Err(err).Msg("failed to send audio event to subscriber")
-		return false
-	}
-
-	return true
-}
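For orientation, a minimal sketch of how this removed broadcaster was driven from the rest of the codebase. The wiring below is illustrative only (the function name and its parameters are assumptions); the real call sites appear in the web.go hunks further down.

// Illustrative wiring, assumed to live in the audio package; connID, wsConn,
// ctx, and logger would come from the WebSocket signaling handler (see web.go).
func exampleWiring(connID string, wsConn *websocket.Conn, ctx context.Context, logger *zerolog.Logger) {
	InitializeAudioEventBroadcaster() // once, at startup

	broadcaster := GetAudioEventBroadcaster()
	broadcaster.Subscribe(connID, wsConn, ctx, logger) // on a "subscribe-audio-events" message
	defer broadcaster.Unsubscribe(connID)              // on connection teardown

	// Any state change is then pushed to every subscriber:
	broadcaster.BroadcastAudioMuteChanged(true)
}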
@ -64,7 +64,8 @@ func (aim *AudioInputManager) Stop() {
 	aim.logger.Info().Msg("Stopping audio input manager")
 
 	// Stop the non-blocking audio input stream
-	StopNonBlockingAudioInput()
+	// Note: This is handled by the global non-blocking audio manager
+	// Individual input streams are managed centrally
 
 	// Drain the input buffer
 	go func() {
@ -77,8 +78,6 @@ func (aim *AudioInputManager) Stop() {
 			}
 		}
 	}()
-
-	aim.logger.Info().Msg("Audio input manager stopped")
 }
 
 // WriteOpusFrame writes an Opus frame to the input buffer
@ -134,7 +134,7 @@ func (nam *NonBlockingAudioManager) outputWorkerThread() {
 	// Lock to OS thread to isolate blocking CGO operations
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
 
 	defer nam.wg.Done()
 	defer atomic.StoreInt32(&nam.outputWorkerRunning, 0)
 
@ -271,7 +271,7 @@ func (nam *NonBlockingAudioManager) inputWorkerThread() {
 	// Lock to OS thread to isolate blocking CGO operations
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
 
 	defer nam.wg.Done()
 	defer atomic.StoreInt32(&nam.inputWorkerRunning, 0)
 
@ -413,10 +413,6 @@ func (nam *NonBlockingAudioManager) StopAudioInput() {
 	// Stop only the input coordinator
 	atomic.StoreInt32(&nam.inputRunning, 0)
 
-	// Allow coordinator thread to process the stop signal and update state
-	// This prevents race conditions in state queries immediately after stopping
-	time.Sleep(50 * time.Millisecond)
-
 	nam.logger.Info().Msg("audio input stopped")
 }
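The worker hunks above keep the same `runtime.LockOSThread()` preamble on both sides: each audio worker pins itself to a dedicated OS thread so blocking CGO calls cannot stall other goroutines scheduled on that thread. A standalone sketch of the pattern, with illustrative names (`worker`, `wg`, `running` are assumptions, not the repository's real identifiers):

package main

import (
	"runtime"
	"sync"
	"sync/atomic"
)

// worker pins itself to one OS thread so blocking CGO calls cannot stall
// other goroutines multiplexed onto the same thread.
func worker(wg *sync.WaitGroup, running *int32) {
	runtime.LockOSThread()         // pin this goroutine to a dedicated OS thread
	defer runtime.UnlockOSThread() // release the thread on exit
	defer wg.Done()
	defer atomic.StoreInt32(running, 0) // publish "stopped" for state queries

	// ... blocking CGO audio calls would run here ...
}

func main() {
	var wg sync.WaitGroup
	running := int32(1)
	wg.Add(1)
	go worker(&wg, &running)
	wg.Wait()
}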
@ -1,30 +0,0 @@
-package audio
-
-// SessionProvider interface abstracts session management for audio events
-type SessionProvider interface {
-	IsSessionActive() bool
-	GetAudioInputManager() *AudioInputManager
-}
-
-// DefaultSessionProvider is a no-op implementation
-type DefaultSessionProvider struct{}
-
-func (d *DefaultSessionProvider) IsSessionActive() bool {
-	return false
-}
-
-func (d *DefaultSessionProvider) GetAudioInputManager() *AudioInputManager {
-	return nil
-}
-
-var sessionProvider SessionProvider = &DefaultSessionProvider{}
-
-// SetSessionProvider allows the main package to inject session management
-func SetSessionProvider(provider SessionProvider) {
-	sessionProvider = provider
-}
-
-// GetSessionProvider returns the current session provider
-func GetSessionProvider() SessionProvider {
-	return sessionProvider
-}
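Because the package-level `sessionProvider` defaults to a no-op, the broadcaster stays usable before `main` wires in the real session. A hypothetical test double, assuming it lives in the same audio package (`stubProvider` and `withStubSession` are illustrative names, not part of the repository):

// stubProvider is a hypothetical test double for SessionProvider.
type stubProvider struct {
	active  bool
	manager *AudioInputManager
}

func (s *stubProvider) IsSessionActive() bool                    { return s.active }
func (s *stubProvider) GetAudioInputManager() *AudioInputManager { return s.manager }

// withStubSession injects the stub for the duration of fn, then restores
// whatever provider was registered before.
func withStubSession(active bool, fn func()) {
	prev := GetSessionProvider()
	SetSessionProvider(&stubProvider{active: active})
	defer SetSessionProvider(prev)
	fn()
}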
main.go: 7 changed lines
@ -106,13 +106,6 @@ func Main() {
 		logger.Warn().Err(err).Msg("failed to start non-blocking audio streaming")
 	}
 
-	// Initialize session provider for audio events
-	initializeAudioSessionProvider()
-
-	// Initialize audio event broadcaster for WebSocket-based real-time updates
-	audio.InitializeAudioEventBroadcaster()
-	logger.Info().Msg("audio event broadcaster initialized")
-
 	if err := setInitialVirtualMediaState(); err != nil {
 		logger.Warn().Err(err).Msg("failed to set initial virtual media state")
 	}
@ -1,24 +0,0 @@
-package kvm
-
-import "github.com/jetkvm/kvm/internal/audio"
-
-// KVMSessionProvider implements the audio.SessionProvider interface
-type KVMSessionProvider struct{}
-
-// IsSessionActive returns whether there's an active session
-func (k *KVMSessionProvider) IsSessionActive() bool {
-	return currentSession != nil
-}
-
-// GetAudioInputManager returns the current session's audio input manager
-func (k *KVMSessionProvider) GetAudioInputManager() *audio.AudioInputManager {
-	if currentSession == nil {
-		return nil
-	}
-	return currentSession.AudioInputManager
-}
-
-// initializeAudioSessionProvider sets up the session provider for the audio package
-func initializeAudioSessionProvider() {
-	audio.SetSessionProvider(&KVMSessionProvider{})
-}
@ -20,7 +20,6 @@ import MountPopopover from "@/components/popovers/MountPopover";
 import ExtensionPopover from "@/components/popovers/ExtensionPopover";
 import AudioControlPopover from "@/components/popovers/AudioControlPopover";
 import { useDeviceUiNavigation } from "@/hooks/useAppNavigation";
-import { useAudioEvents } from "@/hooks/useAudioEvents";
 import api from "@/api";
 
 // Type for microphone error
@ -82,36 +81,27 @@ export default function Actionbar({
     [setDisableFocusTrap],
   );
 
-  // Use WebSocket-based audio events for real-time updates
-  const { audioMuted, isConnected } = useAudioEvents();
-
-  // Fallback to polling if WebSocket is not connected
-  const [fallbackMuted, setFallbackMuted] = useState(false);
+  // Mute/unmute state for button display
+  const [isMuted, setIsMuted] = useState(false);
   useEffect(() => {
-    if (!isConnected) {
-      // Load initial state
-      api.GET("/audio/mute").then(async resp => {
-        if (resp.ok) {
-          const data = await resp.json();
-          setFallbackMuted(!!data.muted);
-        }
-      });
-
-      // Fallback polling when WebSocket is not available
-      const interval = setInterval(async () => {
-        const resp = await api.GET("/audio/mute");
-        if (resp.ok) {
-          const data = await resp.json();
-          setFallbackMuted(!!data.muted);
-        }
-      }, 1000);
-
-      return () => clearInterval(interval);
-    }
-  }, [isConnected]);
-
-  // Use WebSocket data when available, fallback to polling data otherwise
-  const isMuted = isConnected && audioMuted !== null ? audioMuted : fallbackMuted;
+    api.GET("/audio/mute").then(async resp => {
+      if (resp.ok) {
+        const data = await resp.json();
+        setIsMuted(!!data.muted);
+      }
+    });
+
+    // Refresh mute state periodically for button display
+    const interval = setInterval(async () => {
+      const resp = await api.GET("/audio/mute");
+      if (resp.ok) {
+        const data = await resp.json();
+        setIsMuted(!!data.muted);
+      }
+    }, 1000);
+    return () => clearInterval(interval);
+  }, []);
 
   return (
     <Container className="border-b border-b-slate-800/20 bg-white dark:border-b-slate-300/20 dark:bg-slate-900">
@ -6,7 +6,6 @@ import { AudioLevelMeter } from "@components/AudioLevelMeter";
 import { cx } from "@/cva.config";
 import { useMicrophone } from "@/hooks/useMicrophone";
 import { useAudioLevel } from "@/hooks/useAudioLevel";
-import { useAudioEvents } from "@/hooks/useAudioEvents";
 import api from "@/api";
 
 interface AudioMetrics {
@ -43,46 +42,51 @@ const qualityLabels = {
 };
 
 export default function AudioMetricsDashboard() {
-  // Use WebSocket-based audio events for real-time updates
-  const {
-    audioMetrics,
-    microphoneMetrics: wsMicrophoneMetrics,
-    isConnected: wsConnected
-  } = useAudioEvents();
-
-  // Fallback state for when WebSocket is not connected
-  const [fallbackMetrics, setFallbackMetrics] = useState<AudioMetrics | null>(null);
-  const [fallbackMicrophoneMetrics, setFallbackMicrophoneMetrics] = useState<MicrophoneMetrics | null>(null);
-  const [fallbackConnected, setFallbackConnected] = useState(false);
-
-  // Configuration state (these don't change frequently, so we can load them once)
+  const [metrics, setMetrics] = useState<AudioMetrics | null>(null);
+  const [microphoneMetrics, setMicrophoneMetrics] = useState<MicrophoneMetrics | null>(null);
   const [config, setConfig] = useState<AudioConfig | null>(null);
   const [microphoneConfig, setMicrophoneConfig] = useState<AudioConfig | null>(null);
+  const [isConnected, setIsConnected] = useState(false);
   const [lastUpdate, setLastUpdate] = useState<Date>(new Date());
 
-  // Use WebSocket data when available, fallback to polling data otherwise
-  const metrics = wsConnected && audioMetrics !== null ? audioMetrics : fallbackMetrics;
-  const microphoneMetrics = wsConnected && wsMicrophoneMetrics !== null ? wsMicrophoneMetrics : fallbackMicrophoneMetrics;
-  const isConnected = wsConnected ? wsConnected : fallbackConnected;
-
   // Microphone state for audio level monitoring
   const { isMicrophoneActive, isMicrophoneMuted, microphoneStream } = useMicrophone();
   const { audioLevel, isAnalyzing } = useAudioLevel(microphoneStream);
 
   useEffect(() => {
-    // Load initial configuration (only once)
-    loadAudioConfig();
-
-    // Set up fallback polling only when WebSocket is not connected
-    if (!wsConnected) {
-      loadAudioData();
-      const interval = setInterval(loadAudioData, 1000);
-      return () => clearInterval(interval);
-    }
-  }, [wsConnected]);
+    loadAudioData();
+    // Refresh every 1 second for real-time metrics
+    const interval = setInterval(loadAudioData, 1000);
+    return () => clearInterval(interval);
+  }, []);
 
-  const loadAudioConfig = async () => {
+  const loadAudioData = async () => {
     try {
+      // Load metrics
+      const metricsResp = await api.GET("/audio/metrics");
+      if (metricsResp.ok) {
+        const metricsData = await metricsResp.json();
+        setMetrics(metricsData);
+        // Consider connected if API call succeeds, regardless of frame count
+        setIsConnected(true);
+        setLastUpdate(new Date());
+      } else {
+        setIsConnected(false);
+      }
+
+      // Load microphone metrics
+      try {
+        const micResp = await api.GET("/microphone/metrics");
+        if (micResp.ok) {
+          const micData = await micResp.json();
+          setMicrophoneMetrics(micData);
+        }
+      } catch (micError) {
+        // Microphone metrics might not be available, that's okay
+        console.debug("Microphone metrics not available:", micError);
+      }
+
       // Load config
       const configResp = await api.GET("/audio/quality");
       if (configResp.ok) {
@ -100,39 +104,9 @@ export default function AudioMetricsDashboard() {
       } catch (micConfigError) {
         console.debug("Microphone config not available:", micConfigError);
       }
-    } catch (error) {
-      console.error("Failed to load audio config:", error);
-    }
-  };
-
-  const loadAudioData = async () => {
-    try {
-      // Load metrics
-      const metricsResp = await api.GET("/audio/metrics");
-      if (metricsResp.ok) {
-        const metricsData = await metricsResp.json();
-        setFallbackMetrics(metricsData);
-        // Consider connected if API call succeeds, regardless of frame count
-        setFallbackConnected(true);
-        setLastUpdate(new Date());
-      } else {
-        setFallbackConnected(false);
-      }
-
-      // Load microphone metrics
-      try {
-        const micResp = await api.GET("/microphone/metrics");
-        if (micResp.ok) {
-          const micData = await micResp.json();
-          setFallbackMicrophoneMetrics(micData);
-        }
-      } catch (micError) {
-        // Microphone metrics might not be available, that's okay
-        console.debug("Microphone metrics not available:", micError);
-      }
     } catch (error) {
       console.error("Failed to load audio data:", error);
-      setFallbackConnected(false);
+      setIsConnected(false);
     }
   };
@ -8,7 +8,6 @@ import { cx } from "@/cva.config";
 import { useUiStore } from "@/hooks/stores";
 import { useAudioDevices } from "@/hooks/useAudioDevices";
 import { useAudioLevel } from "@/hooks/useAudioLevel";
-import { useAudioEvents } from "@/hooks/useAudioEvents";
 import api from "@/api";
 import notifications from "@/notifications";
@ -75,27 +74,16 @@ interface AudioControlPopoverProps {
 export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
   const [currentConfig, setCurrentConfig] = useState<AudioConfig | null>(null);
   const [currentMicrophoneConfig, setCurrentMicrophoneConfig] = useState<AudioConfig | null>(null);
+  const [isMuted, setIsMuted] = useState(false);
+  const [metrics, setMetrics] = useState<AudioMetrics | null>(null);
   const [showAdvanced, setShowAdvanced] = useState(false);
   const [isLoading, setIsLoading] = useState(false);
+  const [isConnected, setIsConnected] = useState(false);
 
   // Add cooldown to prevent rapid clicking
   const [lastClickTime, setLastClickTime] = useState(0);
   const CLICK_COOLDOWN = 500; // 500ms cooldown between clicks
 
-  // Use WebSocket-based audio events for real-time updates
-  const {
-    audioMuted,
-    audioMetrics,
-    microphoneMetrics,
-    isConnected: wsConnected
-  } = useAudioEvents();
-
-  // Fallback state for when WebSocket is not connected
-  const [fallbackMuted, setFallbackMuted] = useState(false);
-  const [fallbackMetrics, setFallbackMetrics] = useState<AudioMetrics | null>(null);
-  const [fallbackMicMetrics, setFallbackMicMetrics] = useState<MicrophoneMetrics | null>(null);
-  const [fallbackConnected, setFallbackConnected] = useState(false);
-
   // Microphone state from props
   const {
     isMicrophoneActive,
@ -110,12 +98,7 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
     isStopping,
     isToggling,
   } = microphone;
-
-  // Use WebSocket data when available, fallback to polling data otherwise
-  const isMuted = wsConnected && audioMuted !== null ? audioMuted : fallbackMuted;
-  const metrics = wsConnected && audioMetrics !== null ? audioMetrics : fallbackMetrics;
-  const micMetrics = wsConnected && microphoneMetrics !== null ? microphoneMetrics : fallbackMicMetrics;
-  const isConnected = wsConnected ? wsConnected : fallbackConnected;
+  const [microphoneMetrics, setMicrophoneMetrics] = useState<MicrophoneMetrics | null>(null);
 
   // Audio level monitoring
   const { audioLevel, isAnalyzing } = useAudioLevel(microphoneStream);
@ -135,33 +118,30 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
 
   const { toggleSidebarView } = useUiStore();
 
-  // Load initial configurations once (these don't change frequently)
+  // Load initial audio state
   useEffect(() => {
-    loadAudioConfigurations();
-  }, []);
-
-  // Load initial audio state and set up fallback polling when WebSocket is not connected
-  useEffect(() => {
-    if (!wsConnected) {
-      loadAudioState();
-      // Only load metrics as fallback when WebSocket is disconnected
-      loadAudioMetrics();
-      loadMicrophoneMetrics();
-
-      // Set up metrics refresh interval for fallback only
-      const metricsInterval = setInterval(() => {
-        loadAudioMetrics();
-        loadMicrophoneMetrics();
-      }, 2000);
-      return () => clearInterval(metricsInterval);
-    }
-
-    // Always sync microphone state
-    syncMicrophoneState();
-  }, [wsConnected, syncMicrophoneState]);
+    loadAudioState();
+    loadAudioMetrics();
+    loadMicrophoneMetrics();
+    syncMicrophoneState();
+
+    // Set up metrics refresh interval
+    const metricsInterval = setInterval(() => {
+      loadAudioMetrics();
+      loadMicrophoneMetrics();
+    }, 2000);
+    return () => clearInterval(metricsInterval);
+  }, [syncMicrophoneState]);
 
-  const loadAudioConfigurations = async () => {
+  const loadAudioState = async () => {
     try {
+      // Load mute state
+      const muteResp = await api.GET("/audio/mute");
+      if (muteResp.ok) {
+        const muteData = await muteResp.json();
+        setIsMuted(!!muteData.muted);
+      }
+
       // Load quality config
       const qualityResp = await api.GET("/audio/quality");
       if (qualityResp.ok) {
@ -175,19 +155,6 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
         const micQualityData = await micQualityResp.json();
         setCurrentMicrophoneConfig(micQualityData.current);
       }
-    } catch (error) {
-      console.error("Failed to load audio configurations:", error);
-    }
-  };
-
-  const loadAudioState = async () => {
-    try {
-      // Load mute state only (configurations are loaded separately)
-      const muteResp = await api.GET("/audio/mute");
-      if (muteResp.ok) {
-        const muteData = await muteResp.json();
-        setFallbackMuted(!!muteData.muted);
-      }
     } catch (error) {
       console.error("Failed to load audio state:", error);
     }
@ -198,15 +165,15 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
       const resp = await api.GET("/audio/metrics");
       if (resp.ok) {
         const data = await resp.json();
-        setFallbackMetrics(data);
+        setMetrics(data);
         // Consider connected if API call succeeds, regardless of frame count
-        setFallbackConnected(true);
+        setIsConnected(true);
       } else {
-        setFallbackConnected(false);
+        setIsConnected(false);
       }
     } catch (error) {
       console.error("Failed to load audio metrics:", error);
-      setFallbackConnected(false);
+      setIsConnected(false);
     }
   };
@ -217,7 +184,7 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
       const resp = await api.GET("/microphone/metrics");
       if (resp.ok) {
         const data = await resp.json();
-        setFallbackMicMetrics(data);
+        setMicrophoneMetrics(data);
       }
     } catch (error) {
       console.error("Failed to load microphone metrics:", error);
@ -229,10 +196,7 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
     try {
       const resp = await api.POST("/audio/mute", { muted: !isMuted });
       if (resp.ok) {
-        // WebSocket will handle the state update, but update fallback for immediate feedback
-        if (!wsConnected) {
-          setFallbackMuted(!isMuted);
-        }
+        setIsMuted(!isMuted);
       }
     } catch (error) {
       console.error("Failed to toggle mute:", error);
@ -723,14 +687,14 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
           </div>
         </div>
 
-        {micMetrics && (
+        {microphoneMetrics && (
           <div className="mb-4">
             <h4 className="text-sm font-medium text-slate-700 dark:text-slate-300 mb-2">Microphone Input</h4>
             <div className="grid grid-cols-2 gap-3 text-xs">
               <div className="space-y-1">
                 <div className="text-slate-500 dark:text-slate-400">Frames Sent</div>
                 <div className="font-mono text-green-600 dark:text-green-400">
-                  {formatNumber(micMetrics.frames_sent)}
+                  {formatNumber(microphoneMetrics.frames_sent)}
                 </div>
               </div>
 
@ -738,18 +702,18 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
                 <div className="text-slate-500 dark:text-slate-400">Frames Dropped</div>
                 <div className={cx(
                   "font-mono",
-                  micMetrics.frames_dropped > 0
+                  microphoneMetrics.frames_dropped > 0
                     ? "text-red-600 dark:text-red-400"
                     : "text-green-600 dark:text-green-400"
                 )}>
-                  {formatNumber(micMetrics.frames_dropped)}
+                  {formatNumber(microphoneMetrics.frames_dropped)}
                 </div>
               </div>
 
               <div className="space-y-1">
                 <div className="text-slate-500 dark:text-slate-400">Data Processed</div>
                 <div className="font-mono text-blue-600 dark:text-blue-400">
-                  {formatBytes(micMetrics.bytes_processed)}
+                  {formatBytes(microphoneMetrics.bytes_processed)}
                 </div>
               </div>
 
@ -757,11 +721,11 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
                 <div className="text-slate-500 dark:text-slate-400">Connection Drops</div>
                 <div className={cx(
                   "font-mono",
-                  micMetrics.connection_drops > 0
+                  microphoneMetrics.connection_drops > 0
                     ? "text-red-600 dark:text-red-400"
                     : "text-green-600 dark:text-green-400"
                 )}>
-                  {formatNumber(micMetrics.connection_drops)}
+                  {formatNumber(microphoneMetrics.connection_drops)}
                 </div>
               </div>
             </div>
@ -1,202 +0,0 @@
-import { useCallback, useEffect, useRef, useState } from 'react';
-import useWebSocket, { ReadyState } from 'react-use-websocket';
-
-// Audio event types matching the backend
-export type AudioEventType =
-  | 'audio-mute-changed'
-  | 'audio-metrics-update'
-  | 'microphone-state-changed'
-  | 'microphone-metrics-update';
-
-// Audio event data interfaces
-export interface AudioMuteData {
-  muted: boolean;
-}
-
-export interface AudioMetricsData {
-  frames_received: number;
-  frames_dropped: number;
-  bytes_processed: number;
-  last_frame_time: string;
-  connection_drops: number;
-  average_latency: string;
-}
-
-export interface MicrophoneStateData {
-  running: boolean;
-  session_active: boolean;
-}
-
-export interface MicrophoneMetricsData {
-  frames_sent: number;
-  frames_dropped: number;
-  bytes_processed: number;
-  last_frame_time: string;
-  connection_drops: number;
-  average_latency: string;
-}
-
-// Audio event structure
-export interface AudioEvent {
-  type: AudioEventType;
-  data: AudioMuteData | AudioMetricsData | MicrophoneStateData | MicrophoneMetricsData;
-}
-
-// Hook return type
-export interface UseAudioEventsReturn {
-  // Connection state
-  connectionState: ReadyState;
-  isConnected: boolean;
-
-  // Audio state
-  audioMuted: boolean | null;
-  audioMetrics: AudioMetricsData | null;
-
-  // Microphone state
-  microphoneState: MicrophoneStateData | null;
-  microphoneMetrics: MicrophoneMetricsData | null;
-
-  // Manual subscription control
-  subscribe: () => void;
-  unsubscribe: () => void;
-}
-
-export function useAudioEvents(): UseAudioEventsReturn {
-  // State for audio data
-  const [audioMuted, setAudioMuted] = useState<boolean | null>(null);
-  const [audioMetrics, setAudioMetrics] = useState<AudioMetricsData | null>(null);
-  const [microphoneState, setMicrophoneState] = useState<MicrophoneStateData | null>(null);
-  const [microphoneMetrics, setMicrophoneMetrics] = useState<MicrophoneMetricsData | null>(null);
-
-  // Subscription state
-  const [isSubscribed, setIsSubscribed] = useState(false);
-  const subscriptionSent = useRef(false);
-
-  // Get WebSocket URL
-  const getWebSocketUrl = () => {
-    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
-    const host = window.location.host;
-    return `${protocol}//${host}/webrtc/signaling/client`;
-  };
-
-  // WebSocket connection
-  const {
-    sendMessage,
-    lastMessage,
-    readyState,
-  } = useWebSocket(getWebSocketUrl(), {
-    shouldReconnect: () => true,
-    reconnectAttempts: 10,
-    reconnectInterval: 3000,
-    onOpen: () => {
-      console.log('[AudioEvents] WebSocket connected');
-      subscriptionSent.current = false;
-    },
-    onClose: () => {
-      console.log('[AudioEvents] WebSocket disconnected');
-      subscriptionSent.current = false;
-      setIsSubscribed(false);
-    },
-    onError: (event) => {
-      console.error('[AudioEvents] WebSocket error:', event);
-    },
-  });
-
-  // Subscribe to audio events
-  const subscribe = useCallback(() => {
-    if (readyState === ReadyState.OPEN && !subscriptionSent.current) {
-      const subscribeMessage = {
-        type: 'subscribe-audio-events',
-        data: {}
-      };
-
-      sendMessage(JSON.stringify(subscribeMessage));
-      subscriptionSent.current = true;
-      setIsSubscribed(true);
-      console.log('[AudioEvents] Subscribed to audio events');
-    }
-  }, [readyState, sendMessage]);
-
-  // Handle incoming messages
-  useEffect(() => {
-    if (lastMessage !== null) {
-      try {
-        const message = JSON.parse(lastMessage.data);
-
-        // Handle audio events
-        if (message.type && message.data) {
-          const audioEvent = message as AudioEvent;
-
-          switch (audioEvent.type) {
-            case 'audio-mute-changed': {
-              const muteData = audioEvent.data as AudioMuteData;
-              setAudioMuted(muteData.muted);
-              console.log('[AudioEvents] Audio mute changed:', muteData.muted);
-              break;
-            }
-
-            case 'audio-metrics-update': {
-              const audioMetricsData = audioEvent.data as AudioMetricsData;
-              setAudioMetrics(audioMetricsData);
-              break;
-            }
-
-            case 'microphone-state-changed': {
-              const micStateData = audioEvent.data as MicrophoneStateData;
-              setMicrophoneState(micStateData);
-              console.log('[AudioEvents] Microphone state changed:', micStateData);
-              break;
-            }
-
-            case 'microphone-metrics-update': {
-              const micMetricsData = audioEvent.data as MicrophoneMetricsData;
-              setMicrophoneMetrics(micMetricsData);
-              break;
-            }
-
-            default:
-              // Ignore other message types (WebRTC signaling, etc.)
-              break;
-          }
-        }
-      } catch (error) {
-        // Ignore parsing errors for non-JSON messages (like "pong")
-        if (lastMessage.data !== 'pong') {
-          console.warn('[AudioEvents] Failed to parse WebSocket message:', error);
-        }
-      }
-    }
-  }, [lastMessage]);
-
-  // Auto-subscribe when connected
-  useEffect(() => {
-    if (readyState === ReadyState.OPEN && !subscriptionSent.current) {
-      subscribe();
-    }
-  }, [readyState, subscribe]);
-
-  // Unsubscribe from audio events (connection will be cleaned up automatically)
-  const unsubscribe = useCallback(() => {
-    setIsSubscribed(false);
-    subscriptionSent.current = false;
-    console.log('[AudioEvents] Unsubscribed from audio events');
-  }, []);
-
-  return {
-    // Connection state
-    connectionState: readyState,
-    isConnected: readyState === ReadyState.OPEN && isSubscribed,
-
-    // Audio state
-    audioMuted,
-    audioMetrics,
-
-    // Microphone state
-    microphoneState,
-    microphoneMetrics,
-
-    // Manual subscription control
-    subscribe,
-    unsubscribe,
-  };
-}
@ -327,18 +327,11 @@ export function useMicrophone() {
 
     for (let attempt = 1; attempt <= 3; attempt++) {
       try {
-        // If this is a retry, first try to reset the backend microphone state
+        // If this is a retry, first try to stop the backend microphone to reset state
         if (attempt > 1) {
           console.log(`Backend start attempt ${attempt}, first trying to reset backend state...`);
           try {
-            // Try the new reset endpoint first
-            const resetResp = await api.POST("/microphone/reset", {});
-            if (resetResp.ok) {
-              console.log("Backend reset successful");
-            } else {
-              // Fallback to stop
-              await api.POST("/microphone/stop", {});
-            }
+            await api.POST("/microphone/stop", {});
             // Wait a bit for the backend to reset
             await new Promise(resolve => setTimeout(resolve, 200));
           } catch (resetError) {
@ -365,24 +358,6 @@ export function useMicrophone() {
           console.log("Backend response data:", responseData);
           if (responseData.status === "already running") {
             console.info("Backend microphone was already running");
-
-            // If we're on the first attempt and backend says "already running",
-            // but frontend thinks it's not active, this might be a stuck state
-            if (attempt === 1 && !isMicrophoneActive) {
-              console.warn("Backend reports 'already running' but frontend is not active - possible stuck state");
-              console.log("Attempting to reset backend state and retry...");
-
-              try {
-                const resetResp = await api.POST("/microphone/reset", {});
-                if (resetResp.ok) {
-                  console.log("Backend reset successful, retrying start...");
-                  await new Promise(resolve => setTimeout(resolve, 200));
-                  continue; // Retry the start
-                }
-              } catch (resetError) {
-                console.warn("Failed to reset stuck backend state:", resetError);
-              }
-            }
           }
           console.log("Backend microphone start successful");
           backendSuccess = true;
@ -482,47 +457,15 @@ export function useMicrophone() {
   const resetBackendMicrophoneState = useCallback(async (): Promise<boolean> => {
     try {
       console.log("Resetting backend microphone state...");
-      const response = await api.POST("/microphone/reset", {});
-
-      if (response.ok) {
-        const data = await response.json();
-        console.log("Backend microphone reset successful:", data);
-
-        // Update frontend state to match backend
-        setMicrophoneActive(false);
-        setMicrophoneMuted(false);
-
-        // Clean up any orphaned streams
-        if (microphoneStreamRef.current) {
-          console.log("Cleaning up orphaned stream after reset");
-          await stopMicrophoneStream();
-        }
-
-        // Wait a bit for everything to settle
-        await new Promise(resolve => setTimeout(resolve, 200));
-
-        // Sync state to ensure consistency
-        await syncMicrophoneState();
-
-        return true;
-      } else {
-        console.error("Backend microphone reset failed:", response.status);
-        return false;
-      }
+      await api.POST("/microphone/stop", {});
+      // Wait for backend to process the stop
+      await new Promise(resolve => setTimeout(resolve, 300));
+      return true;
     } catch (error) {
       console.warn("Failed to reset backend microphone state:", error);
-      // Fallback to old method
-      try {
-        console.log("Trying fallback reset method...");
-        await api.POST("/microphone/stop", {});
-        await new Promise(resolve => setTimeout(resolve, 300));
-        return true;
-      } catch (fallbackError) {
-        console.error("Fallback reset also failed:", fallbackError);
-        return false;
-      }
+      return false;
     }
-  }, [setMicrophoneActive, setMicrophoneMuted, stopMicrophoneStream, syncMicrophoneState]);
+  }, []);
 
   // Stop microphone
   const stopMicrophone = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
web.go: 51 changed lines
@ -173,11 +173,6 @@ func setupRouter() *gin.Engine {
 			return
 		}
 		audio.SetAudioMuted(req.Muted)
-
-		// Broadcast audio mute state change via WebSocket
-		broadcaster := audio.GetAudioEventBroadcaster()
-		broadcaster.BroadcastAudioMuteChanged(req.Muted)
-
 		c.JSON(200, gin.H{"muted": req.Muted})
 	})
 
@ -311,10 +306,6 @@ func setupRouter() *gin.Engine {
 			return
 		}
 
-		// Broadcast microphone state change via WebSocket
-		broadcaster := audio.GetAudioEventBroadcaster()
-		broadcaster.BroadcastMicrophoneStateChanged(true, true)
-
 		c.JSON(200, gin.H{
 			"status":  "started",
 			"running": currentSession.AudioInputManager.IsRunning(),
@ -346,10 +337,6 @@ func setupRouter() *gin.Engine {
 		// Also stop the non-blocking audio input specifically
 		audio.StopNonBlockingAudioInput()
 
-		// Broadcast microphone state change via WebSocket
-		broadcaster := audio.GetAudioEventBroadcaster()
-		broadcaster.BroadcastMicrophoneStateChanged(false, true)
-
 		c.JSON(200, gin.H{
 			"status":  "stopped",
 			"running": currentSession.AudioInputManager.IsRunning(),
@ -398,37 +385,6 @@ func setupRouter() *gin.Engine {
 		})
 	})
 
-	protected.POST("/microphone/reset", func(c *gin.Context) {
-		if currentSession == nil {
-			c.JSON(400, gin.H{"error": "no active session"})
-			return
-		}
-
-		if currentSession.AudioInputManager == nil {
-			c.JSON(500, gin.H{"error": "audio input manager not available"})
-			return
-		}
-
-		logger.Info().Msg("forcing microphone state reset")
-
-		// Force stop both the AudioInputManager and NonBlockingAudioManager
-		currentSession.AudioInputManager.Stop()
-		audio.StopNonBlockingAudioInput()
-
-		// Wait a bit to ensure everything is stopped
-		time.Sleep(100 * time.Millisecond)
-
-		// Broadcast microphone state change via WebSocket
-		broadcaster := audio.GetAudioEventBroadcaster()
-		broadcaster.BroadcastMicrophoneStateChanged(false, true)
-
-		c.JSON(200, gin.H{
-			"status":                    "reset",
-			"audio_input_running":       currentSession.AudioInputManager.IsRunning(),
-			"nonblocking_input_running": audio.IsNonBlockingAudioInputRunning(),
-		})
-	})
-
 	// Catch-all route for SPA
 	r.NoRoute(func(c *gin.Context) {
 		if c.Request.Method == "GET" && c.NegotiateFormat(gin.MIMEHTML) == gin.MIMEHTML {
@ -577,9 +533,6 @@ func handleWebRTCSignalWsMessages(
 		if isCloudConnection {
 			setCloudConnectionState(CloudConnectionStateDisconnected)
 		}
-		// Clean up audio event subscription
-		broadcaster := audio.GetAudioEventBroadcaster()
-		broadcaster.Unsubscribe(connectionID)
 		cancelRun()
 	}()
 
@ -737,10 +690,6 @@ func handleWebRTCSignalWsMessages(
 			if err = currentSession.peerConnection.AddICECandidate(candidate); err != nil {
 				l.Warn().Str("error", err.Error()).Msg("failed to add incoming ICE candidate to our peer connection")
 			}
-		} else if message.Type == "subscribe-audio-events" {
-			l.Info().Msg("client subscribing to audio events")
-			broadcaster := audio.GetAudioEventBroadcaster()
-			broadcaster.Subscribe(connectionID, wsCon, runCtx, &l)
 		}
 	}
 }
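For reference, the JSON that subscribers received over this signaling socket follows directly from the struct tags on the removed AudioEvent/AudioMuteData types. A standalone sketch that reproduces the wire format (the lowercase struct here is a local mirror for illustration, not the repository's type):

package main

import (
	"encoding/json"
	"fmt"
)

// audioEvent mirrors the removed AudioEvent struct and its json tags.
type audioEvent struct {
	Type string      `json:"type"`
	Data interface{} `json:"data"`
}

func main() {
	b, _ := json.Marshal(audioEvent{
		Type: "audio-mute-changed",
		Data: map[string]bool{"muted": true},
	})
	fmt.Println(string(b)) // {"type":"audio-mute-changed","data":{"muted":true}}
}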