Compare commits

..

No commits in common. "8fb0b9f9c6790e7b4ed66a21cf75b0eedef97d54" and "fff2d2b7910e9916dfb03ee8d63e6b4f01dfc86f" have entirely different histories.

46 changed files with 613 additions and 4823 deletions

View File

@ -11,27 +11,7 @@ import (
"github.com/rs/zerolog"
)
// AdaptiveBufferConfig holds configuration for the adaptive buffer sizing algorithm.
//
// The adaptive buffer system dynamically adjusts audio buffer sizes based on real-time
// system conditions to optimize the trade-off between latency and stability. The algorithm
// uses multiple factors to make decisions:
//
// 1. System Load Monitoring:
// - CPU usage: High CPU load increases buffer sizes to prevent underruns
// - Memory usage: High memory pressure reduces buffer sizes to conserve RAM
//
// 2. Latency Tracking:
// - Target latency: Optimal latency for the current quality setting
// - Max latency: Hard limit beyond which buffers are aggressively reduced
//
// 3. Adaptation Strategy:
// - Exponential smoothing: Prevents oscillation and provides stable adjustments
// - Discrete steps: Buffer sizes change in fixed increments to avoid instability
// - Hysteresis: Different thresholds for increasing vs decreasing buffer sizes
//
// The algorithm is specifically tuned for embedded ARM systems with limited resources,
// prioritizing stability over absolute minimum latency.
// AdaptiveBufferConfig holds configuration for adaptive buffer sizing
type AdaptiveBufferConfig struct {
// Buffer size limits (in frames)
MinBufferSize int
@ -176,32 +156,6 @@ func (abm *AdaptiveBufferManager) adaptationLoop() {
}
// adaptBufferSizes analyzes system conditions and adjusts buffer sizes
// adaptBufferSizes implements the core adaptive buffer sizing algorithm.
//
// This function uses a multi-factor approach to determine optimal buffer sizes:
//
// Mathematical Model:
// 1. Factor Calculation:
//
// - CPU Factor: Sigmoid function that increases buffer size under high CPU load
//
// - Memory Factor: Inverse relationship that decreases buffer size under memory pressure
//
// - Latency Factor: Exponential decay that aggressively reduces buffers when latency exceeds targets
//
// 2. Combined Factor:
// Combined = (CPU_factor * Memory_factor * Latency_factor)
// This multiplicative approach ensures any single critical factor can override others
//
// 3. Exponential Smoothing:
// New_size = Current_size + smoothing_factor * (Target_size - Current_size)
// This prevents rapid oscillations and provides stable convergence
//
// 4. Discrete Quantization:
// Final sizes are rounded to frame boundaries and clamped to configured limits
//
// The algorithm runs periodically and only applies changes when the adaptation interval
// has elapsed, preventing excessive adjustments that could destabilize the audio pipeline.
func (abm *AdaptiveBufferManager) adaptBufferSizes() {
// Collect current system metrics
metrics := abm.processMonitor.GetCurrentMetrics()

View File

@ -45,7 +45,7 @@ func DefaultOptimizerConfig() OptimizerConfig {
CooldownPeriod: GetConfig().CooldownPeriod,
Aggressiveness: GetConfig().OptimizerAggressiveness,
RollbackThreshold: GetConfig().RollbackThreshold,
StabilityPeriod: GetConfig().AdaptiveOptimizerStability,
StabilityPeriod: 10 * time.Second,
}
}

View File

@ -9,7 +9,7 @@ import (
var (
// Global audio output supervisor instance
globalOutputSupervisor unsafe.Pointer // *AudioOutputSupervisor
globalOutputSupervisor unsafe.Pointer // *AudioServerSupervisor
)
// isAudioServerProcess detects if we're running as the audio server subprocess
@ -58,15 +58,15 @@ func StopNonBlockingAudioStreaming() {
}
// SetAudioOutputSupervisor sets the global audio output supervisor
func SetAudioOutputSupervisor(supervisor *AudioOutputSupervisor) {
func SetAudioOutputSupervisor(supervisor *AudioServerSupervisor) {
atomic.StorePointer(&globalOutputSupervisor, unsafe.Pointer(supervisor))
}
// GetAudioOutputSupervisor returns the global audio output supervisor
func GetAudioOutputSupervisor() *AudioOutputSupervisor {
func GetAudioOutputSupervisor() *AudioServerSupervisor {
ptr := atomic.LoadPointer(&globalOutputSupervisor)
if ptr == nil {
return nil
}
return (*AudioOutputSupervisor)(ptr)
return (*AudioServerSupervisor)(ptr)
}

View File

@ -1,204 +0,0 @@
package audio
import (
"sync/atomic"
"time"
)
// AtomicCounter provides thread-safe counter operations backed by a
// single int64 manipulated with sync/atomic primitives. The zero value
// is ready to use; NewAtomicCounter exists for symmetry with the other
// metric types in this package.
type AtomicCounter struct {
	value int64
}

// NewAtomicCounter creates a new atomic counter initialized to zero.
func NewAtomicCounter() *AtomicCounter {
	return &AtomicCounter{}
}

// Add atomically adds delta to the counter and returns the new value.
func (c *AtomicCounter) Add(delta int64) int64 {
	return atomic.AddInt64(&c.value, delta)
}

// Increment atomically increments the counter by 1 and returns the new value.
func (c *AtomicCounter) Increment() int64 {
	return c.Add(1)
}

// Load atomically loads and returns the current counter value.
func (c *AtomicCounter) Load() int64 {
	return atomic.LoadInt64(&c.value)
}

// Store atomically stores value into the counter.
func (c *AtomicCounter) Store(value int64) {
	atomic.StoreInt64(&c.value, value)
}

// Reset atomically resets the counter to zero.
func (c *AtomicCounter) Reset() {
	c.Store(0)
}

// Swap atomically stores newValue and returns the previous value.
// The parameter was renamed from "new", which shadowed the
// predeclared identifier.
func (c *AtomicCounter) Swap(newValue int64) int64 {
	return atomic.SwapInt64(&c.value, newValue)
}
// FrameMetrics provides common frame tracking metrics: total frames
// processed, frames dropped, and total bytes transferred. All counters
// are safe for concurrent use.
type FrameMetrics struct {
	Total   *AtomicCounter
	Dropped *AtomicCounter
	Bytes   *AtomicCounter
}

// NewFrameMetrics creates a new frame metrics tracker with all counters at zero.
func NewFrameMetrics() *FrameMetrics {
	fm := &FrameMetrics{}
	fm.Total = NewAtomicCounter()
	fm.Dropped = NewAtomicCounter()
	fm.Bytes = NewAtomicCounter()
	return fm
}

// RecordFrame atomically records a successful frame along with its size in bytes.
func (fm *FrameMetrics) RecordFrame(size int64) {
	fm.Total.Increment()
	fm.Bytes.Add(size)
}

// RecordDrop atomically records a dropped frame.
func (fm *FrameMetrics) RecordDrop() {
	fm.Dropped.Increment()
}

// GetStats returns the current total, dropped, and byte counts.
func (fm *FrameMetrics) GetStats() (total, dropped, bytes int64) {
	total = fm.Total.Load()
	dropped = fm.Dropped.Load()
	bytes = fm.Bytes.Load()
	return
}

// Reset resets all metrics to zero.
func (fm *FrameMetrics) Reset() {
	for _, counter := range []*AtomicCounter{fm.Total, fm.Dropped, fm.Bytes} {
		counter.Reset()
	}
}

// GetDropRate calculates the drop rate as a percentage of total frames.
// When no frames have been recorded it returns 0.0 to avoid dividing by zero.
func (fm *FrameMetrics) GetDropRate() float64 {
	frameCount := fm.Total.Load()
	if frameCount == 0 {
		return 0.0
	}
	dropCount := fm.Dropped.Load()
	return float64(dropCount) / float64(frameCount) * 100.0
}
// LatencyTracker provides atomic latency tracking: the most recent
// measurement, the observed min and max, an exponentially smoothed
// average, and a sample count. All values are stored as nanoseconds in
// AtomicCounters, so the tracker is safe for concurrent use.
type LatencyTracker struct {
	current *AtomicCounter
	min     *AtomicCounter
	max     *AtomicCounter
	average *AtomicCounter
	samples *AtomicCounter
}

// NewLatencyTracker creates a new latency tracker.
func NewLatencyTracker() *LatencyTracker {
	lt := &LatencyTracker{
		current: NewAtomicCounter(),
		min:     NewAtomicCounter(),
		max:     NewAtomicCounter(),
		average: NewAtomicCounter(),
		samples: NewAtomicCounter(),
	}
	// Initialize min to the maximum int64 so the first recorded
	// measurement always becomes the initial minimum.
	lt.min.Store(int64(^uint64(0) >> 1)) // Max int64
	return lt
}

// RecordLatency atomically records a new latency measurement, updating
// the current value, sample count, min, max, and moving average.
func (lt *LatencyTracker) RecordLatency(latency time.Duration) {
	latencyNanos := latency.Nanoseconds()
	lt.current.Store(latencyNanos)
	count := lt.samples.Increment()

	// Update min with a CAS loop so concurrent recorders cannot lose a
	// smaller value. (Accesses the counter's field directly because
	// AtomicCounter does not expose CompareAndSwap.)
	for {
		oldMin := lt.min.Load()
		if latencyNanos >= oldMin {
			break
		}
		if atomic.CompareAndSwapInt64(&lt.min.value, oldMin, latencyNanos) {
			break
		}
	}

	// Update max with the symmetric CAS loop.
	for {
		oldMax := lt.max.Load()
		if latencyNanos <= oldMax {
			break
		}
		if atomic.CompareAndSwapInt64(&lt.max.value, oldMax, latencyNanos) {
			break
		}
	}

	// Update the exponential moving average (87.5% weight on the old
	// average). Bug fix: the very first sample seeds the average
	// directly; previously it was blended with the zero initial value,
	// which under-reported the average as latency/8 until many samples
	// had accumulated.
	if count == 1 {
		lt.average.Store(latencyNanos)
		return
	}
	oldAvg := lt.average.Load()
	newAvg := (oldAvg*7 + latencyNanos) / 8
	lt.average.Store(newAvg)
}

// GetLatencyStats returns the current, min, max, and average latencies
// along with the number of samples recorded. Before any sample is
// recorded, min holds its max-int64 sentinel value.
func (lt *LatencyTracker) GetLatencyStats() (current, min, max, average time.Duration, samples int64) {
	return time.Duration(lt.current.Load()),
		time.Duration(lt.min.Load()),
		time.Duration(lt.max.Load()),
		time.Duration(lt.average.Load()),
		lt.samples.Load()
}
// PoolMetrics provides common pool performance metrics (hits and
// misses), safe for concurrent use.
type PoolMetrics struct {
	Hits   *AtomicCounter
	Misses *AtomicCounter
}

// NewPoolMetrics creates a new pool metrics tracker with zeroed counters.
func NewPoolMetrics() *PoolMetrics {
	metrics := &PoolMetrics{}
	metrics.Hits = NewAtomicCounter()
	metrics.Misses = NewAtomicCounter()
	return metrics
}

// RecordHit atomically records a pool hit.
func (pm *PoolMetrics) RecordHit() {
	pm.Hits.Increment()
}

// RecordMiss atomically records a pool miss.
func (pm *PoolMetrics) RecordMiss() {
	pm.Misses.Increment()
}

// GetHitRate calculates the hit rate as a percentage of all recorded
// operations. When nothing has been recorded yet it returns 0.0 to
// avoid dividing by zero.
func (pm *PoolMetrics) GetHitRate() float64 {
	hitCount := pm.Hits.Load()
	totalCount := hitCount + pm.Misses.Load()
	if totalCount == 0 {
		return 0.0
	}
	return float64(hitCount) / float64(totalCount) * 100.0
}

// GetStats returns the hit count, miss count, and hit rate percentage.
func (pm *PoolMetrics) GetStats() (hits, misses int64, hitRate float64) {
	return pm.Hits.Load(), pm.Misses.Load(), pm.GetHitRate()
}

View File

@ -40,8 +40,7 @@ func NewAudioBufferPool(bufferSize int) *AudioBufferPool {
preallocSize: preallocSize,
pool: sync.Pool{
New: func() interface{} {
buf := make([]byte, 0, bufferSize)
return &buf
return make([]byte, 0, bufferSize)
},
},
}

View File

@ -61,15 +61,12 @@ static volatile int capture_initialized = 0;
static volatile int playback_initializing = 0;
static volatile int playback_initialized = 0;
// Enhanced ALSA device opening with exponential backoff retry logic
// Safe ALSA device opening with retry logic
static int safe_alsa_open(snd_pcm_t **handle, const char *device, snd_pcm_stream_t stream) {
int max_attempts = 5; // Increased from 3 to 5
int attempt = 0;
int attempts = 3;
int err;
int backoff_us = sleep_microseconds; // Start with base sleep time
const int max_backoff_us = 500000; // Max 500ms backoff
while (attempt < max_attempts) {
while (attempts-- > 0) {
err = snd_pcm_open(handle, device, stream, SND_PCM_NONBLOCK);
if (err >= 0) {
// Switch to blocking mode after successful open
@ -77,26 +74,12 @@ static int safe_alsa_open(snd_pcm_t **handle, const char *device, snd_pcm_stream
return 0;
}
attempt++;
if (attempt >= max_attempts) break;
// Enhanced error handling with specific retry strategies
if (err == -EBUSY || err == -EAGAIN) {
// Device busy or temporarily unavailable - retry with backoff
usleep(backoff_us);
backoff_us = (backoff_us * 2 < max_backoff_us) ? backoff_us * 2 : max_backoff_us;
} else if (err == -ENODEV || err == -ENOENT) {
// Device not found - longer wait as device might be initializing
usleep(backoff_us * 2);
backoff_us = (backoff_us * 2 < max_backoff_us) ? backoff_us * 2 : max_backoff_us;
} else if (err == -EPERM || err == -EACCES) {
// Permission denied - shorter wait, likely persistent issue
usleep(backoff_us / 2);
} else {
// Other errors - standard backoff
usleep(backoff_us);
backoff_us = (backoff_us * 2 < max_backoff_us) ? backoff_us * 2 : max_backoff_us;
if (err == -EBUSY && attempts > 0) {
// Device busy, wait and retry
usleep(sleep_microseconds); // 50ms
continue;
}
break;
}
return err;
}
@ -234,114 +217,43 @@ int jetkvm_audio_init() {
return 0;
}
// jetkvm_audio_read_encode reads one audio frame from ALSA, encodes it with Opus, and handles errors.
//
// This function implements a robust audio capture pipeline with the following features:
// - ALSA PCM capture with automatic device recovery
// - Opus encoding with optimized settings for real-time processing
// - Progressive error recovery with exponential backoff
// - Buffer underrun and device suspension handling
//
// Error Recovery Strategy:
// 1. EPIPE (buffer underrun): Prepare device and retry with progressive delays
// 2. ESTRPIPE (device suspended): Resume device with timeout and fallback to prepare
// 3. Other errors: Log and attempt recovery up to max_recovery_attempts
//
// Performance Optimizations:
// - Stack-allocated PCM buffer to avoid heap allocations
// - Direct memory access for Opus encoding
// - Minimal system calls in the hot path
//
// Parameters:
// opus_buf: Output buffer for encoded Opus data (must be at least max_packet_size bytes)
//
// Returns:
// >0: Number of bytes written to opus_buf
// -1: Initialization error or safety check failure
// -2: Unrecoverable ALSA or Opus error after all retry attempts
// Read and encode one frame with enhanced error handling
int jetkvm_audio_read_encode(void *opus_buf) {
short pcm_buffer[1920]; // max 2ch*960
unsigned char *out = (unsigned char*)opus_buf;
int err = 0;
int recovery_attempts = 0;
const int max_recovery_attempts = 3;
// Safety checks
if (!capture_initialized || !pcm_handle || !encoder || !opus_buf) {
return -1;
}
retry_read:
;
int pcm_rc = snd_pcm_readi(pcm_handle, pcm_buffer, frame_size);
// Handle ALSA errors with robust recovery strategies
// Handle ALSA errors with enhanced recovery
if (pcm_rc < 0) {
if (pcm_rc == -EPIPE) {
// Buffer underrun - implement progressive recovery
recovery_attempts++;
if (recovery_attempts > max_recovery_attempts) {
return -1; // Give up after max attempts
}
// Try to recover with prepare
// Buffer underrun - try to recover
err = snd_pcm_prepare(pcm_handle);
if (err < 0) {
// If prepare fails, try drop and prepare
snd_pcm_drop(pcm_handle);
err = snd_pcm_prepare(pcm_handle);
if (err < 0) return -1;
}
if (err < 0) return -1;
// Wait before retry to allow device to stabilize
usleep(sleep_microseconds * recovery_attempts);
goto retry_read;
pcm_rc = snd_pcm_readi(pcm_handle, pcm_buffer, frame_size);
if (pcm_rc < 0) return -1;
} else if (pcm_rc == -EAGAIN) {
// No data available - return 0 to indicate no frame
return 0;
} else if (pcm_rc == -ESTRPIPE) {
// Device suspended, implement robust resume logic
recovery_attempts++;
if (recovery_attempts > max_recovery_attempts) {
return -1;
}
// Try to resume with timeout
int resume_attempts = 0;
while ((err = snd_pcm_resume(pcm_handle)) == -EAGAIN && resume_attempts < 10) {
usleep(sleep_microseconds);
resume_attempts++;
// Device suspended, try to resume
while ((err = snd_pcm_resume(pcm_handle)) == -EAGAIN) {
usleep(sleep_microseconds); // Use centralized constant
}
if (err < 0) {
// Resume failed, try prepare as fallback
err = snd_pcm_prepare(pcm_handle);
if (err < 0) return -1;
}
// Wait before retry to allow device to stabilize
usleep(sleep_microseconds * recovery_attempts);
return 0; // Skip this frame but don't fail
} else if (pcm_rc == -ENODEV) {
// Device disconnected - critical error
return -1;
} else if (pcm_rc == -EIO) {
// I/O error - try recovery once
recovery_attempts++;
if (recovery_attempts <= max_recovery_attempts) {
snd_pcm_drop(pcm_handle);
err = snd_pcm_prepare(pcm_handle);
if (err >= 0) {
usleep(sleep_microseconds);
goto retry_read;
}
}
return -1;
return 0; // Skip this frame
} else {
// Other errors - limited retry for transient issues
recovery_attempts++;
if (recovery_attempts <= 1 && (pcm_rc == -EINTR || pcm_rc == -EBUSY)) {
usleep(sleep_microseconds / 2);
goto retry_read;
}
// Other error - return error code
return -1;
}
}
@ -415,38 +327,11 @@ int jetkvm_audio_playback_init() {
return 0;
}
// jetkvm_audio_decode_write decodes Opus data and writes PCM to ALSA playback device.
//
// This function implements a robust audio playback pipeline with the following features:
// - Opus decoding with packet loss concealment
// - ALSA PCM playback with automatic device recovery
// - Progressive error recovery with exponential backoff
// - Buffer underrun and device suspension handling
//
// Error Recovery Strategy:
// 1. EPIPE (buffer underrun): Prepare device, optionally drop+prepare, retry with delays
// 2. ESTRPIPE (device suspended): Resume with timeout, fallback to prepare if needed
// 3. Opus decode errors: Attempt packet loss concealment before failing
//
// Performance Optimizations:
// - Stack-allocated PCM buffer to minimize heap allocations
// - Bounds checking to prevent buffer overruns
// - Direct ALSA device access for minimal latency
//
// Parameters:
// opus_buf: Input buffer containing Opus-encoded audio data
// opus_size: Size of the Opus data in bytes (must be > 0 and <= max_packet_size)
//
// Returns:
// 0: Success - audio frame decoded and written to playback device
// -1: Invalid parameters, initialization error, or bounds check failure
// -2: Unrecoverable ALSA or Opus error after all retry attempts
// Decode Opus and write PCM with enhanced error handling
int jetkvm_audio_decode_write(void *opus_buf, int opus_size) {
short pcm_buffer[1920]; // max 2ch*960
unsigned char *in = (unsigned char*)opus_buf;
int err = 0;
int recovery_attempts = 0;
const int max_recovery_attempts = 3;
// Safety checks
if (!playback_initialized || !pcm_playback_handle || !decoder || !opus_buf || opus_size <= 0) {
@ -458,91 +343,31 @@ int jetkvm_audio_decode_write(void *opus_buf, int opus_size) {
return -1;
}
// Decode Opus to PCM with error handling
// Decode Opus to PCM
int pcm_frames = opus_decode(decoder, in, opus_size, pcm_buffer, frame_size, 0);
if (pcm_frames < 0) {
// Try packet loss concealment on decode error
pcm_frames = opus_decode(decoder, NULL, 0, pcm_buffer, frame_size, 0);
if (pcm_frames < 0) return -1;
}
if (pcm_frames < 0) return -1;
retry_write:
;
// Write PCM to playback device with robust recovery
// Write PCM to playback device with enhanced recovery
int pcm_rc = snd_pcm_writei(pcm_playback_handle, pcm_buffer, pcm_frames);
if (pcm_rc < 0) {
if (pcm_rc == -EPIPE) {
// Buffer underrun - implement progressive recovery
recovery_attempts++;
if (recovery_attempts > max_recovery_attempts) {
return -2;
}
// Try to recover with prepare
// Buffer underrun - try to recover
err = snd_pcm_prepare(pcm_playback_handle);
if (err < 0) {
// If prepare fails, try drop and prepare
snd_pcm_drop(pcm_playback_handle);
err = snd_pcm_prepare(pcm_playback_handle);
if (err < 0) return -2;
}
if (err < 0) return -2;
// Wait before retry to allow device to stabilize
usleep(sleep_microseconds * recovery_attempts);
goto retry_write;
pcm_rc = snd_pcm_writei(pcm_playback_handle, pcm_buffer, pcm_frames);
} else if (pcm_rc == -ESTRPIPE) {
// Device suspended, implement robust resume logic
recovery_attempts++;
if (recovery_attempts > max_recovery_attempts) {
return -2;
}
// Try to resume with timeout
int resume_attempts = 0;
while ((err = snd_pcm_resume(pcm_playback_handle)) == -EAGAIN && resume_attempts < 10) {
usleep(sleep_microseconds);
resume_attempts++;
// Device suspended, try to resume
while ((err = snd_pcm_resume(pcm_playback_handle)) == -EAGAIN) {
usleep(sleep_microseconds); // Use centralized constant
}
if (err < 0) {
// Resume failed, try prepare as fallback
err = snd_pcm_prepare(pcm_playback_handle);
if (err < 0) return -2;
}
// Wait before retry to allow device to stabilize
usleep(sleep_microseconds * recovery_attempts);
return 0; // Skip this frame but don't fail
} else if (pcm_rc == -ENODEV) {
// Device disconnected - critical error
return -2;
} else if (pcm_rc == -EIO) {
// I/O error - try recovery once
recovery_attempts++;
if (recovery_attempts <= max_recovery_attempts) {
snd_pcm_drop(pcm_playback_handle);
err = snd_pcm_prepare(pcm_playback_handle);
if (err >= 0) {
usleep(sleep_microseconds);
goto retry_write;
}
}
return -2;
} else if (pcm_rc == -EAGAIN) {
// Device not ready - brief wait and retry
recovery_attempts++;
if (recovery_attempts <= max_recovery_attempts) {
usleep(sleep_microseconds / 4);
goto retry_write;
}
return -2;
} else {
// Other errors - limited retry for transient issues
recovery_attempts++;
if (recovery_attempts <= 1 && (pcm_rc == -EINTR || pcm_rc == -EBUSY)) {
usleep(sleep_microseconds / 2);
goto retry_write;
}
return -2;
return 0; // Skip this frame
}
if (pcm_rc < 0) return -2;
}
return pcm_frames;

View File

@ -881,12 +881,6 @@ type AudioConfigConstants struct {
// Default 5s provides responsive input monitoring.
InputSupervisorTimeout time.Duration // 5s
// OutputSupervisorTimeout defines timeout for output supervisor operations.
// Used in: supervisor.go for output process monitoring
// Impact: Shorter timeouts improve output responsiveness but may cause false timeouts.
// Default 5s provides responsive output monitoring.
OutputSupervisorTimeout time.Duration // 5s
// ShortTimeout defines brief timeout for time-critical operations.
// Used in: Real-time audio processing for minimal timeout scenarios
// Impact: Very short timeouts ensure responsiveness but may cause premature failures.
@ -1388,201 +1382,6 @@ type AudioConfigConstants struct {
// Impact: Controls scaling factor for memory influence on buffer sizing.
// Default 100 provides standard percentage scaling for memory calculations.
AdaptiveBufferMemoryMultiplier int
// Socket Names - Configuration for IPC socket file names
// Used in: IPC communication for audio input/output
// Impact: Controls socket file naming and IPC connection endpoints
// InputSocketName defines the socket file name for audio input IPC.
// Used in: input_ipc.go for microphone input communication
// Impact: Must be unique to prevent conflicts with other audio sockets.
// Default "audio_input.sock" provides clear identification for input socket.
InputSocketName string
// OutputSocketName defines the socket file name for audio output IPC.
// Used in: ipc.go for audio output communication
// Impact: Must be unique to prevent conflicts with other audio sockets.
// Default "audio_output.sock" provides clear identification for output socket.
OutputSocketName string
// Component Names - Standardized component identifiers for logging
// Used in: Logging and monitoring throughout audio system
// Impact: Provides consistent component identification across logs
// AudioInputComponentName defines component name for audio input logging.
// Used in: input_ipc.go and related input processing components
// Impact: Ensures consistent logging identification for input components.
// Default "audio-input" provides clear component identification.
AudioInputComponentName string
// AudioOutputComponentName defines component name for audio output logging.
// Used in: ipc.go and related output processing components
// Impact: Ensures consistent logging identification for output components.
// Default "audio-output" provides clear component identification.
AudioOutputComponentName string
// AudioServerComponentName defines component name for audio server logging.
// Used in: supervisor.go and server management components
// Impact: Ensures consistent logging identification for server components.
// Default "audio-server" provides clear component identification.
AudioServerComponentName string
// AudioRelayComponentName defines component name for audio relay logging.
// Used in: relay.go for audio relay operations
// Impact: Ensures consistent logging identification for relay components.
// Default "audio-relay" provides clear component identification.
AudioRelayComponentName string
// AudioEventsComponentName defines component name for audio events logging.
// Used in: events.go for event broadcasting operations
// Impact: Ensures consistent logging identification for event components.
// Default "audio-events" provides clear component identification.
AudioEventsComponentName string
// Test Configuration - Constants for testing scenarios
// Used in: Test files for consistent test configuration
// Impact: Provides standardized test parameters and timeouts
// TestSocketTimeout defines timeout for test socket operations.
// Used in: integration_test.go for test socket communication
// Impact: Prevents test hangs while allowing sufficient time for operations.
// Default 100ms provides quick test execution with adequate timeout.
TestSocketTimeout time.Duration
// TestBufferSize defines buffer size for test operations.
// Used in: test_utils.go for test buffer allocation
// Impact: Provides adequate buffer space for test scenarios.
// Default 4096 bytes matches production buffer sizes for realistic testing.
TestBufferSize int
// TestRetryDelay defines delay between test retry attempts.
// Used in: Test files for retry logic in test scenarios
// Impact: Provides reasonable delay for test retry operations.
// Default 200ms allows sufficient time for test state changes.
TestRetryDelay time.Duration
// Latency Histogram Configuration - Constants for latency tracking
// Used in: granular_metrics.go for latency distribution analysis
// Impact: Controls granularity and accuracy of latency measurements
// LatencyHistogramMaxSamples defines maximum samples for latency tracking.
// Used in: granular_metrics.go for latency histogram management
// Impact: Controls memory usage and accuracy of latency statistics.
// Default 1000 samples provides good statistical accuracy with reasonable memory usage.
LatencyHistogramMaxSamples int
// LatencyPercentile50 defines 50th percentile calculation factor.
// Used in: granular_metrics.go for median latency calculation
// Impact: Must be 50 for accurate median calculation.
// Default 50 provides standard median percentile calculation.
LatencyPercentile50 int
// LatencyPercentile95 defines 95th percentile calculation factor.
// Used in: granular_metrics.go for high-percentile latency calculation
// Impact: Must be 95 for accurate 95th percentile calculation.
// Default 95 provides standard high-percentile calculation.
LatencyPercentile95 int
// LatencyPercentile99 defines 99th percentile calculation factor.
// Used in: granular_metrics.go for extreme latency calculation
// Impact: Must be 99 for accurate 99th percentile calculation.
// Default 99 provides standard extreme percentile calculation.
LatencyPercentile99 int
// BufferPoolMaxOperations defines maximum operations to track for efficiency.
// Used in: granular_metrics.go for buffer pool efficiency tracking
// Impact: Controls memory usage and accuracy of efficiency statistics.
// Default 1000 operations provides good balance of accuracy and memory usage.
BufferPoolMaxOperations int
// HitRateCalculationBase defines base value for hit rate percentage calculation.
// Used in: granular_metrics.go for hit rate percentage calculation
// Impact: Must be 100 for accurate percentage calculation.
// Default 100 provides standard percentage calculation base.
HitRateCalculationBase float64
// Validation Constants - Configuration for input validation
// Used in: validation.go for parameter validation
// Impact: Controls validation thresholds and limits
// MaxLatency defines maximum allowed latency for audio processing.
// Used in: validation.go for latency validation
// Impact: Controls maximum acceptable latency before optimization triggers.
// Default 200ms provides reasonable upper bound for real-time audio.
MaxLatency time.Duration
// MinMetricsUpdateInterval defines minimum allowed metrics update interval.
// Used in: validation.go for metrics interval validation
// Impact: Prevents excessive metrics updates that could impact performance.
// Default 100ms provides reasonable minimum update frequency.
MinMetricsUpdateInterval time.Duration
// MaxMetricsUpdateInterval defines maximum allowed metrics update interval.
// Used in: validation.go for metrics interval validation
// Impact: Ensures metrics are updated frequently enough for monitoring.
// Default 30s provides reasonable maximum update interval.
MaxMetricsUpdateInterval time.Duration
// MinSampleRate defines minimum allowed audio sample rate.
// Used in: validation.go for sample rate validation
// Impact: Ensures sample rate is sufficient for audio quality.
// Default 8000Hz provides minimum for voice communication.
MinSampleRate int
// MaxSampleRate defines maximum allowed audio sample rate.
// Used in: validation.go for sample rate validation
// Impact: Prevents excessive sample rates that could impact performance.
// Default 192000Hz provides upper bound for high-quality audio.
MaxSampleRate int
// MaxChannels defines maximum allowed audio channels.
// Used in: validation.go for channel count validation
// Impact: Prevents excessive channel counts that could impact performance.
// Default 8 channels provides reasonable upper bound for multi-channel audio.
MaxChannels int
// Device Health Monitoring Configuration
// Used in: device_health.go for proactive device monitoring and recovery
// Impact: Controls health check frequency and recovery thresholds
// HealthCheckIntervalMS defines interval between device health checks in milliseconds.
// Used in: DeviceHealthMonitor for periodic health assessment
// Impact: Lower values provide faster detection but increase CPU usage.
// Default 5000ms (5s) provides good balance between responsiveness and overhead.
HealthCheckIntervalMS int
// HealthRecoveryThreshold defines number of consecutive successful operations
// required to mark a device as healthy after being unhealthy.
// Used in: DeviceHealthMonitor for recovery state management
// Impact: Higher values prevent premature recovery declarations.
// Default 3 consecutive successes ensures stable recovery.
HealthRecoveryThreshold int
// HealthLatencyThresholdMS defines maximum acceptable latency in milliseconds
// before considering a device unhealthy.
// Used in: DeviceHealthMonitor for latency-based health assessment
// Impact: Lower values trigger recovery sooner but may cause false positives.
// Default 100ms provides reasonable threshold for real-time audio.
HealthLatencyThresholdMS int
// HealthErrorRateLimit defines maximum error rate (0.0-1.0) before
// considering a device unhealthy.
// Used in: DeviceHealthMonitor for error rate assessment
// Impact: Lower values trigger recovery sooner for error-prone devices.
// Default 0.1 (10%) allows some transient errors while detecting problems.
HealthErrorRateLimit float64
// Latency Histogram Bucket Configuration
// Used in: LatencyHistogram for granular latency measurement buckets
// Impact: Defines the boundaries for latency distribution analysis
LatencyBucket10ms time.Duration // 10ms latency bucket
LatencyBucket25ms time.Duration // 25ms latency bucket
LatencyBucket50ms time.Duration // 50ms latency bucket
LatencyBucket100ms time.Duration // 100ms latency bucket
LatencyBucket250ms time.Duration // 250ms latency bucket
LatencyBucket500ms time.Duration // 500ms latency bucket
LatencyBucket1s time.Duration // 1s latency bucket
LatencyBucket2s time.Duration // 2s latency bucket
}
// DefaultAudioConfig returns the default configuration constants
@ -2405,12 +2204,6 @@ func DefaultAudioConfig() *AudioConfigConstants {
// Default 5s (shorter than general supervisor) for faster input recovery
InputSupervisorTimeout: 5 * time.Second,
// OutputSupervisorTimeout defines timeout for output supervisor operations.
// Used in: Output process monitoring, speaker supervision
// Impact: Controls responsiveness of output failure detection
// Default 5s (shorter than general supervisor) for faster output recovery
OutputSupervisorTimeout: 5 * time.Second,
// ShortTimeout defines brief timeout for quick operations (5ms).
// Used in: Lock acquisition, quick IPC operations, immediate responses
// Impact: Critical for maintaining real-time performance
@ -2572,56 +2365,6 @@ func DefaultAudioConfig() *AudioConfigConstants {
// Adaptive Buffer Constants
AdaptiveBufferCPUMultiplier: 100, // 100 multiplier for CPU percentage
AdaptiveBufferMemoryMultiplier: 100, // 100 multiplier for memory percentage
// Socket Names
InputSocketName: "audio_input.sock", // Socket name for audio input IPC
OutputSocketName: "audio_output.sock", // Socket name for audio output IPC
// Component Names
AudioInputComponentName: "audio-input", // Component name for input logging
AudioOutputComponentName: "audio-output", // Component name for output logging
AudioServerComponentName: "audio-server", // Component name for server logging
AudioRelayComponentName: "audio-relay", // Component name for relay logging
AudioEventsComponentName: "audio-events", // Component name for events logging
// Test Configuration
TestSocketTimeout: 100 * time.Millisecond, // 100ms timeout for test socket operations
TestBufferSize: 4096, // 4096 bytes buffer size for test operations
TestRetryDelay: 200 * time.Millisecond, // 200ms delay between test retry attempts
// Latency Histogram Configuration
LatencyHistogramMaxSamples: 1000, // 1000 samples for latency tracking
LatencyPercentile50: 50, // 50th percentile calculation factor
LatencyPercentile95: 95, // 95th percentile calculation factor
LatencyPercentile99: 99, // 99th percentile calculation factor
// Buffer Pool Efficiency Constants
BufferPoolMaxOperations: 1000, // 1000 operations for efficiency tracking
HitRateCalculationBase: 100.0, // 100.0 base for hit rate percentage calculation
// Validation Constants
MaxLatency: 500 * time.Millisecond, // 500ms maximum allowed latency
MinMetricsUpdateInterval: 100 * time.Millisecond, // 100ms minimum metrics update interval
MaxMetricsUpdateInterval: 10 * time.Second, // 10s maximum metrics update interval
MinSampleRate: 8000, // 8kHz minimum sample rate
MaxSampleRate: 48000, // 48kHz maximum sample rate
MaxChannels: 8, // 8 maximum audio channels
// Device Health Monitoring Configuration
HealthCheckIntervalMS: 5000, // 5000ms (5s) health check interval
HealthRecoveryThreshold: 3, // 3 consecutive successes for recovery
HealthLatencyThresholdMS: 100, // 100ms latency threshold for health
HealthErrorRateLimit: 0.1, // 10% error rate limit for health
// Latency Histogram Bucket Configuration
LatencyBucket10ms: 10 * time.Millisecond, // 10ms latency bucket
LatencyBucket25ms: 25 * time.Millisecond, // 25ms latency bucket
LatencyBucket50ms: 50 * time.Millisecond, // 50ms latency bucket
LatencyBucket100ms: 100 * time.Millisecond, // 100ms latency bucket
LatencyBucket250ms: 250 * time.Millisecond, // 250ms latency bucket
LatencyBucket500ms: 500 * time.Millisecond, // 500ms latency bucket
LatencyBucket1s: 1 * time.Second, // 1s latency bucket
LatencyBucket2s: 2 * time.Second, // 2s latency bucket
}
}

View File

@ -1,514 +0,0 @@
package audio
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/jetkvm/kvm/internal/logging"
"github.com/rs/zerolog"
)
// DeviceHealthStatus represents the health status of an audio device
type DeviceHealthStatus int

const (
	DeviceHealthUnknown DeviceHealthStatus = iota
	DeviceHealthHealthy
	DeviceHealthDegraded
	DeviceHealthFailing
	DeviceHealthCritical
)

// String returns the lowercase human-readable name of the status.
// Any value outside the declared range (including DeviceHealthUnknown)
// maps to "unknown".
func (s DeviceHealthStatus) String() string {
	names := [...]string{"unknown", "healthy", "degraded", "failing", "critical"}
	if s < 0 || int(s) >= len(names) {
		return "unknown"
	}
	return names[s]
}
// DeviceHealthMetrics tracks health-related metrics for audio devices.
//
// The int64 counters are updated with sync/atomic by the monitor's Record*
// methods; the remaining fields are written while holding
// DeviceHealthMonitor.mutex. JSON tags allow the struct to be serialized
// as-is in status payloads.
type DeviceHealthMetrics struct {
	// Error tracking
	ConsecutiveErrors int64     `json:"consecutive_errors"` // errors since the last successful op; reset by RecordSuccess
	TotalErrors       int64     `json:"total_errors"`       // lifetime error count
	LastErrorTime     time.Time `json:"last_error_time"`    // timestamp of the most recent error
	ErrorRate         float64   `json:"error_rate"`         // errors per minute
	// Performance metrics
	AverageLatency time.Duration `json:"average_latency"` // exponential moving average of operation latency
	MaxLatency     time.Duration `json:"max_latency"`     // worst latency observed so far
	LatencySpikes  int64         `json:"latency_spikes"`  // samples above the configured latency threshold
	Underruns      int64         `json:"underruns"`       // audio underrun events
	Overruns       int64         `json:"overruns"`        // audio overrun events
	// Device availability
	LastSuccessfulOp     time.Time `json:"last_successful_op"`    // timestamp of the most recent successful operation
	DeviceDisconnects    int64     `json:"device_disconnects"`    // NOTE(review): not written anywhere in this file — confirm the producer
	RecoveryAttempts     int64     `json:"recovery_attempts"`     // times triggerRecovery fired for this device
	SuccessfulRecoveries int64     `json:"successful_recoveries"` // recovery callbacks that returned nil
	// Health assessment
	CurrentStatus     DeviceHealthStatus `json:"current_status"`      // discrete status derived from the metrics above
	StatusLastChanged time.Time          `json:"status_last_changed"` // when CurrentStatus last transitioned
	HealthScore       float64            `json:"health_score"`        // 0.0 to 1.0
}
// DeviceHealthMonitor monitors the health of audio devices and triggers recovery
// callbacks when a device reaches critical status. Construct it with
// NewDeviceHealthMonitor and drive it via Start/Stop.
type DeviceHealthMonitor struct {
	// Atomic fields first for ARM32 alignment
	running           int32 // 1 while the monitoring goroutine is active (CAS-guarded in Start/Stop)
	monitoringEnabled int32 // gates the Record* methods; 0 makes them no-ops
	// Configuration (resolved once from GetConfig in the constructor)
	checkInterval     time.Duration // period of the health-check ticker
	recoveryThreshold int           // consecutive errors that mean "critical"
	latencyThreshold  time.Duration // latency above this counts as a spike
	errorRateLimit    float64       // max errors per minute
	// State tracking (guarded by mutex)
	captureMetrics  *DeviceHealthMetrics
	playbackMetrics *DeviceHealthMetrics
	mutex           sync.RWMutex
	// Control channels
	ctx      context.Context
	cancel   context.CancelFunc
	stopChan chan struct{} // closed by Stop to end the monitoring loop
	doneChan chan struct{} // closed by monitoringLoop on exit; Stop waits on it
	// Recovery callbacks keyed by component name (guarded by callbackMutex)
	recoveryCallbacks map[string]func() error
	callbackMutex     sync.RWMutex
	// Logging
	logger zerolog.Logger
	config *AudioConfigConstants
}
// NewDeviceHealthMonitor constructs a monitor whose thresholds are resolved
// once from the global audio configuration. Call Start to begin monitoring.
func NewDeviceHealthMonitor() *DeviceHealthMonitor {
	cfg := GetConfig()
	monitorCtx, cancelFn := context.WithCancel(context.Background())

	// Both devices start with an unknown status and a perfect score.
	freshMetrics := func() *DeviceHealthMetrics {
		return &DeviceHealthMetrics{
			CurrentStatus: DeviceHealthUnknown,
			HealthScore:   1.0,
		}
	}

	return &DeviceHealthMonitor{
		checkInterval:     time.Duration(cfg.HealthCheckIntervalMS) * time.Millisecond,
		recoveryThreshold: cfg.HealthRecoveryThreshold,
		latencyThreshold:  time.Duration(cfg.HealthLatencyThresholdMS) * time.Millisecond,
		errorRateLimit:    cfg.HealthErrorRateLimit,
		captureMetrics:    freshMetrics(),
		playbackMetrics:   freshMetrics(),
		ctx:               monitorCtx,
		cancel:            cancelFn,
		stopChan:          make(chan struct{}),
		doneChan:          make(chan struct{}),
		recoveryCallbacks: make(map[string]func() error),
		logger:            logging.GetDefaultLogger().With().Str("component", "device-health-monitor").Logger(),
		config:            cfg,
	}
}
// Start begins health monitoring. Only the first caller actually starts the
// loop; subsequent calls while running return an error.
func (dhm *DeviceHealthMonitor) Start() error {
	if started := atomic.CompareAndSwapInt32(&dhm.running, 0, 1); !started {
		return fmt.Errorf("device health monitor already running")
	}
	dhm.logger.Info().Msg("starting device health monitor")
	// Enable the Record* entry points before the loop begins.
	atomic.StoreInt32(&dhm.monitoringEnabled, 1)
	go dhm.monitoringLoop()
	return nil
}
// Stop halts health monitoring and waits (bounded by SupervisorTimeout) for
// the monitoring goroutine to exit. Calling Stop when not running is a no-op.
func (dhm *DeviceHealthMonitor) Stop() {
	if stopped := atomic.CompareAndSwapInt32(&dhm.running, 1, 0); !stopped {
		return
	}
	dhm.logger.Info().Msg("stopping device health monitor")
	atomic.StoreInt32(&dhm.monitoringEnabled, 0)
	close(dhm.stopChan)
	dhm.cancel()

	// Bounded wait: monitoringLoop closes doneChan on exit.
	timeout := time.After(time.Duration(dhm.config.SupervisorTimeout))
	select {
	case <-dhm.doneChan:
		dhm.logger.Info().Msg("device health monitor stopped")
	case <-timeout:
		dhm.logger.Warn().Msg("device health monitor stop timeout")
	}
}
// RegisterRecoveryCallback registers a recovery function for a specific
// component. Registered callbacks are invoked (each on its own goroutine)
// when a device reaches critical health.
func (dhm *DeviceHealthMonitor) RegisterRecoveryCallback(component string, callback func() error) {
	dhm.callbackMutex.Lock()
	dhm.recoveryCallbacks[component] = callback
	dhm.callbackMutex.Unlock()
	dhm.logger.Info().Str("component", component).Msg("registered recovery callback")
}
// RecordError records an error for health tracking on the given device type
// ("capture" or "playback"). It bumps the error counters, refreshes the
// error-rate estimate, and immediately re-assesses the device's health.
// Unknown device types are logged and ignored.
func (dhm *DeviceHealthMonitor) RecordError(deviceType string, err error) {
	if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
		return
	}
	dhm.mutex.Lock()
	defer dhm.mutex.Unlock()
	var metrics *DeviceHealthMetrics
	switch deviceType {
	case "capture":
		metrics = dhm.captureMetrics
	case "playback":
		metrics = dhm.playbackMetrics
	default:
		dhm.logger.Warn().Str("device_type", deviceType).Msg("unknown device type for error recording")
		return
	}
	atomic.AddInt64(&metrics.ConsecutiveErrors, 1)
	atomic.AddInt64(&metrics.TotalErrors, 1)

	// Fix: the previous implementation overwrote LastErrorTime with time.Now()
	// and then measured time.Since() against that same freshly written
	// timestamp, so the window was ~0 and ErrorRate exploded to a near-infinite
	// value (which forced calculateHealthStatus into "failing"). Compute the
	// rate from the interval between this error and the previous one instead,
	// using the old LastErrorTime before it is updated.
	now := time.Now()
	if !metrics.LastErrorTime.IsZero() {
		if interval := now.Sub(metrics.LastErrorTime); interval > 0 {
			// Instantaneous errors-per-minute based on inter-error spacing.
			metrics.ErrorRate = 1.0 / interval.Minutes()
		}
	}
	metrics.LastErrorTime = now

	dhm.logger.Debug().
		Str("device_type", deviceType).
		Err(err).
		Int64("consecutive_errors", metrics.ConsecutiveErrors).
		Float64("error_rate", metrics.ErrorRate).
		Msg("recorded device error")

	// Trigger immediate health assessment
	dhm.assessDeviceHealth(deviceType, metrics)
}
// RecordSuccess records a successful operation for the given device type.
// It clears the consecutive-error streak, stamps LastSuccessfulOp, and nudges
// the health score back toward 1.0 in 0.1 increments.
func (dhm *DeviceHealthMonitor) RecordSuccess(deviceType string) {
	if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
		return
	}
	dhm.mutex.Lock()
	defer dhm.mutex.Unlock()

	var metrics *DeviceHealthMetrics
	switch deviceType {
	case "capture":
		metrics = dhm.captureMetrics
	case "playback":
		metrics = dhm.playbackMetrics
	default:
		return
	}

	atomic.StoreInt64(&metrics.ConsecutiveErrors, 0)
	metrics.LastSuccessfulOp = time.Now()

	// Gradual recovery of the health score, capped at 1.0.
	if metrics.HealthScore < 1.0 {
		recovered := metrics.HealthScore + 0.1
		if recovered > 1.0 {
			recovered = 1.0
		}
		metrics.HealthScore = recovered
	}
}
// RecordLatency records operation latency for health assessment. The average
// is an exponential moving average (90% history, 10% new sample); the maximum
// is tracked separately, and samples above the configured threshold are
// counted as spikes.
func (dhm *DeviceHealthMonitor) RecordLatency(deviceType string, latency time.Duration) {
	if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
		return
	}
	dhm.mutex.Lock()
	defer dhm.mutex.Unlock()

	var metrics *DeviceHealthMetrics
	switch deviceType {
	case "capture":
		metrics = dhm.captureMetrics
	case "playback":
		metrics = dhm.playbackMetrics
	default:
		return
	}

	// The first sample seeds the average; later samples are blended via EMA.
	if metrics.AverageLatency == 0 {
		metrics.AverageLatency = latency
	} else {
		blended := float64(metrics.AverageLatency)*0.9 + float64(latency)*0.1
		metrics.AverageLatency = time.Duration(blended)
	}

	if metrics.MaxLatency < latency {
		metrics.MaxLatency = latency
	}
	if dhm.latencyThreshold < latency {
		atomic.AddInt64(&metrics.LatencySpikes, 1)
	}
}
// RecordUnderrun records an audio underrun event for the given device type.
// Unknown device types are silently ignored.
func (dhm *DeviceHealthMonitor) RecordUnderrun(deviceType string) {
	if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
		return
	}
	dhm.mutex.Lock()
	defer dhm.mutex.Unlock()

	var counter *int64
	switch deviceType {
	case "capture":
		counter = &dhm.captureMetrics.Underruns
	case "playback":
		counter = &dhm.playbackMetrics.Underruns
	default:
		return
	}
	atomic.AddInt64(counter, 1)
	dhm.logger.Debug().Str("device_type", deviceType).Msg("recorded audio underrun")
}
// RecordOverrun records an audio overrun event for the given device type.
// Unknown device types are silently ignored.
func (dhm *DeviceHealthMonitor) RecordOverrun(deviceType string) {
	if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
		return
	}
	dhm.mutex.Lock()
	defer dhm.mutex.Unlock()

	var counter *int64
	switch deviceType {
	case "capture":
		counter = &dhm.captureMetrics.Overruns
	case "playback":
		counter = &dhm.playbackMetrics.Overruns
	default:
		return
	}
	atomic.AddInt64(counter, 1)
	dhm.logger.Debug().Str("device_type", deviceType).Msg("recorded audio overrun")
}
// GetHealthMetrics returns point-in-time copies of the capture and playback
// health metrics, taken under the read lock.
func (dhm *DeviceHealthMonitor) GetHealthMetrics() (capture, playback DeviceHealthMetrics) {
	dhm.mutex.RLock()
	capture = *dhm.captureMetrics
	playback = *dhm.playbackMetrics
	dhm.mutex.RUnlock()
	return capture, playback
}
// monitoringLoop runs periodic health checks until stopped via stopChan or
// context cancellation. It closes doneChan on exit so Stop can wait for it.
func (dhm *DeviceHealthMonitor) monitoringLoop() {
	defer close(dhm.doneChan)

	ticker := time.NewTicker(dhm.checkInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			dhm.performHealthCheck()
		case <-dhm.stopChan:
			return
		case <-dhm.ctx.Done():
			return
		}
	}
}
// performHealthCheck assesses both devices under the write lock, then checks
// whether either needs recovery. Order (capture before playback, assessment
// before recovery checks) matches the previous implementation.
func (dhm *DeviceHealthMonitor) performHealthCheck() {
	dhm.mutex.Lock()
	defer dhm.mutex.Unlock()

	devices := []struct {
		name    string
		metrics *DeviceHealthMetrics
	}{
		{"capture", dhm.captureMetrics},
		{"playback", dhm.playbackMetrics},
	}

	for _, d := range devices {
		dhm.assessDeviceHealth(d.name, d.metrics)
	}
	for _, d := range devices {
		dhm.checkRecoveryNeeded(d.name, d.metrics)
	}
}
// assessDeviceHealth recomputes a device's health status and score, logging
// whenever the status transitions. Callers hold dhm.mutex.
func (dhm *DeviceHealthMonitor) assessDeviceHealth(deviceType string, metrics *DeviceHealthMetrics) {
	next := dhm.calculateHealthStatus(metrics)
	if prev := metrics.CurrentStatus; next != prev {
		metrics.CurrentStatus = next
		metrics.StatusLastChanged = time.Now()
		// NOTE: health_score here is the value from before this assessment;
		// the score itself is refreshed just below, as in the original.
		dhm.logger.Info().
			Str("device_type", deviceType).
			Str("previous_status", prev.String()).
			Str("new_status", next.String()).
			Float64("health_score", metrics.HealthScore).
			Msg("device health status changed")
	}
	metrics.HealthScore = dhm.calculateHealthScore(metrics)
}
// calculateHealthStatus maps current metrics onto a discrete health status.
// Checks run from most to least severe, so the worst matching level wins.
func (dhm *DeviceHealthMonitor) calculateHealthStatus(metrics *DeviceHealthMetrics) DeviceHealthStatus {
	consecutive := atomic.LoadInt64(&metrics.ConsecutiveErrors)
	total := atomic.LoadInt64(&metrics.TotalErrors)

	// Critical: the error streak reached the recovery threshold.
	if consecutive >= int64(dhm.recoveryThreshold) {
		return DeviceHealthCritical
	}
	// Critical: no successful operation for longer than the supervisor timeout.
	if !metrics.LastSuccessfulOp.IsZero() && time.Since(metrics.LastSuccessfulOp) > time.Duration(dhm.config.SupervisorTimeout) {
		return DeviceHealthCritical
	}
	// Failing: error rate over the limit, or too many latency spikes.
	spikes := atomic.LoadInt64(&metrics.LatencySpikes)
	if metrics.ErrorRate > dhm.errorRateLimit || spikes > int64(dhm.config.MaxDroppedFrames) {
		return DeviceHealthFailing
	}
	// Degraded: any current errors, a notable lifetime count, or high latency.
	switch {
	case consecutive > 0:
		return DeviceHealthDegraded
	case total > int64(dhm.config.MaxDroppedFrames/2):
		return DeviceHealthDegraded
	case metrics.AverageLatency > dhm.latencyThreshold:
		return DeviceHealthDegraded
	}
	// Healthy: no significant issues.
	return DeviceHealthHealthy
}
// calculateHealthScore calculates a numeric health score in [0.0, 1.0].
// Penalties are subtracted sequentially from a perfect 1.0; each category is
// individually capped except the consecutive-error penalty, which is not.
func (dhm *DeviceHealthMonitor) calculateHealthScore(metrics *DeviceHealthMetrics) float64 {
	score := 1.0

	// 0.1 penalty per consecutive error (uncapped).
	if consecutive := atomic.LoadInt64(&metrics.ConsecutiveErrors); consecutive > 0 {
		score -= float64(consecutive) * 0.1
	}
	// Error rate relative to the configured limit, capped at 0.5.
	if metrics.ErrorRate > 0 {
		score -= min(0.5, metrics.ErrorRate/dhm.errorRateLimit*0.5)
	}
	// Average latency beyond the threshold, capped at 0.3.
	if metrics.AverageLatency > dhm.latencyThreshold {
		excess := float64(metrics.AverageLatency-dhm.latencyThreshold) / float64(dhm.latencyThreshold)
		score -= min(0.3, excess*0.3)
	}
	// Under/overrun events at 0.01 each, capped at 0.2.
	if events := atomic.LoadInt64(&metrics.Underruns) + atomic.LoadInt64(&metrics.Overruns); events > 0 {
		score -= min(0.2, float64(events)*0.01)
	}

	return max(0.0, score)
}
// checkRecoveryNeeded triggers recovery when a device has reached critical
// health; all other statuses are left alone.
func (dhm *DeviceHealthMonitor) checkRecoveryNeeded(deviceType string, metrics *DeviceHealthMetrics) {
	if metrics.CurrentStatus != DeviceHealthCritical {
		return
	}
	dhm.triggerRecovery(deviceType, metrics)
}
// triggerRecovery fires every registered recovery callback for a device that
// has reached critical health. Each callback runs on its own goroutine so a
// slow or blocking recovery cannot stall the health-check loop.
// Called with dhm.mutex held (via performHealthCheck -> checkRecoveryNeeded).
func (dhm *DeviceHealthMonitor) triggerRecovery(deviceType string, metrics *DeviceHealthMetrics) {
	atomic.AddInt64(&metrics.RecoveryAttempts, 1)
	dhm.logger.Warn().
		Str("device_type", deviceType).
		Str("status", metrics.CurrentStatus.String()).
		Int64("consecutive_errors", atomic.LoadInt64(&metrics.ConsecutiveErrors)).
		Float64("error_rate", metrics.ErrorRate).
		Msg("triggering device recovery")
	// Try registered recovery callbacks.
	// NOTE(review): callbackMutex.RLock protects only the map iteration; the
	// spawned goroutines run outside it and record their outcome via atomics,
	// so callbacks must not assume exclusive access to monitor state.
	dhm.callbackMutex.RLock()
	defer dhm.callbackMutex.RUnlock()
	for component, callback := range dhm.recoveryCallbacks {
		if callback != nil {
			// Loop variables are passed as arguments to avoid capture issues.
			go func(comp string, cb func() error) {
				if err := cb(); err != nil {
					dhm.logger.Error().
						Str("component", comp).
						Str("device_type", deviceType).
						Err(err).
						Msg("recovery callback failed")
				} else {
					atomic.AddInt64(&metrics.SuccessfulRecoveries, 1)
					dhm.logger.Info().
						Str("component", comp).
						Str("device_type", deviceType).
						Msg("recovery callback succeeded")
				}
			}(component, callback)
		}
	}
}
// Global device health monitor instance, created lazily (and exactly once)
// by GetDeviceHealthMonitor.
var (
	globalDeviceHealthMonitor *DeviceHealthMonitor
	deviceHealthOnce          sync.Once // guards one-time construction
)
// GetDeviceHealthMonitor returns the process-wide device health monitor,
// constructing it on first use (thread-safe via sync.Once).
func GetDeviceHealthMonitor() *DeviceHealthMonitor {
	deviceHealthOnce.Do(func() {
		globalDeviceHealthMonitor = NewDeviceHealthMonitor()
	})
	return globalDeviceHealthMonitor
}
// Helper functions for min/max
func min(a, b float64) float64 {
if a < b {
return a
}
return b
}
func max(a, b float64) float64 {
if a > b {
return a
}
return b
}

View File

@ -111,7 +111,7 @@ func initializeBroadcaster() {
go audioEventBroadcaster.startMetricsBroadcasting()
// Start granular metrics logging with same interval as metrics broadcasting
// StartGranularMetricsLogging(GetMetricsUpdateInterval()) // Disabled to reduce log pollution
StartGranularMetricsLogging(GetMetricsUpdateInterval())
}
// InitializeAudioEventBroadcaster initializes the global audio event broadcaster

View File

@ -93,18 +93,18 @@ type BufferPoolEfficiencyTracker struct {
// NewLatencyHistogram creates a new latency histogram with predefined buckets
func NewLatencyHistogram(maxSamples int, logger zerolog.Logger) *LatencyHistogram {
// Define latency buckets using configuration constants
// Define latency buckets: 1ms, 5ms, 10ms, 25ms, 50ms, 100ms, 250ms, 500ms, 1s, 2s+
buckets := []int64{
int64(1 * time.Millisecond),
int64(5 * time.Millisecond),
int64(GetConfig().LatencyBucket10ms),
int64(GetConfig().LatencyBucket25ms),
int64(GetConfig().LatencyBucket50ms),
int64(GetConfig().LatencyBucket100ms),
int64(GetConfig().LatencyBucket250ms),
int64(GetConfig().LatencyBucket500ms),
int64(GetConfig().LatencyBucket1s),
int64(GetConfig().LatencyBucket2s),
int64(10 * time.Millisecond),
int64(25 * time.Millisecond),
int64(50 * time.Millisecond),
int64(100 * time.Millisecond),
int64(250 * time.Millisecond),
int64(500 * time.Millisecond),
int64(1 * time.Second),
int64(2 * time.Second),
}
return &LatencyHistogram{

View File

@ -1,7 +1,6 @@
package audio
import (
"fmt"
"sync/atomic"
"time"
@ -11,10 +10,10 @@ import (
// AudioInputMetrics holds metrics for microphone input
type AudioInputMetrics struct {
FramesSent int64 // Total frames sent
FramesDropped int64 // Total frames dropped
BytesProcessed int64 // Total bytes processed
ConnectionDrops int64 // Connection drops
FramesSent int64
FramesDropped int64
BytesProcessed int64
ConnectionDrops int64
AverageLatency time.Duration // time.Duration is int64
LastFrameTime time.Time
}
@ -32,30 +31,26 @@ type AudioInputManager struct {
func NewAudioInputManager() *AudioInputManager {
return &AudioInputManager{
ipcManager: NewAudioInputIPCManager(),
logger: logging.GetDefaultLogger().With().Str("component", AudioInputManagerComponent).Logger(),
logger: logging.GetDefaultLogger().With().Str("component", "audio-input").Logger(),
}
}
// Start begins processing microphone input
func (aim *AudioInputManager) Start() error {
if !atomic.CompareAndSwapInt32(&aim.running, 0, 1) {
return fmt.Errorf("audio input manager is already running")
return nil // Already running
}
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("starting component")
aim.logger.Info().Msg("Starting audio input manager")
// Start the IPC-based audio input
err := aim.ipcManager.Start()
if err != nil {
aim.logger.Error().Err(err).Str("component", AudioInputManagerComponent).Msg("failed to start component")
// Ensure proper cleanup on error
aim.logger.Error().Err(err).Msg("Failed to start IPC audio input")
atomic.StoreInt32(&aim.running, 0)
// Reset metrics on failed start
aim.resetMetrics()
return err
}
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("component started successfully")
return nil
}
@ -65,20 +60,12 @@ func (aim *AudioInputManager) Stop() {
return // Already stopped
}
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("stopping component")
aim.logger.Info().Msg("Stopping audio input manager")
// Stop the IPC-based audio input
aim.ipcManager.Stop()
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("component stopped")
}
// resetMetrics resets all metrics to zero
func (aim *AudioInputManager) resetMetrics() {
atomic.StoreInt64(&aim.metrics.FramesSent, 0)
atomic.StoreInt64(&aim.metrics.FramesDropped, 0)
atomic.StoreInt64(&aim.metrics.BytesProcessed, 0)
atomic.StoreInt64(&aim.metrics.ConnectionDrops, 0)
aim.logger.Info().Msg("Audio input manager stopped")
}
// WriteOpusFrame writes an Opus frame to the audio input system with latency tracking

View File

@ -1,6 +1,7 @@
package audio
import (
"context"
"encoding/binary"
"fmt"
"io"
@ -13,12 +14,12 @@ import (
"time"
"github.com/jetkvm/kvm/internal/logging"
"github.com/rs/zerolog"
)
var (
inputMagicNumber uint32 = GetConfig().InputMagicNumber // "JKMI" (JetKVM Microphone Input)
inputSocketName = "audio_input.sock"
writeTimeout = GetConfig().WriteTimeout // Non-blocking write timeout
)
const (
@ -50,27 +51,6 @@ type InputIPCMessage struct {
Data []byte
}
// Implement IPCMessage interface
func (msg *InputIPCMessage) GetMagic() uint32 {
return msg.Magic
}
func (msg *InputIPCMessage) GetType() uint8 {
return uint8(msg.Type)
}
func (msg *InputIPCMessage) GetLength() uint32 {
return msg.Length
}
func (msg *InputIPCMessage) GetTimestamp() int64 {
return msg.Timestamp
}
func (msg *InputIPCMessage) GetData() []byte {
return msg.Data
}
// OptimizedIPCMessage represents an optimized message with pre-allocated buffers
type OptimizedIPCMessage struct {
header [headerSize]byte // Pre-allocated header buffer
@ -100,15 +80,16 @@ var globalMessagePool = &MessagePool{
var messagePoolInitOnce sync.Once
// initializeMessagePool initializes the global message pool with pre-allocated messages
// initializeMessagePool initializes the message pool with pre-allocated messages
func initializeMessagePool() {
messagePoolInitOnce.Do(func() {
preallocSize := messagePoolSize / 4 // 25% pre-allocated for immediate use
// Pre-allocate 30% of pool size for immediate availability
preallocSize := messagePoolSize * GetConfig().InputPreallocPercentage / 100
globalMessagePool.preallocSize = preallocSize
globalMessagePool.maxPoolSize = messagePoolSize * GetConfig().PoolGrowthMultiplier // Allow growth up to 2x
globalMessagePool.preallocated = make([]*OptimizedIPCMessage, 0, preallocSize)
// Pre-allocate messages for immediate use
// Pre-allocate messages to reduce initial allocation overhead
for i := 0; i < preallocSize; i++ {
msg := &OptimizedIPCMessage{
data: make([]byte, 0, maxFrameSize),
@ -116,7 +97,7 @@ func initializeMessagePool() {
globalMessagePool.preallocated = append(globalMessagePool.preallocated, msg)
}
// Fill the channel with remaining messages
// Fill the channel pool with remaining messages
for i := preallocSize; i < messagePoolSize; i++ {
globalMessagePool.pool <- &OptimizedIPCMessage{
data: make([]byte, 0, maxFrameSize),
@ -186,7 +167,7 @@ type InputIPCConfig struct {
// AudioInputServer handles IPC communication for audio input processing
type AudioInputServer struct {
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
// Atomic fields must be first for proper alignment on ARM
bufferSize int64 // Current buffer size (atomic)
processingTime int64 // Average processing time in nanoseconds (atomic)
droppedFrames int64 // Dropped frames counter (atomic)
@ -246,11 +227,6 @@ func (ais *AudioInputServer) Start() error {
ais.running = true
// Reset counters on start
atomic.StoreInt64(&ais.totalFrames, 0)
atomic.StoreInt64(&ais.droppedFrames, 0)
atomic.StoreInt64(&ais.processingTime, 0)
// Start triple-goroutine architecture
ais.startReaderGoroutine()
ais.startProcessorGoroutine()
@ -300,9 +276,7 @@ func (ais *AudioInputServer) acceptConnections() {
conn, err := ais.listener.Accept()
if err != nil {
if ais.running {
// Log error and continue accepting
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-server").Logger()
logger.Warn().Err(err).Msg("Failed to accept connection, retrying")
// Only log error if we're still supposed to be running
continue
}
return
@ -319,10 +293,9 @@ func (ais *AudioInputServer) acceptConnections() {
}
ais.mtx.Lock()
// Close existing connection if any to prevent resource leaks
// Close existing connection if any
if ais.conn != nil {
ais.conn.Close()
ais.conn = nil
}
ais.conn = conn
ais.mtx.Unlock()
@ -488,13 +461,33 @@ func (ais *AudioInputServer) sendAck() error {
return ais.writeMessage(ais.conn, msg)
}
// Global shared message pool for input IPC server
var globalInputServerMessagePool = NewGenericMessagePool(messagePoolSize)
// writeMessage writes a message to the connection using shared common utilities
// writeMessage writes a message to the connection using optimized buffers
func (ais *AudioInputServer) writeMessage(conn net.Conn, msg *InputIPCMessage) error {
// Use shared WriteIPCMessage function with global message pool
return WriteIPCMessage(conn, msg, globalInputServerMessagePool, &ais.droppedFrames)
// Get optimized message from pool for header preparation
optMsg := globalMessagePool.Get()
defer globalMessagePool.Put(optMsg)
// Prepare header in pre-allocated buffer
binary.LittleEndian.PutUint32(optMsg.header[0:4], msg.Magic)
optMsg.header[4] = byte(msg.Type)
binary.LittleEndian.PutUint32(optMsg.header[5:9], msg.Length)
binary.LittleEndian.PutUint64(optMsg.header[9:17], uint64(msg.Timestamp))
// Write header
_, err := conn.Write(optMsg.header[:])
if err != nil {
return err
}
// Write data if present
if msg.Length > 0 && msg.Data != nil {
_, err = conn.Write(msg.Data)
if err != nil {
return err
}
}
return nil
}
// AudioInputClient handles IPC communication from the main process
@ -522,12 +515,6 @@ func (aic *AudioInputClient) Connect() error {
return nil // Already connected
}
// Ensure clean state before connecting
if aic.conn != nil {
aic.conn.Close()
aic.conn = nil
}
socketPath := getInputSocketPath()
// Try connecting multiple times as the server might not be ready
// Reduced retry count and delay for faster startup
@ -536,9 +523,6 @@ func (aic *AudioInputClient) Connect() error {
if err == nil {
aic.conn = conn
aic.running = true
// Reset frame counters on successful connection
atomic.StoreInt64(&aic.totalFrames, 0)
atomic.StoreInt64(&aic.droppedFrames, 0)
return nil
}
// Exponential backoff starting from config
@ -551,10 +535,7 @@ func (aic *AudioInputClient) Connect() error {
time.Sleep(delay)
}
// Ensure clean state on connection failure
aic.conn = nil
aic.running = false
return fmt.Errorf("failed to connect to audio input server after 10 attempts")
return fmt.Errorf("failed to connect to audio input server")
}
// Disconnect disconnects from the audio input server
@ -686,15 +667,58 @@ func (aic *AudioInputClient) SendHeartbeat() error {
}
// writeMessage writes a message to the server
// Global shared message pool for input IPC clients
var globalInputMessagePool = NewGenericMessagePool(messagePoolSize)
func (aic *AudioInputClient) writeMessage(msg *InputIPCMessage) error {
// Increment total frames counter
atomic.AddInt64(&aic.totalFrames, 1)
// Use shared WriteIPCMessage function with global message pool
return WriteIPCMessage(aic.conn, msg, globalInputMessagePool, &aic.droppedFrames)
// Get optimized message from pool for header preparation
optMsg := globalMessagePool.Get()
defer globalMessagePool.Put(optMsg)
// Prepare header in pre-allocated buffer
binary.LittleEndian.PutUint32(optMsg.header[0:4], msg.Magic)
optMsg.header[4] = byte(msg.Type)
binary.LittleEndian.PutUint32(optMsg.header[5:9], msg.Length)
binary.LittleEndian.PutUint64(optMsg.header[9:17], uint64(msg.Timestamp))
// Use non-blocking write with timeout
ctx, cancel := context.WithTimeout(context.Background(), writeTimeout)
defer cancel()
// Create a channel to signal write completion
done := make(chan error, 1)
go func() {
// Write header using pre-allocated buffer
_, err := aic.conn.Write(optMsg.header[:])
if err != nil {
done <- err
return
}
// Write data if present
if msg.Length > 0 && msg.Data != nil {
_, err = aic.conn.Write(msg.Data)
if err != nil {
done <- err
return
}
}
done <- nil
}()
// Wait for completion or timeout
select {
case err := <-done:
if err != nil {
atomic.AddInt64(&aic.droppedFrames, 1)
return err
}
return nil
case <-ctx.Done():
// Timeout occurred - drop frame to prevent blocking
atomic.AddInt64(&aic.droppedFrames, 1)
return fmt.Errorf("write timeout - frame dropped")
}
}
// IsConnected returns whether the client is connected
@ -706,19 +730,23 @@ func (aic *AudioInputClient) IsConnected() bool {
// GetFrameStats returns frame statistics
func (aic *AudioInputClient) GetFrameStats() (total, dropped int64) {
stats := GetFrameStats(&aic.totalFrames, &aic.droppedFrames)
return stats.Total, stats.Dropped
return atomic.LoadInt64(&aic.totalFrames), atomic.LoadInt64(&aic.droppedFrames)
}
// GetDropRate returns the current frame drop rate as a percentage
func (aic *AudioInputClient) GetDropRate() float64 {
stats := GetFrameStats(&aic.totalFrames, &aic.droppedFrames)
return CalculateDropRate(stats)
total := atomic.LoadInt64(&aic.totalFrames)
dropped := atomic.LoadInt64(&aic.droppedFrames)
if total == 0 {
return 0.0
}
return float64(dropped) / float64(total) * GetConfig().PercentageMultiplier
}
// ResetStats resets frame statistics
func (aic *AudioInputClient) ResetStats() {
ResetFrameStats(&aic.totalFrames, &aic.droppedFrames)
atomic.StoreInt64(&aic.totalFrames, 0)
atomic.StoreInt64(&aic.droppedFrames, 0)
}
// startReaderGoroutine starts the message reader goroutine
@ -726,17 +754,6 @@ func (ais *AudioInputServer) startReaderGoroutine() {
ais.wg.Add(1)
go func() {
defer ais.wg.Done()
// Enhanced error tracking and recovery
var consecutiveErrors int
var lastErrorTime time.Time
maxConsecutiveErrors := GetConfig().MaxConsecutiveErrors
errorResetWindow := GetConfig().RestartWindow // Use existing restart window
baseBackoffDelay := GetConfig().RetryDelay
maxBackoffDelay := GetConfig().MaxRetryDelay
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-reader").Logger()
for {
select {
case <-ais.stopChan:
@ -745,55 +762,8 @@ func (ais *AudioInputServer) startReaderGoroutine() {
if ais.conn != nil {
msg, err := ais.readMessage(ais.conn)
if err != nil {
// Enhanced error handling with progressive backoff
now := time.Now()
// Reset error counter if enough time has passed
if now.Sub(lastErrorTime) > errorResetWindow {
consecutiveErrors = 0
}
consecutiveErrors++
lastErrorTime = now
// Log error with context
logger.Warn().Err(err).
Int("consecutive_errors", consecutiveErrors).
Msg("Failed to read message from input connection")
// Progressive backoff based on error count
if consecutiveErrors > 1 {
backoffDelay := time.Duration(consecutiveErrors-1) * baseBackoffDelay
if backoffDelay > maxBackoffDelay {
backoffDelay = maxBackoffDelay
}
time.Sleep(backoffDelay)
}
// If too many consecutive errors, close connection to force reconnect
if consecutiveErrors >= maxConsecutiveErrors {
logger.Error().
Int("consecutive_errors", consecutiveErrors).
Msg("Too many consecutive read errors, closing connection")
ais.mtx.Lock()
if ais.conn != nil {
ais.conn.Close()
ais.conn = nil
}
ais.mtx.Unlock()
consecutiveErrors = 0 // Reset for next connection
}
continue
continue // Connection error, retry
}
// Reset error counter on successful read
if consecutiveErrors > 0 {
consecutiveErrors = 0
logger.Info().Msg("Input connection recovered")
}
// Send to message channel with non-blocking write
select {
case ais.messageChan <- msg:
@ -801,11 +771,7 @@ func (ais *AudioInputServer) startReaderGoroutine() {
default:
// Channel full, drop message
atomic.AddInt64(&ais.droppedFrames, 1)
logger.Warn().Msg("Message channel full, dropping frame")
}
} else {
// No connection, wait briefly before checking again
time.Sleep(GetConfig().DefaultSleepDuration)
}
}
}
@ -830,105 +796,40 @@ func (ais *AudioInputServer) startProcessorGoroutine() {
}
}()
// Enhanced error tracking for processing
var processingErrors int
var lastProcessingError time.Time
maxProcessingErrors := GetConfig().MaxConsecutiveErrors
errorResetWindow := GetConfig().RestartWindow
defer ais.wg.Done()
for {
select {
case <-ais.stopChan:
return
case msg := <-ais.messageChan:
// Process message with error handling
start := time.Now()
err := ais.processMessageWithRecovery(msg, logger)
processingTime := time.Since(start)
// Intelligent frame dropping: prioritize recent frames
if msg.Type == InputMessageTypeOpusFrame {
// Check if processing queue is getting full
queueLen := len(ais.processChan)
bufferSize := int(atomic.LoadInt64(&ais.bufferSize))
if err != nil {
// Track processing errors
now := time.Now()
if now.Sub(lastProcessingError) > errorResetWindow {
processingErrors = 0
}
processingErrors++
lastProcessingError = now
logger.Warn().Err(err).
Int("processing_errors", processingErrors).
Dur("processing_time", processingTime).
Msg("Failed to process input message")
// If too many processing errors, drop frames more aggressively
if processingErrors >= maxProcessingErrors {
logger.Error().
Int("processing_errors", processingErrors).
Msg("Too many processing errors, entering aggressive drop mode")
// Clear processing queue to recover
for len(ais.processChan) > 0 {
select {
case <-ais.processChan:
atomic.AddInt64(&ais.droppedFrames, 1)
default:
break
}
if queueLen > bufferSize*3/4 {
// Drop oldest frames, keep newest
select {
case <-ais.processChan: // Remove oldest
atomic.AddInt64(&ais.droppedFrames, 1)
default:
}
processingErrors = 0 // Reset after clearing queue
}
continue
}
// Reset error counter on successful processing
if processingErrors > 0 {
processingErrors = 0
logger.Info().Msg("Input processing recovered")
// Send to processing queue
select {
case ais.processChan <- msg:
default:
// Processing queue full, drop frame
atomic.AddInt64(&ais.droppedFrames, 1)
}
// Update processing time metrics
atomic.StoreInt64(&ais.processingTime, processingTime.Nanoseconds())
}
}
}()
}
// processMessageWithRecovery enqueues msg onto the processing queue with
// enhanced error recovery.
//
// For Opus frames it applies intelligent frame dropping: when the processing
// queue is more than 3/4 full, the oldest queued frame is discarded so the
// most recent audio is prioritized. The message is then enqueued, waiting up
// to GetConfig().WriteTimeout for room; if the queue stays full past the
// deadline the frame is counted as dropped and an error is returned.
func (ais *AudioInputServer) processMessageWithRecovery(msg *InputIPCMessage, logger zerolog.Logger) error {
	// Intelligent frame dropping: prioritize recent frames.
	if msg.Type == InputMessageTypeOpusFrame {
		queueLen := len(ais.processChan)
		bufferSize := int(atomic.LoadInt64(&ais.bufferSize))
		if queueLen > bufferSize*3/4 {
			// Drop the oldest frame, keep the newest.
			select {
			case <-ais.processChan:
				atomic.AddInt64(&ais.droppedFrames, 1)
				logger.Debug().Msg("Dropped oldest frame to make room")
			default:
			}
		}
	}
	// Fast path: non-blocking enqueue.
	select {
	case ais.processChan <- msg:
		return nil
	default:
	}
	// Queue currently full: wait up to WriteTimeout for room.
	// BUGFIX: the previous select combined the time.After case with a
	// `default` case, so the select never blocked — the timeout branch was
	// unreachable and a timer was leaked on every full-queue call. Use an
	// explicit timer (stopped on exit) and no default so the bounded wait
	// actually happens.
	timer := time.NewTimer(GetConfig().WriteTimeout)
	defer timer.Stop()
	select {
	case ais.processChan <- msg:
		return nil
	case <-timer.C:
		// Processing queue full and timeout reached, drop frame.
		atomic.AddInt64(&ais.droppedFrames, 1)
		return fmt.Errorf("processing queue timeout")
	}
}
// startMonitorGoroutine starts the performance monitoring goroutine
func (ais *AudioInputServer) startMonitorGoroutine() {
ais.wg.Add(1)

View File

@ -21,7 +21,7 @@ type AudioInputIPCManager struct {
func NewAudioInputIPCManager() *AudioInputIPCManager {
return &AudioInputIPCManager{
supervisor: NewAudioInputSupervisor(),
logger: logging.GetDefaultLogger().With().Str("component", AudioInputIPCComponent).Logger(),
logger: logging.GetDefaultLogger().With().Str("component", "audio-input-ipc").Logger(),
}
}
@ -31,15 +31,12 @@ func (aim *AudioInputIPCManager) Start() error {
return nil
}
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("starting component")
aim.logger.Info().Msg("Starting IPC-based audio input system")
err := aim.supervisor.Start()
if err != nil {
// Ensure proper cleanup on supervisor start failure
atomic.StoreInt32(&aim.running, 0)
// Reset metrics on failed start
aim.resetMetrics()
aim.logger.Error().Err(err).Str("component", AudioInputIPCComponent).Msg("failed to start audio input supervisor")
aim.logger.Error().Err(err).Msg("Failed to start audio input supervisor")
return err
}
@ -54,11 +51,10 @@ func (aim *AudioInputIPCManager) Start() error {
err = aim.supervisor.SendConfig(config)
if err != nil {
// Config send failure is not critical, log warning and continue
aim.logger.Warn().Err(err).Str("component", AudioInputIPCComponent).Msg("failed to send initial config, will retry later")
aim.logger.Warn().Err(err).Msg("Failed to send initial config, will retry later")
}
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("component started successfully")
aim.logger.Info().Msg("IPC-based audio input system started")
return nil
}
@ -68,17 +64,9 @@ func (aim *AudioInputIPCManager) Stop() {
return
}
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("stopping component")
aim.logger.Info().Msg("Stopping IPC-based audio input system")
aim.supervisor.Stop()
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("component stopped")
}
// resetMetrics resets all metrics to zero
func (aim *AudioInputIPCManager) resetMetrics() {
atomic.StoreInt64(&aim.metrics.FramesSent, 0)
atomic.StoreInt64(&aim.metrics.FramesDropped, 0)
atomic.StoreInt64(&aim.metrics.BytesProcessed, 0)
atomic.StoreInt64(&aim.metrics.ConnectionDrops, 0)
aim.logger.Info().Msg("IPC-based audio input system stopped")
}
// WriteOpusFrame sends an Opus frame to the audio input server via IPC

View File

@ -1,277 +0,0 @@
package audio
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAudioInputIPCManager drives the AudioInputIPCManager sub-tests.
func TestAudioInputIPCManager(t *testing.T) {
	cases := []struct {
		label string
		run   func(t *testing.T)
	}{
		{"Start", testAudioInputIPCManagerStart},
		{"Stop", testAudioInputIPCManagerStop},
		{"StartStop", testAudioInputIPCManagerStartStop},
		{"IsRunning", testAudioInputIPCManagerIsRunning},
		{"IsReady", testAudioInputIPCManagerIsReady},
		{"GetMetrics", testAudioInputIPCManagerGetMetrics},
		{"ConcurrentOperations", testAudioInputIPCManagerConcurrent},
		{"MultipleStarts", testAudioInputIPCManagerMultipleStarts},
		{"MultipleStops", testAudioInputIPCManagerMultipleStops},
	}
	for _, tc := range cases {
		t.Run(tc.label, tc.run)
	}
}
// testAudioInputIPCManagerStart verifies a fresh manager is idle and that
// Start flips it into the running state.
func testAudioInputIPCManagerStart(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	// A new manager must be idle.
	assert.False(t, mgr.IsRunning())
	assert.False(t, mgr.IsReady())
	// Starting must succeed and mark the manager running.
	assert.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning())
	mgr.Stop()
}
// testAudioInputIPCManagerStop verifies Stop returns a started manager to
// the idle, not-ready state.
func testAudioInputIPCManagerStop(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	require.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning())
	mgr.Stop()
	assert.False(t, mgr.IsRunning())
	assert.False(t, mgr.IsReady())
}
// testAudioInputIPCManagerStartStop exercises repeated start/stop cycles.
func testAudioInputIPCManagerStartStop(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	for cycle := 0; cycle < 3; cycle++ {
		assert.NoError(t, mgr.Start())
		assert.True(t, mgr.IsRunning())
		mgr.Stop()
		assert.False(t, mgr.IsRunning())
	}
}
// testAudioInputIPCManagerIsRunning checks IsRunning across the lifecycle.
func testAudioInputIPCManagerIsRunning(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	assert.False(t, mgr.IsRunning()) // idle before Start
	require.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning()) // running after Start
	mgr.Stop()
	assert.False(t, mgr.IsRunning()) // idle again after Stop
}
// testAudioInputIPCManagerIsReady checks IsReady before Start and after Stop.
func testAudioInputIPCManagerIsReady(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	assert.False(t, mgr.IsReady())
	require.NoError(t, mgr.Start())
	// Allow the subsystem a moment to initialize.
	time.Sleep(100 * time.Millisecond)
	mgr.Stop()
	assert.False(t, mgr.IsReady())
}
// testAudioInputIPCManagerGetMetrics verifies metrics are available both
// while stopped and while running.
func testAudioInputIPCManagerGetMetrics(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	assert.NotNil(t, mgr.GetMetrics()) // metrics readable while stopped
	require.NoError(t, mgr.Start())
	assert.NotNil(t, mgr.GetMetrics()) // and while running
	mgr.Stop()
}
// testAudioInputIPCManagerConcurrent hammers Start and Stop from many
// goroutines to check the manager's internal synchronization.
func testAudioInputIPCManagerConcurrent(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	const workers = 10
	var wg sync.WaitGroup
	// Concurrent starts must leave the manager running.
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			mgr.Start()
		}()
	}
	wg.Wait()
	assert.True(t, mgr.IsRunning())
	// Concurrent stops must leave it stopped.
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			mgr.Stop()
		}()
	}
	wg.Wait()
	assert.False(t, mgr.IsRunning())
}
// testAudioInputIPCManagerMultipleStarts verifies redundant Start calls are
// harmless no-ops.
func testAudioInputIPCManagerMultipleStarts(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	// First start actually starts the manager.
	assert.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning())
	// Two more starts must be no-ops that still report success.
	for i := 0; i < 2; i++ {
		assert.NoError(t, mgr.Start())
		assert.True(t, mgr.IsRunning())
	}
	mgr.Stop()
}
// testAudioInputIPCManagerMultipleStops verifies redundant Stop calls are
// harmless no-ops.
func testAudioInputIPCManagerMultipleStops(t *testing.T) {
	mgr := NewAudioInputIPCManager()
	require.NotNil(t, mgr)
	require.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning())
	// First stop takes effect; the rest must be no-ops.
	for i := 0; i < 3; i++ {
		mgr.Stop()
		assert.False(t, mgr.IsRunning())
	}
}
// TestAudioInputIPCMetrics checks the zero value and plain field round-trips
// of AudioInputMetrics.
func TestAudioInputIPCMetrics(t *testing.T) {
	m := &AudioInputMetrics{}
	// Zero value: every counter zero, no frame seen, no latency recorded.
	for _, v := range []int64{m.FramesSent, m.FramesDropped, m.BytesProcessed, m.ConnectionDrops} {
		assert.Equal(t, int64(0), v)
	}
	assert.Equal(t, time.Duration(0), m.AverageLatency)
	assert.True(t, m.LastFrameTime.IsZero())
	// Assign each field and read it back.
	m.FramesSent = 50
	m.FramesDropped = 2
	m.BytesProcessed = 512
	m.ConnectionDrops = 1
	m.AverageLatency = 5 * time.Millisecond
	m.LastFrameTime = time.Now()
	assert.Equal(t, int64(50), m.FramesSent)
	assert.Equal(t, int64(2), m.FramesDropped)
	assert.Equal(t, int64(512), m.BytesProcessed)
	assert.Equal(t, int64(1), m.ConnectionDrops)
	assert.Equal(t, 5*time.Millisecond, m.AverageLatency)
	assert.False(t, m.LastFrameTime.IsZero())
}
// BenchmarkAudioInputIPCManager measures lifecycle and accessor costs of
// the IPC manager.
func BenchmarkAudioInputIPCManager(b *testing.B) {
	b.Run("Start", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			mgr := NewAudioInputIPCManager()
			mgr.Start()
			mgr.Stop()
		}
	})
	b.Run("IsRunning", func(b *testing.B) {
		mgr := NewAudioInputIPCManager()
		mgr.Start()
		defer mgr.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			mgr.IsRunning()
		}
	})
	b.Run("GetMetrics", func(b *testing.B) {
		mgr := NewAudioInputIPCManager()
		mgr.Start()
		defer mgr.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			mgr.GetMetrics()
		}
	})
}

View File

@ -168,16 +168,7 @@ func (ais *AudioInputSupervisor) GetProcessMetrics() *ProcessMetrics {
defer ais.mtx.Unlock()
if ais.cmd == nil || ais.cmd.Process == nil {
// Return default metrics when no process is running
return &ProcessMetrics{
PID: 0,
CPUPercent: 0.0,
MemoryRSS: 0,
MemoryVMS: 0,
MemoryPercent: 0.0,
Timestamp: time.Now(),
ProcessName: "audio-input-server",
}
return nil
}
pid := ais.cmd.Process.Pid
@ -187,21 +178,12 @@ func (ais *AudioInputSupervisor) GetProcessMetrics() *ProcessMetrics {
return &metric
}
}
// Return default metrics if process not found in monitoring
return &ProcessMetrics{
PID: pid,
CPUPercent: 0.0,
MemoryRSS: 0,
MemoryVMS: 0,
MemoryPercent: 0.0,
Timestamp: time.Now(),
ProcessName: "audio-input-server",
}
return nil
}
// monitorSubprocess monitors the subprocess and handles unexpected exits
func (ais *AudioInputSupervisor) monitorSubprocess() {
if ais.cmd == nil || ais.cmd.Process == nil {
if ais.cmd == nil {
return
}

View File

@ -1,241 +0,0 @@
package audio
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewAudioInputManager verifies the constructor yields an idle manager.
func TestNewAudioInputManager(t *testing.T) {
	mgr := NewAudioInputManager()
	assert.NotNil(t, mgr)
	assert.False(t, mgr.IsRunning())
	assert.False(t, mgr.IsReady())
}
// TestAudioInputManagerStart verifies Start succeeds once and rejects a
// second call with an "already running" error.
func TestAudioInputManagerStart(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	assert.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning())
	// A second Start must fail while running.
	err := mgr.Start()
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "already running")
	mgr.Stop()
}
// TestAudioInputManagerStop verifies Stop is a no-op when idle and takes
// effect after Start.
func TestAudioInputManagerStop(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	// Stopping an idle manager must not panic or change state.
	mgr.Stop()
	assert.False(t, mgr.IsRunning())
	require.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning())
	mgr.Stop()
	assert.False(t, mgr.IsRunning())
}
// TestAudioInputManagerIsRunning checks IsRunning across the lifecycle.
func TestAudioInputManagerIsRunning(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	assert.False(t, mgr.IsRunning()) // idle initially
	require.NoError(t, mgr.Start())
	assert.True(t, mgr.IsRunning()) // running after Start
	mgr.Stop()
	assert.False(t, mgr.IsRunning()) // idle after Stop
}
// TestAudioInputManagerIsReady checks IsReady before Start and ensures it
// can be queried safely while running.
func TestAudioInputManagerIsReady(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	assert.False(t, mgr.IsReady())
	require.NoError(t, mgr.Start())
	// Allow the subsystem a moment to initialize.
	time.Sleep(100 * time.Millisecond)
	// Readiness is implementation-dependent here; the call just must not panic.
	_ = mgr.IsReady()
	mgr.Stop()
}
// TestAudioInputManagerGetMetrics verifies metrics start at zero and stay
// non-negative once the manager is running.
func TestAudioInputManagerGetMetrics(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	m := mgr.GetMetrics()
	assert.NotNil(t, m)
	// All counters are zero before the first Start.
	for _, v := range []int64{m.FramesSent, m.FramesDropped, m.BytesProcessed, m.ConnectionDrops} {
		assert.Equal(t, int64(0), v)
	}
	require.NoError(t, mgr.Start())
	m = mgr.GetMetrics()
	assert.NotNil(t, m)
	// While running, counters may have advanced but never go negative.
	for _, v := range []int64{m.FramesSent, m.FramesDropped, m.BytesProcessed, m.ConnectionDrops} {
		assert.GreaterOrEqual(t, v, int64(0))
	}
	mgr.Stop()
}
// TestAudioInputManagerConcurrentOperations races Start/Stop, metric reads,
// and status checks to exercise the manager's synchronization.
func TestAudioInputManagerConcurrentOperations(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	var wg sync.WaitGroup
	// Racing start/stop pairs.
	for i := 0; i < 10; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			_ = mgr.Start()
		}()
		go func() {
			defer wg.Done()
			mgr.Stop()
		}()
	}
	// Concurrent metric reads.
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = mgr.GetMetrics()
		}()
	}
	// Concurrent status checks.
	for i := 0; i < 5; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			_ = mgr.IsRunning()
		}()
		go func() {
			defer wg.Done()
			_ = mgr.IsReady()
		}()
	}
	wg.Wait()
	mgr.Stop()
}
// TestAudioInputManagerMultipleStartStop exercises repeated lifecycle cycles.
func TestAudioInputManagerMultipleStartStop(t *testing.T) {
	mgr := NewAudioInputManager()
	require.NotNil(t, mgr)
	for cycle := 0; cycle < 5; cycle++ {
		assert.NoError(t, mgr.Start())
		assert.True(t, mgr.IsRunning())
		mgr.Stop()
		assert.False(t, mgr.IsRunning())
	}
}
// TestAudioInputMetrics checks that a populated AudioInputMetrics literal
// reads back the assigned field values.
func TestAudioInputMetrics(t *testing.T) {
	now := time.Now()
	m := &AudioInputMetrics{
		FramesSent:      100,
		FramesDropped:   5,
		BytesProcessed:  1024,
		ConnectionDrops: 2,
		AverageLatency:  time.Millisecond * 10,
		LastFrameTime:   now,
	}
	assert.Equal(t, int64(100), m.FramesSent)
	assert.Equal(t, int64(5), m.FramesDropped)
	assert.Equal(t, int64(1024), m.BytesProcessed)
	assert.Equal(t, int64(2), m.ConnectionDrops)
	assert.Equal(t, time.Millisecond*10, m.AverageLatency)
	assert.False(t, m.LastFrameTime.IsZero())
}
// Benchmark tests
// BenchmarkAudioInputManager measures manager lifecycle and accessor costs.
func BenchmarkAudioInputManager(b *testing.B) {
	mgr := NewAudioInputManager()
	b.Run("Start", func(b *testing.B) {
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = mgr.Start()
			mgr.Stop()
		}
	})
	b.Run("GetMetrics", func(b *testing.B) {
		_ = mgr.Start()
		defer mgr.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = mgr.GetMetrics()
		}
	})
	b.Run("IsRunning", func(b *testing.B) {
		_ = mgr.Start()
		defer mgr.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = mgr.IsRunning()
		}
	})
	b.Run("IsReady", func(b *testing.B) {
		_ = mgr.Start()
		defer mgr.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = mgr.IsReady()
		}
	})
}

View File

@ -1,6 +1,7 @@
package audio
import (
"context"
"encoding/binary"
"fmt"
"io"
@ -34,7 +35,7 @@ const (
OutputMessageTypeAck
)
// OutputIPCMessage represents a message sent over IPC
// OutputIPCMessage represents an IPC message for audio output
type OutputIPCMessage struct {
Magic uint32
Type OutputMessageType
@ -43,32 +44,62 @@ type OutputIPCMessage struct {
Data []byte
}
// Implement IPCMessage interface
func (msg *OutputIPCMessage) GetMagic() uint32 {
return msg.Magic
// OutputOptimizedMessage is a pre-allocated scratch message for the output
// IPC path: a fixed 17-byte header buffer plus a reusable data buffer,
// pooled by OutputMessagePool to avoid per-frame allocations.
type OutputOptimizedMessage struct {
	header [17]byte // Pre-allocated header buffer (size is a compile-time constant)
	data   []byte   // Reusable data buffer
}
func (msg *OutputIPCMessage) GetType() uint8 {
return uint8(msg.Type)
// OutputMessagePool recycles OutputOptimizedMessages through a buffered
// channel so the hot send path does not allocate per frame.
type OutputMessagePool struct {
	pool chan *OutputOptimizedMessage // buffered channel of reusable messages
}
func (msg *OutputIPCMessage) GetLength() uint32 {
return msg.Length
// NewOutputMessagePool builds a pool pre-filled with size messages, each
// carrying a data buffer of GetConfig().OutputMaxFrameSize bytes.
func NewOutputMessagePool(size int) *OutputMessagePool {
	p := &OutputMessagePool{
		pool: make(chan *OutputOptimizedMessage, size),
	}
	// Fill the channel up front so Get never allocates in the steady state.
	for i := 0; i < size; i++ {
		p.pool <- &OutputOptimizedMessage{
			data: make([]byte, GetConfig().OutputMaxFrameSize),
		}
	}
	return p
}
func (msg *OutputIPCMessage) GetTimestamp() int64 {
return msg.Timestamp
// Get returns a pooled message, or allocates a fresh one when the pool is
// momentarily exhausted.
func (p *OutputMessagePool) Get() *OutputOptimizedMessage {
	select {
	case m := <-p.pool:
		return m
	default:
		// Pool exhausted — fall back to a fresh allocation.
		return &OutputOptimizedMessage{
			data: make([]byte, GetConfig().OutputMaxFrameSize),
		}
	}
}
func (msg *OutputIPCMessage) GetData() []byte {
return msg.Data
// Put hands msg back to the pool; when the pool is already full the message
// is simply dropped and reclaimed by the GC.
func (p *OutputMessagePool) Put(msg *OutputOptimizedMessage) {
	select {
	case p.pool <- msg:
		// Returned to the pool for reuse.
	default:
		// Pool full — let the GC reclaim it.
	}
}
// Global shared message pool for output IPC client header reading
var globalOutputClientMessagePool = NewGenericMessagePool(GetConfig().OutputMessagePoolSize)
// Global message pool for output IPC
var globalOutputMessagePool = NewOutputMessagePool(GetConfig().OutputMessagePoolSize)
type AudioOutputServer struct {
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
type AudioServer struct {
// Atomic fields must be first for proper alignment on ARM
bufferSize int64 // Current buffer size (atomic)
droppedFrames int64 // Dropped frames counter (atomic)
totalFrames int64 // Total frames counter (atomic)
@ -91,7 +122,7 @@ type AudioOutputServer struct {
socketBufferConfig SocketBufferConfig
}
func NewAudioOutputServer() (*AudioOutputServer, error) {
func NewAudioServer() (*AudioServer, error) {
socketPath := getOutputSocketPath()
// Remove existing socket if any
os.Remove(socketPath)
@ -120,7 +151,7 @@ func NewAudioOutputServer() (*AudioOutputServer, error) {
// Initialize socket buffer configuration
socketBufferConfig := DefaultSocketBufferConfig()
return &AudioOutputServer{
return &AudioServer{
listener: listener,
messageChan: make(chan *OutputIPCMessage, initialBufferSize),
stopChan: make(chan struct{}),
@ -131,7 +162,7 @@ func NewAudioOutputServer() (*AudioOutputServer, error) {
}, nil
}
func (s *AudioOutputServer) Start() error {
func (s *AudioServer) Start() error {
s.mtx.Lock()
defer s.mtx.Unlock()
@ -159,14 +190,12 @@ func (s *AudioOutputServer) Start() error {
}
// acceptConnections accepts incoming connections
func (s *AudioOutputServer) acceptConnections() {
logger := logging.GetDefaultLogger().With().Str("component", "audio-server").Logger()
func (s *AudioServer) acceptConnections() {
for s.running {
conn, err := s.listener.Accept()
if err != nil {
if s.running {
// Log warning and retry on accept failure
logger.Warn().Err(err).Msg("Failed to accept connection, retrying")
// Only log error if we're still supposed to be running
continue
}
return
@ -175,6 +204,7 @@ func (s *AudioOutputServer) acceptConnections() {
// Configure socket buffers for optimal performance
if err := ConfigureSocketBuffers(conn, s.socketBufferConfig); err != nil {
// Log warning but don't fail - socket buffer optimization is not critical
logger := logging.GetDefaultLogger().With().Str("component", "audio-server").Logger()
logger.Warn().Err(err).Msg("Failed to configure socket buffers, continuing with defaults")
} else {
// Record socket buffer metrics for monitoring
@ -185,7 +215,6 @@ func (s *AudioOutputServer) acceptConnections() {
// Close existing connection if any
if s.conn != nil {
s.conn.Close()
s.conn = nil
}
s.conn = conn
s.mtx.Unlock()
@ -193,7 +222,7 @@ func (s *AudioOutputServer) acceptConnections() {
}
// startProcessorGoroutine starts the message processor
func (s *AudioOutputServer) startProcessorGoroutine() {
func (s *AudioServer) startProcessorGoroutine() {
s.wg.Add(1)
go func() {
defer s.wg.Done()
@ -214,7 +243,7 @@ func (s *AudioOutputServer) startProcessorGoroutine() {
}()
}
func (s *AudioOutputServer) Stop() {
func (s *AudioServer) Stop() {
s.mtx.Lock()
defer s.mtx.Unlock()
@ -242,7 +271,7 @@ func (s *AudioOutputServer) Stop() {
}
}
func (s *AudioOutputServer) Close() error {
func (s *AudioServer) Close() error {
s.Stop()
if s.listener != nil {
s.listener.Close()
@ -252,7 +281,7 @@ func (s *AudioOutputServer) Close() error {
return nil
}
func (s *AudioOutputServer) SendFrame(frame []byte) error {
func (s *AudioServer) SendFrame(frame []byte) error {
maxFrameSize := GetConfig().OutputMaxFrameSize
if len(frame) > maxFrameSize {
return fmt.Errorf("output frame size validation failed: got %d bytes, maximum allowed %d bytes", len(frame), maxFrameSize)
@ -289,10 +318,7 @@ func (s *AudioOutputServer) SendFrame(frame []byte) error {
}
// sendFrameToClient sends frame data directly to the connected client
// Global shared message pool for output IPC server
var globalOutputServerMessagePool = NewGenericMessagePool(GetConfig().OutputMessagePoolSize)
func (s *AudioOutputServer) sendFrameToClient(frame []byte) error {
func (s *AudioServer) sendFrameToClient(frame []byte) error {
s.mtx.Lock()
defer s.mtx.Unlock()
@ -302,55 +328,84 @@ func (s *AudioOutputServer) sendFrameToClient(frame []byte) error {
start := time.Now()
// Create output IPC message
msg := &OutputIPCMessage{
Magic: outputMagicNumber,
Type: OutputMessageTypeOpusFrame,
Length: uint32(len(frame)),
Timestamp: start.UnixNano(),
Data: frame,
}
// Get optimized message from pool
optMsg := globalOutputMessagePool.Get()
defer globalOutputMessagePool.Put(optMsg)
// Use shared WriteIPCMessage function
err := WriteIPCMessage(s.conn, msg, globalOutputServerMessagePool, &s.droppedFrames)
if err != nil {
return err
}
// Prepare header in pre-allocated buffer
binary.LittleEndian.PutUint32(optMsg.header[0:4], outputMagicNumber)
optMsg.header[4] = byte(OutputMessageTypeOpusFrame)
binary.LittleEndian.PutUint32(optMsg.header[5:9], uint32(len(frame)))
binary.LittleEndian.PutUint64(optMsg.header[9:17], uint64(start.UnixNano()))
// Record latency for monitoring
if s.latencyMonitor != nil {
writeLatency := time.Since(start)
s.latencyMonitor.RecordLatency(writeLatency, "ipc_write")
}
// Use non-blocking write with timeout
ctx, cancel := context.WithTimeout(context.Background(), GetConfig().OutputWriteTimeout)
defer cancel()
return nil
// Create a channel to signal write completion
done := make(chan error, 1)
go func() {
// Write header using pre-allocated buffer
_, err := s.conn.Write(optMsg.header[:])
if err != nil {
done <- err
return
}
// Write frame data
if len(frame) > 0 {
_, err = s.conn.Write(frame)
if err != nil {
done <- err
return
}
}
done <- nil
}()
// Wait for completion or timeout
select {
case err := <-done:
if err != nil {
atomic.AddInt64(&s.droppedFrames, 1)
return err
}
// Record latency for monitoring
if s.latencyMonitor != nil {
writeLatency := time.Since(start)
s.latencyMonitor.RecordLatency(writeLatency, "ipc_write")
}
return nil
case <-ctx.Done():
// Timeout occurred - drop frame to prevent blocking
atomic.AddInt64(&s.droppedFrames, 1)
return fmt.Errorf("write timeout after %v - frame dropped to prevent blocking", GetConfig().OutputWriteTimeout)
}
}
// GetServerStats returns server performance statistics
func (s *AudioOutputServer) GetServerStats() (total, dropped int64, bufferSize int64) {
stats := GetFrameStats(&s.totalFrames, &s.droppedFrames)
return stats.Total, stats.Dropped, atomic.LoadInt64(&s.bufferSize)
func (s *AudioServer) GetServerStats() (total, dropped int64, bufferSize int64) {
return atomic.LoadInt64(&s.totalFrames),
atomic.LoadInt64(&s.droppedFrames),
atomic.LoadInt64(&s.bufferSize)
}
type AudioOutputClient struct {
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
type AudioClient struct {
// Atomic fields must be first for proper alignment on ARM
droppedFrames int64 // Atomic counter for dropped frames
totalFrames int64 // Atomic counter for total frames
conn net.Conn
mtx sync.Mutex
running bool
bufferPool *AudioBufferPool // Buffer pool for memory optimization
conn net.Conn
mtx sync.Mutex
running bool
}
func NewAudioOutputClient() *AudioOutputClient {
return &AudioOutputClient{
bufferPool: NewAudioBufferPool(GetMaxAudioFrameSize()),
}
func NewAudioClient() *AudioClient {
return &AudioClient{}
}
// Connect connects to the audio output server
func (c *AudioOutputClient) Connect() error {
func (c *AudioClient) Connect() error {
c.mtx.Lock()
defer c.mtx.Unlock()
@ -382,7 +437,7 @@ func (c *AudioOutputClient) Connect() error {
}
// Disconnect disconnects from the audio output server
func (c *AudioOutputClient) Disconnect() {
func (c *AudioClient) Disconnect() {
c.mtx.Lock()
defer c.mtx.Unlock()
@ -398,18 +453,18 @@ func (c *AudioOutputClient) Disconnect() {
}
// IsConnected returns whether the client is connected
func (c *AudioOutputClient) IsConnected() bool {
func (c *AudioClient) IsConnected() bool {
c.mtx.Lock()
defer c.mtx.Unlock()
return c.running && c.conn != nil
}
func (c *AudioOutputClient) Close() error {
func (c *AudioClient) Close() error {
c.Disconnect()
return nil
}
func (c *AudioOutputClient) ReceiveFrame() ([]byte, error) {
func (c *AudioClient) ReceiveFrame() ([]byte, error) {
c.mtx.Lock()
defer c.mtx.Unlock()
@ -418,8 +473,8 @@ func (c *AudioOutputClient) ReceiveFrame() ([]byte, error) {
}
// Get optimized message from pool for header reading
optMsg := globalOutputClientMessagePool.Get()
defer globalOutputClientMessagePool.Put(optMsg)
optMsg := globalOutputMessagePool.Get()
defer globalOutputMessagePool.Put(optMsg)
// Read header
if _, err := io.ReadFull(c.conn, optMsg.header[:]); err != nil {
@ -443,26 +498,22 @@ func (c *AudioOutputClient) ReceiveFrame() ([]byte, error) {
return nil, fmt.Errorf("received frame size validation failed: got %d bytes, maximum allowed %d bytes", size, maxFrameSize)
}
// Read frame data using buffer pool to avoid allocation
frame := c.bufferPool.Get()
frame = frame[:size] // Resize to actual frame size
// Read frame data
frame := make([]byte, size)
if size > 0 {
if _, err := io.ReadFull(c.conn, frame); err != nil {
c.bufferPool.Put(frame) // Return buffer on error
return nil, fmt.Errorf("failed to read frame data: %w", err)
}
}
// Note: Caller is responsible for returning frame to pool via PutAudioFrameBuffer()
atomic.AddInt64(&c.totalFrames, 1)
return frame, nil
}
// GetClientStats returns client performance statistics
func (c *AudioOutputClient) GetClientStats() (total, dropped int64) {
stats := GetFrameStats(&c.totalFrames, &c.droppedFrames)
return stats.Total, stats.Dropped
func (c *AudioClient) GetClientStats() (total, dropped int64) {
return atomic.LoadInt64(&c.totalFrames),
atomic.LoadInt64(&c.droppedFrames)
}
// Helper functions

View File

@ -1,238 +0,0 @@
package audio
import (
"context"
"encoding/binary"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
)
// IPCMessage is the common contract for input and output IPC messages:
// a magic number for stream validation, a type byte, the payload length,
// a timestamp (UnixNano), and the payload bytes. WriteIPCMessage serializes
// any implementation of this interface.
type IPCMessage interface {
	GetMagic() uint32
	GetType() uint8
	GetLength() uint32
	GetTimestamp() int64
	GetData() []byte
}
// OptimizedMessage is a reusable scratch message: a fixed 17-byte header
// buffer (4 magic + 1 type + 4 length + 8 timestamp, little-endian) plus a
// growable data buffer, pooled by GenericMessagePool to avoid per-frame
// allocations.
type OptimizedMessage struct {
	header [17]byte // Pre-allocated header buffer
	data   []byte   // Reusable data buffer
}
// GenericMessagePool recycles OptimizedMessages for both the input and the
// output IPC paths. It combines a small mutex-guarded pre-allocated stack
// (the fast path for Get/Put) with a buffered channel for overflow, and
// keeps atomic hit/miss counters for GetStats.
type GenericMessagePool struct {
	// 64-bit fields must be first for proper alignment on ARM
	hitCount int64 // Pool hit counter (atomic)
	missCount int64 // Pool miss counter (atomic)
	pool chan *OptimizedMessage
	preallocated []*OptimizedMessage // Pre-allocated messages (fast-path stack)
	preallocSize int // Target size of the preallocated stack (size/4)
	maxPoolSize int
	mutex sync.RWMutex // guards preallocated
}
// NewGenericMessagePool creates a message pool holding roughly size reusable
// messages: 25% on a locked pre-allocated stack (fastest path) and the
// remainder in a buffered channel. Each message's data buffer has capacity
// GetConfig().MaxFrameSize.
func NewGenericMessagePool(size int) *GenericMessagePool {
	pool := &GenericMessagePool{
		pool:         make(chan *OptimizedMessage, size),
		preallocSize: size / 4, // 25% pre-allocated for immediate use
		maxPoolSize:  size,
	}
	maxFrameSize := GetConfig().MaxFrameSize
	// Pre-allocate some messages for immediate use.
	pool.preallocated = make([]*OptimizedMessage, pool.preallocSize)
	for i := 0; i < pool.preallocSize; i++ {
		pool.preallocated[i] = &OptimizedMessage{
			data: make([]byte, 0, maxFrameSize),
		}
	}
	// Fill the channel pool with the remainder.
	// BUGFIX: the original used a bare `break` inside the select's default
	// case, which in Go only exits the select statement, not the loop — a
	// full channel would keep spinning through the loop. A labeled break
	// exits the fill loop as intended.
fillLoop:
	for i := 0; i < size-pool.preallocSize; i++ {
		select {
		case pool.pool <- &OptimizedMessage{
			data: make([]byte, 0, maxFrameSize),
		}:
		default:
			break fillLoop
		}
	}
	return pool
}
// Get returns a pooled message, preferring the pre-allocated stack, then
// the channel pool, and finally allocating a fresh message when both are
// empty. Hit/miss counters are updated for GetStats.
func (mp *GenericMessagePool) Get() *OptimizedMessage {
	// Fast path: pop from the pre-allocated stack under the lock.
	mp.mutex.Lock()
	if n := len(mp.preallocated); n > 0 {
		msg := mp.preallocated[n-1]
		mp.preallocated = mp.preallocated[:n-1]
		mp.mutex.Unlock()
		atomic.AddInt64(&mp.hitCount, 1)
		return msg
	}
	mp.mutex.Unlock()
	// Slow path: drain the channel pool, otherwise allocate a new message.
	select {
	case msg := <-mp.pool:
		atomic.AddInt64(&mp.hitCount, 1)
		return msg
	default:
		atomic.AddInt64(&mp.missCount, 1)
		return &OptimizedMessage{
			data: make([]byte, 0, GetConfig().MaxFrameSize),
		}
	}
}
// Put recycles msg: nil is ignored, the data buffer is truncated (capacity
// kept) for reuse, and the message goes back to the pre-allocated stack when
// there is room, else to the channel pool, else is left to the GC.
func (mp *GenericMessagePool) Put(msg *OptimizedMessage) {
	if msg == nil {
		return
	}
	// Reset contents but keep the allocated capacity.
	msg.data = msg.data[:0]
	// Prefer the fast-path stack.
	mp.mutex.Lock()
	if len(mp.preallocated) < mp.preallocSize {
		mp.preallocated = append(mp.preallocated, msg)
		mp.mutex.Unlock()
		return
	}
	mp.mutex.Unlock()
	// Fall back to the channel pool.
	select {
	case mp.pool <- msg:
		// Returned to the pool.
	default:
		// Pool full — let the GC reclaim it.
	}
}
// GetStats reports the pool's hit and miss counts plus the hit rate as a
// percentage (0 when nothing has been requested yet).
func (mp *GenericMessagePool) GetStats() (hitCount, missCount int64, hitRate float64) {
	hitCount = atomic.LoadInt64(&mp.hitCount)
	missCount = atomic.LoadInt64(&mp.missCount)
	if total := hitCount + missCount; total > 0 {
		hitRate = float64(hitCount) / float64(total) * 100
	}
	return hitCount, missCount, hitRate
}
// WriteIPCMessage serializes msg (17-byte little-endian header: magic uint32,
// type byte, length uint32, timestamp uint64 — then the payload) and writes
// it to conn, using pool for the scratch header buffer. The write is bounded
// by GetConfig().WriteTimeout; on timeout or write error the frame is counted
// against droppedFramesCounter (when non-nil) and an error is returned.
func WriteIPCMessage(conn net.Conn, msg IPCMessage, pool *GenericMessagePool, droppedFramesCounter *int64) error {
	if conn == nil {
		return fmt.Errorf("connection is nil")
	}
	// Get optimized message from pool for header preparation.
	optMsg := pool.Get()
	// Prepare header in pre-allocated buffer.
	binary.LittleEndian.PutUint32(optMsg.header[0:4], msg.GetMagic())
	optMsg.header[4] = msg.GetType()
	binary.LittleEndian.PutUint32(optMsg.header[5:9], msg.GetLength())
	binary.LittleEndian.PutUint64(optMsg.header[9:17], uint64(msg.GetTimestamp()))
	timeout := GetConfig().WriteTimeout
	// BUGFIX: arm a write deadline on the socket. Previously a timed-out
	// select abandoned the writer goroutine, which could stay blocked in
	// Write indefinitely and later complete a stale partial write on the
	// shared connection, corrupting the frame stream. With the deadline the
	// goroutine always terminates promptly. Best-effort: errors ignored.
	_ = conn.SetWriteDeadline(time.Now().Add(timeout))
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	// Create a channel to signal write completion.
	done := make(chan error, 1)
	go func() {
		// BUGFIX: the goroutine owns optMsg until the write finishes, so it
		// (not the caller) returns it to the pool — the caller may return on
		// timeout while this write is still reading the header buffer, and a
		// caller-side defer would have recycled it mid-use.
		defer pool.Put(optMsg)
		// Write header using pre-allocated buffer.
		if _, err := conn.Write(optMsg.header[:]); err != nil {
			done <- err
			return
		}
		// Write data if present.
		if msg.GetLength() > 0 && msg.GetData() != nil {
			if _, err := conn.Write(msg.GetData()); err != nil {
				done <- err
				return
			}
		}
		done <- nil
	}()
	// Wait for completion or timeout.
	select {
	case err := <-done:
		if err != nil {
			if droppedFramesCounter != nil {
				atomic.AddInt64(droppedFramesCounter, 1)
			}
			return err
		}
		return nil
	case <-ctx.Done():
		// Timeout occurred - drop frame to prevent blocking.
		if droppedFramesCounter != nil {
			atomic.AddInt64(droppedFramesCounter, 1)
		}
		return fmt.Errorf("write timeout - frame dropped")
	}
}
// Common connection acceptance with retry logic
func AcceptConnectionWithRetry(listener net.Listener, maxRetries int, retryDelay time.Duration) (net.Conn, error) {
var lastErr error
for i := 0; i < maxRetries; i++ {
conn, err := listener.Accept()
if err == nil {
return conn, nil
}
lastErr = err
if i < maxRetries-1 {
time.Sleep(retryDelay)
}
}
return nil, fmt.Errorf("failed to accept connection after %d retries: %w", maxRetries, lastErr)
}
// Common frame statistics structure
type FrameStats struct {
	Total   int64
	Dropped int64
}

// GetFrameStats safely retrieves frame statistics. The counters are read
// with atomic loads so this may run concurrently with writers.
func GetFrameStats(totalCounter, droppedCounter *int64) FrameStats {
	total := atomic.LoadInt64(totalCounter)
	dropped := atomic.LoadInt64(droppedCounter)
	return FrameStats{Total: total, Dropped: dropped}
}

// CalculateDropRate calculates the drop rate percentage. A zero total
// yields 0 to avoid division by zero.
func CalculateDropRate(stats FrameStats) float64 {
	if stats.Total == 0 {
		return 0.0
	}
	rate := float64(stats.Dropped) / float64(stats.Total) * 100.0
	return rate
}
// ResetFrameStats resets frame counters to zero using atomic stores.
func ResetFrameStats(totalCounter, droppedCounter *int64) {
	for _, counter := range []*int64{totalCounter, droppedCounter} {
		atomic.StoreInt64(counter, 0)
	}
}

View File

@ -301,45 +301,8 @@ var (
micConnectionDropsValue int64
)
// UnifiedAudioMetrics provides a common structure for both input and output audio streams.
// Output streams leave FramesSent at 0 and input streams leave FramesReceived
// at 0 (see the convert* helpers below), hence the omitempty on FramesSent.
type UnifiedAudioMetrics struct {
	FramesReceived int64 `json:"frames_received"`
	FramesDropped int64 `json:"frames_dropped"`
	FramesSent int64 `json:"frames_sent,omitempty"`
	BytesProcessed int64 `json:"bytes_processed"`
	ConnectionDrops int64 `json:"connection_drops"`
	LastFrameTime time.Time `json:"last_frame_time"`
	AverageLatency time.Duration `json:"average_latency"`
}
// convertAudioMetricsToUnified converts AudioMetrics to UnifiedAudioMetrics.
// Output-side metrics carry no sent-frame counter, so FramesSent is fixed at 0.
func convertAudioMetricsToUnified(metrics AudioMetrics) UnifiedAudioMetrics {
	return UnifiedAudioMetrics{
		FramesReceived: metrics.FramesReceived,
		FramesDropped: metrics.FramesDropped,
		FramesSent: 0, // AudioMetrics doesn't have FramesSent
		BytesProcessed: metrics.BytesProcessed,
		ConnectionDrops: metrics.ConnectionDrops,
		LastFrameTime: metrics.LastFrameTime,
		AverageLatency: metrics.AverageLatency,
	}
}
// convertAudioInputMetricsToUnified converts AudioInputMetrics to UnifiedAudioMetrics.
// Input-side metrics carry no received-frame counter, so FramesReceived is fixed at 0.
func convertAudioInputMetricsToUnified(metrics AudioInputMetrics) UnifiedAudioMetrics {
	return UnifiedAudioMetrics{
		FramesReceived: 0, // AudioInputMetrics doesn't have FramesReceived
		FramesDropped: metrics.FramesDropped,
		FramesSent: metrics.FramesSent,
		BytesProcessed: metrics.BytesProcessed,
		ConnectionDrops: metrics.ConnectionDrops,
		LastFrameTime: metrics.LastFrameTime,
		AverageLatency: metrics.AverageLatency,
	}
}
// UpdateAudioMetrics updates Prometheus metrics with current audio data
func UpdateAudioMetrics(metrics UnifiedAudioMetrics) {
func UpdateAudioMetrics(metrics AudioMetrics) {
oldReceived := atomic.SwapInt64(&audioFramesReceivedValue, metrics.FramesReceived)
if metrics.FramesReceived > oldReceived {
audioFramesReceivedTotal.Add(float64(metrics.FramesReceived - oldReceived))
@ -370,7 +333,7 @@ func UpdateAudioMetrics(metrics UnifiedAudioMetrics) {
}
// UpdateMicrophoneMetrics updates Prometheus metrics with current microphone data
func UpdateMicrophoneMetrics(metrics UnifiedAudioMetrics) {
func UpdateMicrophoneMetrics(metrics AudioInputMetrics) {
oldSent := atomic.SwapInt64(&micFramesSentValue, metrics.FramesSent)
if metrics.FramesSent > oldSent {
microphoneFramesSentTotal.Add(float64(metrics.FramesSent - oldSent))
@ -494,11 +457,11 @@ func StartMetricsUpdater() {
for range ticker.C {
// Update audio output metrics
audioMetrics := GetAudioMetrics()
UpdateAudioMetrics(convertAudioMetricsToUnified(audioMetrics))
UpdateAudioMetrics(audioMetrics)
// Update microphone input metrics
micMetrics := GetAudioInputMetrics()
UpdateMicrophoneMetrics(convertAudioInputMetricsToUnified(micMetrics))
UpdateMicrophoneMetrics(micMetrics)
// Update microphone subprocess process metrics
if inputSupervisor := GetAudioInputIPCSupervisor(); inputSupervisor != nil {

View File

@ -1,120 +0,0 @@
package audio
import "time"
// Naming Standards Documentation
// This file documents the standardized naming conventions for audio components
// to ensure consistency across the entire audio system.
/*
STANDARDIZED NAMING CONVENTIONS:
1. COMPONENT HIERARCHY:
- Manager: High-level component that orchestrates multiple subsystems
- Supervisor: Process lifecycle management (start/stop/restart processes)
- Server: IPC server that handles incoming connections
- Client: IPC client that connects to servers
- Streamer: High-performance streaming component
2. NAMING PATTERNS:
Input Components:
- AudioInputManager (replaces: AudioInputManager)
- AudioInputSupervisor (replaces: AudioInputSupervisor)
- AudioInputServer (replaces: AudioInputServer)
- AudioInputClient (replaces: AudioInputClient)
- AudioInputStreamer (new: for consistency with OutputStreamer)
Output Components:
- AudioOutputManager (new: missing high-level manager)
- AudioOutputSupervisor (replaces: AudioOutputSupervisor)
- AudioOutputServer (replaces: AudioOutputServer)
- AudioOutputClient (replaces: AudioOutputClient)
- AudioOutputStreamer (replaces: OutputStreamer)
3. IPC NAMING:
- AudioInputIPCManager (replaces: AudioInputIPCManager)
- AudioOutputIPCManager (new: for consistency)
4. CONFIGURATION NAMING:
- InputIPCConfig (replaces: InputIPCConfig)
- OutputIPCConfig (new: for consistency)
5. MESSAGE NAMING:
- InputIPCMessage (replaces: InputIPCMessage)
- OutputIPCMessage (replaces: OutputIPCMessage)
- InputMessageType (replaces: InputMessageType)
- OutputMessageType (replaces: OutputMessageType)
ISSUES IDENTIFIED:
1. Missing AudioOutputManager (high-level output management)
2. Inconsistent naming: OutputStreamer vs AudioInputSupervisor
3. Missing AudioOutputIPCManager for symmetry
4. Missing OutputIPCConfig for consistency
5. Component names in logging should be standardized
IMPLEMENTATION PLAN:
1. Create AudioOutputManager to match AudioInputManager
2. Rename OutputStreamer to AudioOutputStreamer
3. Create AudioOutputIPCManager for symmetry
4. Standardize all component logging names
5. Update all references consistently
*/
// Component name constants for consistent logging. These values are used as
// the "component" field of structured zerolog loggers across the audio
// subsystem (e.g. logger.With().Str("component", ...)).
const (
	// Input component names
	AudioInputManagerComponent = "audio-input-manager"
	AudioInputSupervisorComponent = "audio-input-supervisor"
	AudioInputServerComponent = "audio-input-server"
	AudioInputClientComponent = "audio-input-client"
	AudioInputIPCComponent = "audio-input-ipc"
	// Output component names
	AudioOutputManagerComponent = "audio-output-manager"
	AudioOutputSupervisorComponent = "audio-output-supervisor"
	AudioOutputServerComponent = "audio-output-server"
	AudioOutputClientComponent = "audio-output-client"
	AudioOutputStreamerComponent = "audio-output-streamer"
	AudioOutputIPCComponent = "audio-output-ipc"
	// Common component names
	AudioRelayComponent = "audio-relay"
	AudioEventsComponent = "audio-events"
	AudioMetricsComponent = "audio-metrics"
)
// Interface definitions for consistent component behavior

// AudioManagerInterface is the contract for high-level managers that
// orchestrate audio subsystems (per the hierarchy documented above).
type AudioManagerInterface interface {
	Start() error
	Stop()
	IsRunning() bool
	IsReady() bool
	GetMetrics() interface{}
}

// AudioSupervisorInterface is the contract for process lifecycle
// supervisors (start/stop/restart of subprocesses).
type AudioSupervisorInterface interface {
	Start() error
	Stop() error
	IsRunning() bool
	GetProcessPID() int
	GetProcessMetrics() *ProcessMetrics
}

// AudioServerInterface is the contract for IPC servers handling
// incoming connections.
type AudioServerInterface interface {
	Start() error
	Stop()
	Close() error
}

// AudioClientInterface is the contract for IPC clients connecting
// to servers.
type AudioClientInterface interface {
	Connect() error
	Disconnect()
	IsConnected() bool
	Close() error
}

// AudioStreamerInterface is the contract for high-performance
// streaming components.
type AudioStreamerInterface interface {
	Start() error
	Stop()
	GetStats() (processed, dropped int64, avgProcessingTime time.Duration)
}

View File

@ -1,177 +0,0 @@
package audio
import (
"sync/atomic"
"time"
"github.com/jetkvm/kvm/internal/logging"
"github.com/rs/zerolog"
)
// AudioOutputManager manages audio output stream using IPC mode
type AudioOutputManager struct {
	metrics AudioOutputMetrics // counters updated via sync/atomic; see GetMetrics/resetMetrics
	streamer *AudioOutputStreamer // underlying IPC streamer; may be nil if creation failed (Start retries)
	logger zerolog.Logger
	running int32 // 1 while running; accessed only through atomic CAS/load
}
// AudioOutputMetrics tracks output-specific metrics.
// The int64 counters are manipulated with sync/atomic (see resetMetrics and
// GetMetrics); they are declared first — presumably for 64-bit alignment on
// 32-bit ARM, matching the convention used elsewhere in this package.
type AudioOutputMetrics struct {
	FramesReceived int64
	FramesDropped int64
	BytesProcessed int64
	ConnectionDrops int64
	LastFrameTime time.Time // not read atomically; see note in GetMetrics
	AverageLatency time.Duration // not read atomically; see note in GetMetrics
}
// NewAudioOutputManager creates a new audio output manager.
//
// If the underlying streamer cannot be created, the error is logged and the
// manager is still returned with a nil streamer; Start will retry creating
// it. The logger is built once and reused for both the error path and the
// returned manager (previously it was constructed twice).
func NewAudioOutputManager() *AudioOutputManager {
	logger := logging.GetDefaultLogger().With().Str("component", AudioOutputManagerComponent).Logger()
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		// Continue with nil streamer - handled gracefully by Start().
		logger.Error().Err(err).Msg("Failed to create audio output streamer")
	}
	return &AudioOutputManager{
		streamer: streamer,
		logger:   logger,
	}
}
// Start starts the audio output manager.
//
// The running flag is claimed first with an atomic CAS so concurrent calls
// are no-ops (nil is returned when already running). If streamer creation
// failed in the constructor, Start retries it here. On any failure the
// running flag is rolled back so a later Start can try again; metrics are
// reset when the streamer fails to start.
func (aom *AudioOutputManager) Start() error {
	if !atomic.CompareAndSwapInt32(&aom.running, 0, 1) {
		return nil // Already running
	}
	aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("starting component")
	if aom.streamer == nil {
		// Try to recreate streamer if it was nil
		streamer, err := NewAudioOutputStreamer()
		if err != nil {
			// Roll back the running flag so Start can be retried.
			atomic.StoreInt32(&aom.running, 0)
			aom.logger.Error().Err(err).Str("component", AudioOutputManagerComponent).Msg("failed to create audio output streamer")
			return err
		}
		aom.streamer = streamer
	}
	err := aom.streamer.Start()
	if err != nil {
		atomic.StoreInt32(&aom.running, 0)
		// Reset metrics on failed start
		aom.resetMetrics()
		aom.logger.Error().Err(err).Str("component", AudioOutputManagerComponent).Msg("failed to start component")
		return err
	}
	aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("component started successfully")
	return nil
}
// Stop stops the audio output manager. The running flag transitions
// 1 -> 0 exactly once via CAS, so concurrent or repeated calls are no-ops.
func (aom *AudioOutputManager) Stop() {
	if !atomic.CompareAndSwapInt32(&aom.running, 1, 0) {
		return // Already stopped
	}
	aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("stopping component")
	if s := aom.streamer; s != nil {
		s.Stop()
	}
	aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("component stopped")
}
// resetMetrics resets all metrics to zero.
// Only the atomic counters are cleared; LastFrameTime and AverageLatency
// are left unchanged.
func (aom *AudioOutputManager) resetMetrics() {
	atomic.StoreInt64(&aom.metrics.FramesReceived, 0)
	atomic.StoreInt64(&aom.metrics.FramesDropped, 0)
	atomic.StoreInt64(&aom.metrics.BytesProcessed, 0)
	atomic.StoreInt64(&aom.metrics.ConnectionDrops, 0)
}
// IsRunning returns whether the audio output manager is running
// (atomic read of the flag set by Start/Stop).
func (aom *AudioOutputManager) IsRunning() bool {
	return atomic.LoadInt32(&aom.running) == 1
}
// IsReady returns whether the audio output manager is ready to receive
// frames: running with a streamer present. Connection-level health is not
// inspected here (a possible future enhancement).
func (aom *AudioOutputManager) IsReady() bool {
	return aom.IsRunning() && aom.streamer != nil
}
// GetMetrics returns current metrics as a snapshot copy.
// Counter fields are read with atomic loads. AverageLatency and
// LastFrameTime are read plainly — NOTE(review): if any goroutine writes
// those two fields concurrently this is a data race; confirm they are only
// written from one place.
func (aom *AudioOutputManager) GetMetrics() AudioOutputMetrics {
	return AudioOutputMetrics{
		FramesReceived: atomic.LoadInt64(&aom.metrics.FramesReceived),
		FramesDropped: atomic.LoadInt64(&aom.metrics.FramesDropped),
		BytesProcessed: atomic.LoadInt64(&aom.metrics.BytesProcessed),
		ConnectionDrops: atomic.LoadInt64(&aom.metrics.ConnectionDrops),
		AverageLatency: aom.metrics.AverageLatency,
		LastFrameTime: aom.metrics.LastFrameTime,
	}
}
// GetComprehensiveMetrics returns detailed performance metrics as a nested
// map: "manager" (this manager's counters and state), plus — when a
// streamer exists — "streamer" (processed/dropped/avg time) and "detailed"
// (the streamer's own detailed stats).
func (aom *AudioOutputManager) GetComprehensiveMetrics() map[string]interface{} {
	baseMetrics := aom.GetMetrics()
	comprehensiveMetrics := map[string]interface{}{
		"manager": map[string]interface{}{
			"frames_received": baseMetrics.FramesReceived,
			"frames_dropped": baseMetrics.FramesDropped,
			"bytes_processed": baseMetrics.BytesProcessed,
			"connection_drops": baseMetrics.ConnectionDrops,
			"average_latency_ms": float64(baseMetrics.AverageLatency.Nanoseconds()) / 1e6,
			"last_frame_time": baseMetrics.LastFrameTime,
			"running": aom.IsRunning(),
			"ready": aom.IsReady(),
		},
	}
	if aom.streamer != nil {
		processed, dropped, avgTime := aom.streamer.GetStats()
		comprehensiveMetrics["streamer"] = map[string]interface{}{
			"frames_processed": processed,
			"frames_dropped": dropped,
			"avg_processing_time_ms": float64(avgTime.Nanoseconds()) / 1e6,
		}
		if detailedStats := aom.streamer.GetDetailedStats(); detailedStats != nil {
			comprehensiveMetrics["detailed"] = detailedStats
		}
	}
	return comprehensiveMetrics
}
// LogPerformanceStats logs current performance statistics at info level
// using a snapshot from GetMetrics.
func (aom *AudioOutputManager) LogPerformanceStats() {
	metrics := aom.GetMetrics()
	aom.logger.Info().
		Int64("frames_received", metrics.FramesReceived).
		Int64("frames_dropped", metrics.FramesDropped).
		Int64("bytes_processed", metrics.BytesProcessed).
		Int64("connection_drops", metrics.ConnectionDrops).
		Float64("average_latency_ms", float64(metrics.AverageLatency.Nanoseconds())/1e6).
		Bool("running", aom.IsRunning()).
		Bool("ready", aom.IsReady()).
		Msg("Audio output manager performance stats")
}
// GetStreamer returns the streamer for advanced operations.
// May be nil if streamer creation failed and Start has not succeeded yet.
func (aom *AudioOutputManager) GetStreamer() *AudioOutputStreamer {
	return aom.streamer
}

View File

@ -1,277 +0,0 @@
package audio
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAudioOutputManager tests the AudioOutputManager component via a
// table of named sub-tests, each exercising one aspect of the lifecycle.
func TestAudioOutputManager(t *testing.T) {
	tests := []struct {
		name string
		testFunc func(t *testing.T)
	}{
		{"Start", testAudioOutputManagerStart},
		{"Stop", testAudioOutputManagerStop},
		{"StartStop", testAudioOutputManagerStartStop},
		{"IsRunning", testAudioOutputManagerIsRunning},
		{"IsReady", testAudioOutputManagerIsReady},
		{"GetMetrics", testAudioOutputManagerGetMetrics},
		{"ConcurrentOperations", testAudioOutputManagerConcurrent},
		{"MultipleStarts", testAudioOutputManagerMultipleStarts},
		{"MultipleStops", testAudioOutputManagerMultipleStops},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.testFunc(t)
		})
	}
}
// testAudioOutputManagerStart checks the not-running initial state and a
// successful Start transition.
func testAudioOutputManagerStart(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Test initial state
	assert.False(t, manager.IsRunning())
	assert.False(t, manager.IsReady())
	// Test start
	err := manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())
	// Cleanup
	manager.Stop()
}
// testAudioOutputManagerStop verifies that Stop clears both the running
// and ready states after a successful Start.
func testAudioOutputManagerStop(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Start first
	err := manager.Start()
	require.NoError(t, err)
	assert.True(t, manager.IsRunning())
	// Test stop
	manager.Stop()
	assert.False(t, manager.IsRunning())
	assert.False(t, manager.IsReady())
}
// testAudioOutputManagerStartStop exercises repeated start/stop cycles to
// confirm the manager can be restarted.
func testAudioOutputManagerStartStop(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Test multiple start/stop cycles
	for i := 0; i < 3; i++ {
		// Start
		err := manager.Start()
		assert.NoError(t, err)
		assert.True(t, manager.IsRunning())
		// Stop
		manager.Stop()
		assert.False(t, manager.IsRunning())
	}
}
// testAudioOutputManagerIsRunning checks IsRunning across the full
// stopped -> running -> stopped lifecycle.
func testAudioOutputManagerIsRunning(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Initially not running
	assert.False(t, manager.IsRunning())
	// Start and check
	err := manager.Start()
	require.NoError(t, err)
	assert.True(t, manager.IsRunning())
	// Stop and check
	manager.Stop()
	assert.False(t, manager.IsRunning())
}
// testAudioOutputManagerIsReady checks IsReady before start and after stop.
// NOTE(review): there is no positive assertion that IsReady becomes true
// while running — the sleep after Start is unused; consider asserting it.
func testAudioOutputManagerIsReady(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Initially not ready
	assert.False(t, manager.IsReady())
	// Start and check ready state
	err := manager.Start()
	require.NoError(t, err)
	// Give some time for initialization
	time.Sleep(100 * time.Millisecond)
	// Stop
	manager.Stop()
	assert.False(t, manager.IsReady())
}
// testAudioOutputManagerGetMetrics checks that metrics can be fetched both
// before and during a run. NOTE(review): GetMetrics returns a struct value,
// so assert.NotNil can never fail here — asserting on individual counters
// would be stronger.
func testAudioOutputManagerGetMetrics(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Test metrics when not running
	metrics := manager.GetMetrics()
	assert.NotNil(t, metrics)
	// Start and test metrics
	err := manager.Start()
	require.NoError(t, err)
	metrics = manager.GetMetrics()
	assert.NotNil(t, metrics)
	// Cleanup
	manager.Stop()
}
// testAudioOutputManagerConcurrent hammers Start and Stop from multiple
// goroutines to exercise the atomic CAS guards: concurrent starts must
// leave the manager running exactly once, concurrent stops stopped.
func testAudioOutputManagerConcurrent(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	var wg sync.WaitGroup
	const numGoroutines = 10
	// Test concurrent starts
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			manager.Start()
		}()
	}
	wg.Wait()
	// Should be running
	assert.True(t, manager.IsRunning())
	// Test concurrent stops
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			manager.Stop()
		}()
	}
	wg.Wait()
	// Should be stopped
	assert.False(t, manager.IsRunning())
}
// testAudioOutputManagerMultipleStarts verifies that repeated Start calls
// on a running manager are no-ops that still return nil.
func testAudioOutputManagerMultipleStarts(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// First start should succeed
	err := manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())
	// Subsequent starts should be no-op
	err = manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())
	err = manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())
	// Cleanup
	manager.Stop()
}
// testAudioOutputManagerMultipleStops verifies that repeated Stop calls on
// a stopped manager are safe no-ops.
func testAudioOutputManagerMultipleStops(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)
	// Start first
	err := manager.Start()
	require.NoError(t, err)
	assert.True(t, manager.IsRunning())
	// First stop should work
	manager.Stop()
	assert.False(t, manager.IsRunning())
	// Subsequent stops should be no-op
	manager.Stop()
	assert.False(t, manager.IsRunning())
	manager.Stop()
	assert.False(t, manager.IsRunning())
}
// TestAudioOutputMetrics tests the AudioOutputMetrics functionality:
// zero-value defaults and plain field assignment round-trips.
func TestAudioOutputMetrics(t *testing.T) {
	metrics := &AudioOutputMetrics{}
	// Test initial state
	assert.Equal(t, int64(0), metrics.FramesReceived)
	assert.Equal(t, int64(0), metrics.FramesDropped)
	assert.Equal(t, int64(0), metrics.BytesProcessed)
	assert.Equal(t, int64(0), metrics.ConnectionDrops)
	assert.Equal(t, time.Duration(0), metrics.AverageLatency)
	assert.True(t, metrics.LastFrameTime.IsZero())
	// Test field assignment
	metrics.FramesReceived = 100
	metrics.FramesDropped = 5
	metrics.BytesProcessed = 1024
	metrics.ConnectionDrops = 2
	metrics.AverageLatency = 10 * time.Millisecond
	metrics.LastFrameTime = time.Now()
	// Verify assignments
	assert.Equal(t, int64(100), metrics.FramesReceived)
	assert.Equal(t, int64(5), metrics.FramesDropped)
	assert.Equal(t, int64(1024), metrics.BytesProcessed)
	assert.Equal(t, int64(2), metrics.ConnectionDrops)
	assert.Equal(t, 10*time.Millisecond, metrics.AverageLatency)
	assert.False(t, metrics.LastFrameTime.IsZero())
}
// BenchmarkAudioOutputManager benchmarks the AudioOutputManager operations:
// a full Start/Stop cycle, and the IsRunning / GetMetrics read paths on a
// running manager.
func BenchmarkAudioOutputManager(b *testing.B) {
	b.Run("Start", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			manager := NewAudioOutputManager()
			manager.Start()
			manager.Stop()
		}
	})
	b.Run("IsRunning", func(b *testing.B) {
		manager := NewAudioOutputManager()
		manager.Start()
		defer manager.Stop()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			manager.IsRunning()
		}
	})
	b.Run("GetMetrics", func(b *testing.B) {
		manager := NewAudioOutputManager()
		manager.Start()
		defer manager.Stop()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			manager.GetMetrics()
		}
	})
}

View File

@ -17,7 +17,7 @@ func RunAudioOutputServer() error {
logger.Info().Msg("Starting audio output server subprocess")
// Create audio server
server, err := NewAudioOutputServer()
server, err := NewAudioServer()
if err != nil {
logger.Error().Err(err).Msg("failed to create audio server")
return err

View File

@ -12,24 +12,23 @@ import (
"github.com/rs/zerolog"
)
// AudioOutputStreamer manages high-performance audio output streaming
type AudioOutputStreamer struct {
// Performance metrics (atomic operations for thread safety)
// OutputStreamer manages high-performance audio output streaming
type OutputStreamer struct {
// Atomic fields must be first for proper alignment on ARM
processedFrames int64 // Total processed frames counter (atomic)
droppedFrames int64 // Dropped frames counter (atomic)
processingTime int64 // Average processing time in nanoseconds (atomic)
lastStatsTime int64 // Last statistics update time (atomic)
client *AudioOutputClient
client *AudioClient
bufferPool *AudioBufferPool
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
running bool
mtx sync.Mutex
chanClosed bool // Track if processing channel is closed
// Adaptive processing configuration
// Performance optimization fields
batchSize int // Adaptive batch size for frame processing
processingChan chan []byte // Buffered channel for frame processing
statsInterval time.Duration // Statistics reporting interval
@ -43,21 +42,21 @@ var (
func getOutputStreamingLogger() *zerolog.Logger {
if outputStreamingLogger == nil {
logger := logging.GetDefaultLogger().With().Str("component", AudioOutputStreamerComponent).Logger()
logger := logging.GetDefaultLogger().With().Str("component", "audio-output").Logger()
outputStreamingLogger = &logger
}
return outputStreamingLogger
}
func NewAudioOutputStreamer() (*AudioOutputStreamer, error) {
client := NewAudioOutputClient()
func NewOutputStreamer() (*OutputStreamer, error) {
client := NewAudioClient()
// Get initial batch size from adaptive buffer manager
adaptiveManager := GetAdaptiveBufferManager()
initialBatchSize := adaptiveManager.GetOutputBufferSize()
ctx, cancel := context.WithCancel(context.Background())
return &AudioOutputStreamer{
return &OutputStreamer{
client: client,
bufferPool: NewAudioBufferPool(GetMaxAudioFrameSize()), // Use existing buffer pool
ctx: ctx,
@ -69,7 +68,7 @@ func NewAudioOutputStreamer() (*AudioOutputStreamer, error) {
}, nil
}
func (s *AudioOutputStreamer) Start() error {
func (s *OutputStreamer) Start() error {
s.mtx.Lock()
defer s.mtx.Unlock()
@ -93,7 +92,7 @@ func (s *AudioOutputStreamer) Start() error {
return nil
}
func (s *AudioOutputStreamer) Stop() {
func (s *OutputStreamer) Stop() {
s.mtx.Lock()
defer s.mtx.Unlock()
@ -104,11 +103,8 @@ func (s *AudioOutputStreamer) Stop() {
s.running = false
s.cancel()
// Close processing channel to signal goroutines (only if not already closed)
if !s.chanClosed {
close(s.processingChan)
s.chanClosed = true
}
// Close processing channel to signal goroutines
close(s.processingChan)
// Wait for all goroutines to finish
s.wg.Wait()
@ -118,7 +114,7 @@ func (s *AudioOutputStreamer) Stop() {
}
}
func (s *AudioOutputStreamer) streamLoop() {
func (s *OutputStreamer) streamLoop() {
defer s.wg.Done()
// Pin goroutine to OS thread for consistent performance
@ -157,9 +153,7 @@ func (s *AudioOutputStreamer) streamLoop() {
if n > 0 {
// Send frame for processing (non-blocking)
// Use buffer pool to avoid allocation
frameData := s.bufferPool.Get()
frameData = frameData[:n]
frameData := make([]byte, n)
copy(frameData, frameBuf[:n])
select {
@ -181,7 +175,7 @@ func (s *AudioOutputStreamer) streamLoop() {
}
// processingLoop handles frame processing in a separate goroutine
func (s *AudioOutputStreamer) processingLoop() {
func (s *OutputStreamer) processingLoop() {
defer s.wg.Done()
// Pin goroutine to OS thread for consistent performance
@ -198,29 +192,25 @@ func (s *AudioOutputStreamer) processingLoop() {
}
}()
for frameData := range s.processingChan {
// Process frame and return buffer to pool after processing
func() {
defer s.bufferPool.Put(frameData)
if _, err := s.client.ReceiveFrame(); err != nil {
if s.client.IsConnected() {
getOutputStreamingLogger().Warn().Err(err).Msg("Error reading audio frame from output server")
atomic.AddInt64(&s.droppedFrames, 1)
}
// Try to reconnect if disconnected
if !s.client.IsConnected() {
if err := s.client.Connect(); err != nil {
getOutputStreamingLogger().Warn().Err(err).Msg("Failed to reconnect")
}
for range s.processingChan {
// Process frame (currently just receiving, but can be extended)
if _, err := s.client.ReceiveFrame(); err != nil {
if s.client.IsConnected() {
getOutputStreamingLogger().Warn().Err(err).Msg("Error reading audio frame from output server")
atomic.AddInt64(&s.droppedFrames, 1)
}
// Try to reconnect if disconnected
if !s.client.IsConnected() {
if err := s.client.Connect(); err != nil {
getOutputStreamingLogger().Warn().Err(err).Msg("Failed to reconnect")
}
}
}()
}
}
}
// statisticsLoop monitors and reports performance statistics
func (s *AudioOutputStreamer) statisticsLoop() {
func (s *OutputStreamer) statisticsLoop() {
defer s.wg.Done()
ticker := time.NewTicker(s.statsInterval)
@ -237,7 +227,7 @@ func (s *AudioOutputStreamer) statisticsLoop() {
}
// reportStatistics logs current performance statistics
func (s *AudioOutputStreamer) reportStatistics() {
func (s *OutputStreamer) reportStatistics() {
processed := atomic.LoadInt64(&s.processedFrames)
dropped := atomic.LoadInt64(&s.droppedFrames)
processingTime := atomic.LoadInt64(&s.processingTime)
@ -255,7 +245,7 @@ func (s *AudioOutputStreamer) reportStatistics() {
}
// GetStats returns streaming statistics
func (s *AudioOutputStreamer) GetStats() (processed, dropped int64, avgProcessingTime time.Duration) {
func (s *OutputStreamer) GetStats() (processed, dropped int64, avgProcessingTime time.Duration) {
processed = atomic.LoadInt64(&s.processedFrames)
dropped = atomic.LoadInt64(&s.droppedFrames)
processingTimeNs := atomic.LoadInt64(&s.processingTime)
@ -264,7 +254,7 @@ func (s *AudioOutputStreamer) GetStats() (processed, dropped int64, avgProcessin
}
// GetDetailedStats returns comprehensive streaming statistics
func (s *AudioOutputStreamer) GetDetailedStats() map[string]interface{} {
func (s *OutputStreamer) GetDetailedStats() map[string]interface{} {
processed := atomic.LoadInt64(&s.processedFrames)
dropped := atomic.LoadInt64(&s.droppedFrames)
processingTime := atomic.LoadInt64(&s.processingTime)
@ -292,7 +282,7 @@ func (s *AudioOutputStreamer) GetDetailedStats() map[string]interface{} {
}
// UpdateBatchSize updates the batch size from adaptive buffer manager
func (s *AudioOutputStreamer) UpdateBatchSize() {
func (s *OutputStreamer) UpdateBatchSize() {
s.mtx.Lock()
adaptiveManager := GetAdaptiveBufferManager()
s.batchSize = adaptiveManager.GetOutputBufferSize()
@ -300,7 +290,7 @@ func (s *AudioOutputStreamer) UpdateBatchSize() {
}
// ReportLatency reports processing latency to adaptive buffer manager
func (s *AudioOutputStreamer) ReportLatency(latency time.Duration) {
func (s *OutputStreamer) ReportLatency(latency time.Duration) {
adaptiveManager := GetAdaptiveBufferManager()
adaptiveManager.UpdateLatency(latency)
}
@ -331,61 +321,17 @@ func StartAudioOutputStreaming(send func([]byte)) error {
getOutputStreamingLogger().Info().Str("socket_path", getOutputSocketPath()).Msg("Audio output streaming started, connected to output server")
buffer := make([]byte, GetMaxAudioFrameSize())
consecutiveErrors := 0
maxConsecutiveErrors := GetConfig().MaxConsecutiveErrors
errorBackoffDelay := GetConfig().RetryDelay
maxErrorBackoff := GetConfig().MaxRetryDelay
for {
select {
case <-ctx.Done():
return
default:
// Capture audio frame with enhanced error handling
// Capture audio frame
n, err := CGOAudioReadEncode(buffer)
if err != nil {
consecutiveErrors++
getOutputStreamingLogger().Warn().
Err(err).
Int("consecutive_errors", consecutiveErrors).
Msg("Failed to read/encode audio")
// Implement progressive backoff for consecutive errors
if consecutiveErrors >= maxConsecutiveErrors {
getOutputStreamingLogger().Error().
Int("consecutive_errors", consecutiveErrors).
Msg("Too many consecutive audio errors, attempting recovery")
// Try to reinitialize audio system
CGOAudioClose()
time.Sleep(errorBackoffDelay)
if initErr := CGOAudioInit(); initErr != nil {
getOutputStreamingLogger().Error().
Err(initErr).
Msg("Failed to reinitialize audio system")
// Exponential backoff for reinitialization failures
errorBackoffDelay = time.Duration(float64(errorBackoffDelay) * GetConfig().BackoffMultiplier)
if errorBackoffDelay > maxErrorBackoff {
errorBackoffDelay = maxErrorBackoff
}
} else {
getOutputStreamingLogger().Info().Msg("Audio system reinitialized successfully")
consecutiveErrors = 0
errorBackoffDelay = GetConfig().RetryDelay // Reset backoff
}
} else {
// Brief delay for transient errors
time.Sleep(GetConfig().ShortSleepDuration)
}
getOutputStreamingLogger().Warn().Err(err).Msg("Failed to read/encode audio")
continue
}
// Success - reset error counters
if consecutiveErrors > 0 {
consecutiveErrors = 0
errorBackoffDelay = GetConfig().RetryDelay
}
if n > 0 {
// Get frame buffer from pool to reduce allocations
frame := GetAudioFrameBuffer()

View File

@ -1,341 +0,0 @@
package audio
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAudioOutputStreamer tests the AudioOutputStreamer component via a
// table of named sub-tests covering construction, lifecycle, and stats.
func TestAudioOutputStreamer(t *testing.T) {
	tests := []struct {
		name string
		testFunc func(t *testing.T)
	}{
		{"NewAudioOutputStreamer", testNewAudioOutputStreamer},
		{"Start", testAudioOutputStreamerStart},
		{"Stop", testAudioOutputStreamerStop},
		{"StartStop", testAudioOutputStreamerStartStop},
		{"GetStats", testAudioOutputStreamerGetStats},
		{"GetDetailedStats", testAudioOutputStreamerGetDetailedStats},
		{"UpdateBatchSize", testAudioOutputStreamerUpdateBatchSize},
		{"ReportLatency", testAudioOutputStreamerReportLatency},
		{"ConcurrentOperations", testAudioOutputStreamerConcurrent},
		{"MultipleStarts", testAudioOutputStreamerMultipleStarts},
		{"MultipleStops", testAudioOutputStreamerMultipleStops},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.testFunc(t)
		})
	}
}
// testNewAudioOutputStreamer constructs a streamer and sanity-checks its
// initial stats; skips when the environment lacks the audio dependencies.
func testNewAudioOutputStreamer(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		// If creation fails due to missing dependencies, skip the test
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	// Test initial state
	processed, dropped, avgTime := streamer.GetStats()
	assert.GreaterOrEqual(t, processed, int64(0))
	assert.GreaterOrEqual(t, dropped, int64(0))
	assert.GreaterOrEqual(t, avgTime, time.Duration(0))
	// Cleanup
	streamer.Stop()
}
// testAudioOutputStreamerStart verifies a successful Start; skips when the
// environment lacks the audio dependencies.
func testAudioOutputStreamerStart(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	// Test start
	err = streamer.Start()
	assert.NoError(t, err)
	// Cleanup
	streamer.Stop()
}
// testAudioOutputStreamerStop verifies that Stop works after Start and
// that repeated Stop calls are safe.
func testAudioOutputStreamerStop(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	// Start first
	err = streamer.Start()
	require.NoError(t, err)
	// Test stop
	streamer.Stop()
	// Multiple stops should be safe
	streamer.Stop()
	streamer.Stop()
}
// testAudioOutputStreamerStartStop exercises repeated start/stop cycles to
// confirm the streamer can be restarted.
func testAudioOutputStreamerStartStop(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	// Test multiple start/stop cycles
	for i := 0; i < 3; i++ {
		// Start
		err = streamer.Start()
		assert.NoError(t, err)
		// Stop
		streamer.Stop()
	}
}
// testAudioOutputStreamerGetStats checks GetStats both before Start (all
// counters zero) and while running (counters non-negative).
func testAudioOutputStreamerGetStats(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	// Test stats when not running
	processed, dropped, avgTime := streamer.GetStats()
	assert.Equal(t, int64(0), processed)
	assert.Equal(t, int64(0), dropped)
	assert.GreaterOrEqual(t, avgTime, time.Duration(0))
	// Start and test stats
	err = streamer.Start()
	require.NoError(t, err)
	processed, dropped, avgTime = streamer.GetStats()
	assert.GreaterOrEqual(t, processed, int64(0))
	assert.GreaterOrEqual(t, dropped, int64(0))
	assert.GreaterOrEqual(t, avgTime, time.Duration(0))
	// Cleanup
	streamer.Stop()
}
// testAudioOutputStreamerGetDetailedStats checks that the detailed stats
// map contains the expected keys before and after Start.
func testAudioOutputStreamerGetDetailedStats(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	// Fix: register cleanup immediately. The original called Stop() only
	// at the end, so the require.NoError below leaked the streamer on
	// failure. Stop() is safe on an unstarted streamer.
	defer streamer.Stop()
	// Test detailed stats
	stats := streamer.GetDetailedStats()
	assert.NotNil(t, stats)
	assert.Contains(t, stats, "processed_frames")
	assert.Contains(t, stats, "dropped_frames")
	assert.Contains(t, stats, "batch_size")
	assert.Contains(t, stats, "connected")
	assert.Equal(t, int64(0), stats["processed_frames"])
	assert.Equal(t, int64(0), stats["dropped_frames"])
	// Start and test detailed stats
	err = streamer.Start()
	require.NoError(t, err)
	stats = streamer.GetDetailedStats()
	assert.NotNil(t, stats)
	assert.Contains(t, stats, "processed_frames")
	assert.Contains(t, stats, "dropped_frames")
}
// testAudioOutputStreamerUpdateBatchSize checks that repeated calls to
// UpdateBatchSize (which delegates to the adaptive manager) do not panic.
func testAudioOutputStreamerUpdateBatchSize(t *testing.T) {
	s, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, s)
	for call := 0; call < 3; call++ {
		s.UpdateBatchSize()
	}
	s.Stop()
}
// testAudioOutputStreamerReportLatency feeds a few representative latency
// samples and verifies reporting does not panic.
func testAudioOutputStreamerReportLatency(t *testing.T) {
	s, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, s)
	for _, sample := range []time.Duration{10 * time.Millisecond, 5 * time.Millisecond, 15 * time.Millisecond} {
		s.ReportLatency(sample)
	}
	s.Stop()
}
// testAudioOutputStreamerConcurrent hammers Start, GetStats,
// UpdateBatchSize, ReportLatency and Stop from many goroutines at once.
// It asserts nothing about the results; its value is that it fails under
// `go test -race` if these methods are not safe for concurrent use.
func testAudioOutputStreamerConcurrent(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)
	var wg sync.WaitGroup
	const numGoroutines = 10
	// Phase 1: concurrent starts. Only one can win; the rest must fail
	// gracefully without racing.
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			streamer.Start()
		}()
	}
	wg.Wait()
	// Phase 2: concurrent stats reads, batch-size updates and latency
	// reports while the streamer is running (3 goroutines per iteration).
	wg.Add(numGoroutines * 3)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			streamer.GetStats()
		}()
		go func() {
			defer wg.Done()
			streamer.UpdateBatchSize()
		}()
		go func() {
			defer wg.Done()
			streamer.ReportLatency(10 * time.Millisecond)
		}()
	}
	wg.Wait()
	// Phase 3: concurrent stops. Stop must be idempotent and race-free.
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			streamer.Stop()
		}()
	}
	wg.Wait()
}
// testAudioOutputStreamerMultipleStarts confirms that only the first
// Start succeeds and subsequent calls report "already running".
func testAudioOutputStreamerMultipleStarts(t *testing.T) {
	s, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, s)
	// The first Start must succeed.
	assert.NoError(t, s.Start())
	// Every Start after that must fail with "already running".
	for attempt := 0; attempt < 2; attempt++ {
		err = s.Start()
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "already running")
	}
	s.Stop()
}
// testAudioOutputStreamerMultipleStops verifies that Stop is idempotent
// after a successful Start.
func testAudioOutputStreamerMultipleStops(t *testing.T) {
	s, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, s)
	require.NoError(t, s.Start())
	for call := 0; call < 3; call++ {
		s.Stop()
	}
}
// BenchmarkAudioOutputStreamer measures the cost of the streamer's
// hot-path accessors: GetStats, UpdateBatchSize and ReportLatency.
func BenchmarkAudioOutputStreamer(b *testing.B) {
	b.Run("GetStats", func(b *testing.B) {
		s, err := NewAudioOutputStreamer()
		if err != nil {
			b.Skipf("Skipping benchmark due to missing dependencies: %v", err)
			return
		}
		defer s.Stop()
		s.Start()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			s.GetStats()
		}
	})
	b.Run("UpdateBatchSize", func(b *testing.B) {
		s, err := NewAudioOutputStreamer()
		if err != nil {
			b.Skipf("Skipping benchmark due to missing dependencies: %v", err)
			return
		}
		defer s.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			s.UpdateBatchSize()
		}
	})
	b.Run("ReportLatency", func(b *testing.B) {
		s, err := NewAudioOutputStreamer()
		if err != nil {
			b.Skipf("Skipping benchmark due to missing dependencies: %v", err)
			return
		}
		defer s.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			s.ReportLatency(10 * time.Millisecond)
		}
	})
}

View File

@ -19,14 +19,13 @@ type AudioRelay struct {
framesRelayed int64
framesDropped int64
client *AudioOutputClient
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
logger *zerolog.Logger
running bool
mutex sync.RWMutex
bufferPool *AudioBufferPool // Buffer pool for memory optimization
client *AudioClient
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
logger *zerolog.Logger
running bool
mutex sync.RWMutex
// WebRTC integration
audioTrack AudioTrackWriter
@ -45,10 +44,9 @@ func NewAudioRelay() *AudioRelay {
logger := logging.GetDefaultLogger().With().Str("component", "audio-relay").Logger()
return &AudioRelay{
ctx: ctx,
cancel: cancel,
logger: &logger,
bufferPool: NewAudioBufferPool(GetMaxAudioFrameSize()),
ctx: ctx,
cancel: cancel,
logger: &logger,
}
}
@ -62,7 +60,7 @@ func (r *AudioRelay) Start(audioTrack AudioTrackWriter, config AudioConfig) erro
}
// Create audio client to connect to subprocess
client := NewAudioOutputClient()
client := NewAudioClient()
r.client = client
r.audioTrack = audioTrack
r.config = config
@ -190,14 +188,8 @@ func (r *AudioRelay) forwardToWebRTC(frame []byte) error {
// Prepare sample data
var sampleData []byte
if muted {
// Send silence when muted - use buffer pool to avoid allocation
sampleData = r.bufferPool.Get()
sampleData = sampleData[:len(frame)] // Resize to frame length
// Clear the buffer to create silence
for i := range sampleData {
sampleData[i] = 0
}
defer r.bufferPool.Put(sampleData) // Return to pool after use
// Send silence when muted
sampleData = make([]byte, len(frame))
} else {
sampleData = frame
}

View File

@ -34,8 +34,8 @@ func getMaxRestartDelay() time.Duration {
return GetConfig().MaxRestartDelay
}
// AudioOutputSupervisor manages the audio output server subprocess lifecycle
type AudioOutputSupervisor struct {
// AudioServerSupervisor manages the audio server subprocess lifecycle
type AudioServerSupervisor struct {
ctx context.Context
cancel context.CancelFunc
logger *zerolog.Logger
@ -52,10 +52,8 @@ type AudioOutputSupervisor struct {
lastExitTime time.Time
// Channels for coordination
processDone chan struct{}
stopChan chan struct{}
stopChanClosed bool // Track if stopChan is closed
processDoneClosed bool // Track if processDone is closed
processDone chan struct{}
stopChan chan struct{}
// Process monitoring
processMonitor *ProcessMonitor
@ -66,12 +64,12 @@ type AudioOutputSupervisor struct {
onRestart func(attempt int, delay time.Duration)
}
// NewAudioOutputSupervisor creates a new audio output server supervisor
func NewAudioOutputSupervisor() *AudioOutputSupervisor {
// NewAudioServerSupervisor creates a new audio server supervisor
func NewAudioServerSupervisor() *AudioServerSupervisor {
ctx, cancel := context.WithCancel(context.Background())
logger := logging.GetDefaultLogger().With().Str("component", AudioOutputSupervisorComponent).Logger()
logger := logging.GetDefaultLogger().With().Str("component", "audio-supervisor").Logger()
return &AudioOutputSupervisor{
return &AudioServerSupervisor{
ctx: ctx,
cancel: cancel,
logger: &logger,
@ -82,7 +80,7 @@ func NewAudioOutputSupervisor() *AudioOutputSupervisor {
}
// SetCallbacks sets optional callbacks for process lifecycle events
func (s *AudioOutputSupervisor) SetCallbacks(
func (s *AudioServerSupervisor) SetCallbacks(
onStart func(pid int),
onExit func(pid int, exitCode int, crashed bool),
onRestart func(attempt int, delay time.Duration),
@ -95,100 +93,79 @@ func (s *AudioOutputSupervisor) SetCallbacks(
s.onRestart = onRestart
}
// Start begins supervising the audio output server process
func (s *AudioOutputSupervisor) Start() error {
// Start begins supervising the audio server process
func (s *AudioServerSupervisor) Start() error {
if !atomic.CompareAndSwapInt32(&s.running, 0, 1) {
return fmt.Errorf("audio output supervisor is already running")
return fmt.Errorf("supervisor already running")
}
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("starting component")
s.logger.Info().Msg("starting audio server supervisor")
// Recreate channels in case they were closed by a previous Stop() call
s.mutex.Lock()
s.processDone = make(chan struct{})
s.stopChan = make(chan struct{})
s.stopChanClosed = false // Reset channel closed flag
s.processDoneClosed = false // Reset channel closed flag
// Recreate context as well since it might have been cancelled
s.ctx, s.cancel = context.WithCancel(context.Background())
// Reset restart tracking on start
s.restartAttempts = s.restartAttempts[:0]
s.lastExitCode = 0
s.lastExitTime = time.Time{}
s.mutex.Unlock()
// Start the supervision loop
go s.supervisionLoop()
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component started successfully")
return nil
}
// Stop gracefully stops the audio server and supervisor
func (s *AudioOutputSupervisor) Stop() {
func (s *AudioServerSupervisor) Stop() error {
if !atomic.CompareAndSwapInt32(&s.running, 1, 0) {
return // Already stopped
return nil // Already stopped
}
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("stopping component")
s.logger.Info().Msg("stopping audio server supervisor")
// Signal stop and wait for cleanup
s.mutex.Lock()
if !s.stopChanClosed {
close(s.stopChan)
s.stopChanClosed = true
}
s.mutex.Unlock()
close(s.stopChan)
s.cancel()
// Wait for process to exit
select {
case <-s.processDone:
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component stopped gracefully")
s.logger.Info().Msg("audio server process stopped gracefully")
case <-time.After(GetConfig().SupervisorTimeout):
s.logger.Warn().Str("component", AudioOutputSupervisorComponent).Msg("component did not stop gracefully, forcing termination")
s.logger.Warn().Msg("audio server process did not stop gracefully, forcing termination")
s.forceKillProcess()
}
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component stopped")
return nil
}
// IsRunning returns true if the supervisor is running
func (s *AudioOutputSupervisor) IsRunning() bool {
func (s *AudioServerSupervisor) IsRunning() bool {
return atomic.LoadInt32(&s.running) == 1
}
// GetProcessPID returns the current process PID (0 if not running)
func (s *AudioOutputSupervisor) GetProcessPID() int {
func (s *AudioServerSupervisor) GetProcessPID() int {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.processPID
}
// GetLastExitInfo returns information about the last process exit
func (s *AudioOutputSupervisor) GetLastExitInfo() (exitCode int, exitTime time.Time) {
func (s *AudioServerSupervisor) GetLastExitInfo() (exitCode int, exitTime time.Time) {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.lastExitCode, s.lastExitTime
}
// GetProcessMetrics returns current process metrics if the process is running
func (s *AudioOutputSupervisor) GetProcessMetrics() *ProcessMetrics {
func (s *AudioServerSupervisor) GetProcessMetrics() *ProcessMetrics {
s.mutex.RLock()
pid := s.processPID
s.mutex.RUnlock()
if pid == 0 {
// Return default metrics when no process is running
return &ProcessMetrics{
PID: 0,
CPUPercent: 0.0,
MemoryRSS: 0,
MemoryVMS: 0,
MemoryPercent: 0.0,
Timestamp: time.Now(),
ProcessName: "audio-output-server",
}
return nil
}
metrics := s.processMonitor.GetCurrentMetrics()
@ -197,28 +174,13 @@ func (s *AudioOutputSupervisor) GetProcessMetrics() *ProcessMetrics {
return &metric
}
}
// Return default metrics if process not found in monitor
return &ProcessMetrics{
PID: pid,
CPUPercent: 0.0,
MemoryRSS: 0,
MemoryVMS: 0,
MemoryPercent: 0.0,
Timestamp: time.Now(),
ProcessName: "audio-output-server",
}
return nil
}
// supervisionLoop is the main supervision loop
func (s *AudioOutputSupervisor) supervisionLoop() {
func (s *AudioServerSupervisor) supervisionLoop() {
defer func() {
s.mutex.Lock()
if !s.processDoneClosed {
close(s.processDone)
s.processDoneClosed = true
}
s.mutex.Unlock()
close(s.processDone)
s.logger.Info().Msg("audio server supervision ended")
}()
@ -290,7 +252,7 @@ func (s *AudioOutputSupervisor) supervisionLoop() {
}
// startProcess starts the audio server process
func (s *AudioOutputSupervisor) startProcess() error {
func (s *AudioServerSupervisor) startProcess() error {
execPath, err := os.Executable()
if err != nil {
return fmt.Errorf("failed to get executable path: %w", err)
@ -323,7 +285,7 @@ func (s *AudioOutputSupervisor) startProcess() error {
}
// waitForProcessExit waits for the current process to exit and logs the result
func (s *AudioOutputSupervisor) waitForProcessExit() {
func (s *AudioServerSupervisor) waitForProcessExit() {
s.mutex.RLock()
cmd := s.cmd
pid := s.processPID
@ -376,7 +338,7 @@ func (s *AudioOutputSupervisor) waitForProcessExit() {
}
// terminateProcess gracefully terminates the current process
func (s *AudioOutputSupervisor) terminateProcess() {
func (s *AudioServerSupervisor) terminateProcess() {
s.mutex.RLock()
cmd := s.cmd
pid := s.processPID
@ -403,14 +365,14 @@ func (s *AudioOutputSupervisor) terminateProcess() {
select {
case <-done:
s.logger.Info().Int("pid", pid).Msg("audio server process terminated gracefully")
case <-time.After(GetConfig().OutputSupervisorTimeout):
case <-time.After(GetConfig().InputSupervisorTimeout):
s.logger.Warn().Int("pid", pid).Msg("process did not terminate gracefully, sending SIGKILL")
s.forceKillProcess()
}
}
// forceKillProcess forcefully kills the current process
func (s *AudioOutputSupervisor) forceKillProcess() {
func (s *AudioServerSupervisor) forceKillProcess() {
s.mutex.RLock()
cmd := s.cmd
pid := s.processPID
@ -427,7 +389,7 @@ func (s *AudioOutputSupervisor) forceKillProcess() {
}
// shouldRestart determines if the process should be restarted
func (s *AudioOutputSupervisor) shouldRestart() bool {
func (s *AudioServerSupervisor) shouldRestart() bool {
if atomic.LoadInt32(&s.running) == 0 {
return false // Supervisor is stopping
}
@ -449,7 +411,7 @@ func (s *AudioOutputSupervisor) shouldRestart() bool {
}
// recordRestartAttempt records a restart attempt
func (s *AudioOutputSupervisor) recordRestartAttempt() {
func (s *AudioServerSupervisor) recordRestartAttempt() {
s.mutex.Lock()
defer s.mutex.Unlock()
@ -457,7 +419,7 @@ func (s *AudioOutputSupervisor) recordRestartAttempt() {
}
// calculateRestartDelay calculates the delay before next restart attempt
func (s *AudioOutputSupervisor) calculateRestartDelay() time.Duration {
func (s *AudioServerSupervisor) calculateRestartDelay() time.Duration {
s.mutex.RLock()
defer s.mutex.RUnlock()

View File

@ -1,217 +0,0 @@
package audio
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewAudioOutputSupervisor checks the constructor's initial state.
func TestNewAudioOutputSupervisor(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	assert.NotNil(t, sup)
	// A freshly built supervisor must not be running yet.
	assert.False(t, sup.IsRunning())
}
// TestAudioOutputSupervisorStart verifies that Start succeeds once and
// rejects a second call while the supervisor is running.
func TestAudioOutputSupervisorStart(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	assert.NoError(t, sup.Start())
	assert.True(t, sup.IsRunning())
	// A second Start on a running supervisor must be rejected.
	err := sup.Start()
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "already running")
	sup.Stop()
}
// TestAudioOutputSupervisorStop covers stopping both an idle and a
// running supervisor.
func TestAudioOutputSupervisorStop(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	// Stopping a supervisor that never started must be a no-op.
	sup.Stop()
	assert.False(t, sup.IsRunning())
	// A started supervisor must report not-running after Stop.
	require.NoError(t, sup.Start())
	assert.True(t, sup.IsRunning())
	sup.Stop()
	assert.False(t, sup.IsRunning())
}
// TestAudioOutputSupervisorIsRunning tracks the running flag across the
// lifecycle: new -> started -> stopped.
func TestAudioOutputSupervisorIsRunning(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	assert.False(t, sup.IsRunning())
	require.NoError(t, sup.Start())
	assert.True(t, sup.IsRunning())
	sup.Stop()
	assert.False(t, sup.IsRunning())
}
// TestAudioOutputSupervisorGetProcessMetrics checks that process metrics
// are returned whether or not the supervisor is running.
func TestAudioOutputSupervisorGetProcessMetrics(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	// Even without a managed process, metrics must be non-nil.
	assert.NotNil(t, sup.GetProcessMetrics())
	require.NoError(t, sup.Start())
	assert.NotNil(t, sup.GetProcessMetrics())
	sup.Stop()
}
// TestAudioOutputSupervisorConcurrentOperations stresses Start/Stop,
// GetProcessMetrics and IsRunning from overlapping goroutines. It makes
// no assertions about interleavings; its purpose is to surface data
// races under `go test -race`.
func TestAudioOutputSupervisorConcurrentOperations(t *testing.T) {
	supervisor := NewAudioOutputSupervisor()
	require.NotNil(t, supervisor)
	var wg sync.WaitGroup
	// Racing Start/Stop pairs: either order must leave the supervisor in
	// a consistent state.
	for i := 0; i < 10; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			_ = supervisor.Start()
		}()
		go func() {
			defer wg.Done()
			supervisor.Stop()
		}()
	}
	// Metric reads concurrent with the lifecycle churn above.
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = supervisor.GetProcessMetrics()
		}()
	}
	// Status checks concurrent with everything else.
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = supervisor.IsRunning()
		}()
	}
	wg.Wait()
	// Final Stop ensures no supervision loop outlives the test.
	supervisor.Stop()
}
// TestAudioOutputSupervisorMultipleStartStop runs several full
// start/stop cycles to ensure state resets correctly between them.
func TestAudioOutputSupervisorMultipleStartStop(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	for cycle := 0; cycle < 5; cycle++ {
		assert.NoError(t, sup.Start())
		assert.True(t, sup.IsRunning())
		sup.Stop()
		assert.False(t, sup.IsRunning())
	}
}
// TestAudioOutputSupervisorHealthCheck starts the supervisor, waits
// briefly for its monitoring to spin up, and confirms it stays running.
func TestAudioOutputSupervisorHealthCheck(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	require.NoError(t, sup.Start())
	// Give health monitoring a moment to initialize.
	time.Sleep(100 * time.Millisecond)
	assert.True(t, sup.IsRunning())
	sup.Stop()
}
// TestAudioOutputSupervisorProcessManagement verifies the supervisor
// keeps running while managing its subprocess and shuts down cleanly.
func TestAudioOutputSupervisorProcessManagement(t *testing.T) {
	sup := NewAudioOutputSupervisor()
	require.NotNil(t, sup)
	require.NoError(t, sup.Start())
	// Allow process management to initialize before checking state.
	time.Sleep(200 * time.Millisecond)
	assert.True(t, sup.IsRunning())
	sup.Stop()
	// The supervisor must have stopped cleanly.
	assert.False(t, sup.IsRunning())
}
// Benchmark tests
// BenchmarkAudioOutputSupervisor measures the supervisor's lifecycle and
// accessor costs. All sub-benchmarks share a single supervisor instance.
func BenchmarkAudioOutputSupervisor(b *testing.B) {
	sup := NewAudioOutputSupervisor()
	b.Run("Start", func(b *testing.B) {
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = sup.Start()
			sup.Stop()
		}
	})
	b.Run("GetProcessMetrics", func(b *testing.B) {
		_ = sup.Start()
		defer sup.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = sup.GetProcessMetrics()
		}
	})
	b.Run("IsRunning", func(b *testing.B) {
		_ = sup.Start()
		defer sup.Stop()
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = sup.IsRunning()
		}
	})
}

View File

@ -1,177 +0,0 @@
package audio
import (
"errors"
"time"
)
// Validation errors
//
// Sentinel errors returned by the validation helpers in this file;
// callers can match them with errors.Is.
var (
	ErrInvalidAudioQuality = errors.New("invalid audio quality level")
	ErrInvalidFrameSize = errors.New("invalid frame size")
	ErrInvalidFrameData = errors.New("invalid frame data")
	ErrInvalidBufferSize = errors.New("invalid buffer size")
	ErrInvalidPriority = errors.New("invalid priority value")
	ErrInvalidLatency = errors.New("invalid latency value")
	ErrInvalidConfiguration = errors.New("invalid configuration")
	ErrInvalidSocketConfig = errors.New("invalid socket configuration")
	ErrInvalidMetricsInterval = errors.New("invalid metrics interval")
	ErrInvalidSampleRate = errors.New("invalid sample rate")
	ErrInvalidChannels = errors.New("invalid channels")
)
// ValidateAudioQuality reports whether quality is one of the supported
// audio quality levels; any other value yields ErrInvalidAudioQuality.
func ValidateAudioQuality(quality AudioQuality) error {
	switch quality {
	case AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra:
		return nil
	}
	return ErrInvalidAudioQuality
}
// ValidateFrameData validates an audio frame's payload: it must be
// non-empty and no larger than the configured maximum frame size.
func ValidateFrameData(data []byte) error {
	if len(data) == 0 {
		return ErrInvalidFrameData
	}
	limit := 4096 // conservative fallback when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		limit = cfg.MaxAudioFrameSize
	}
	if len(data) > limit {
		return ErrInvalidFrameSize
	}
	return nil
}
// ValidateZeroCopyFrame validates a zero-copy audio frame: it must be
// non-nil, carry a non-empty payload, and stay within the configured
// maximum frame size.
func ValidateZeroCopyFrame(frame *ZeroCopyAudioFrame) error {
	if frame == nil {
		return ErrInvalidFrameData
	}
	payload := frame.Data()
	if len(payload) == 0 {
		return ErrInvalidFrameData
	}
	limit := 4096 // conservative fallback when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		limit = cfg.MaxAudioFrameSize
	}
	if len(payload) > limit {
		return ErrInvalidFrameSize
	}
	return nil
}
// ValidateBufferSize checks that size is positive and does not exceed
// the configured socket buffer ceiling.
func ValidateBufferSize(size int) error {
	if size <= 0 {
		return ErrInvalidBufferSize
	}
	limit := 262144 // 256KB fallback when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		limit = cfg.SocketMaxBuffer
	}
	if size > limit {
		return ErrInvalidBufferSize
	}
	return nil
}
// ValidateThreadPriority checks that priority lies inside the configured
// [min nice value, max RT priority] window.
func ValidateThreadPriority(priority int) error {
	lo, hi := -20, 99 // fallbacks when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		lo = cfg.MinNiceValue
		hi = cfg.RTAudioHighPriority
	}
	if priority < lo || priority > hi {
		return ErrInvalidPriority
	}
	return nil
}
// ValidateLatency checks that latency is non-negative and within the
// configured maximum.
func ValidateLatency(latency time.Duration) error {
	if latency < 0 {
		return ErrInvalidLatency
	}
	ceiling := 500 * time.Millisecond // fallback when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		ceiling = cfg.MaxLatency
	}
	if latency > ceiling {
		return ErrInvalidLatency
	}
	return nil
}
// ValidateMetricsInterval checks the metrics update interval against the
// configured lower and upper bounds.
func ValidateMetricsInterval(interval time.Duration) error {
	lo := 100 * time.Millisecond // fallbacks when config is unavailable
	hi := 10 * time.Second
	if cfg := GetConfig(); cfg != nil {
		lo = cfg.MinMetricsUpdateInterval
		hi = cfg.MaxMetricsUpdateInterval
	}
	if interval < lo || interval > hi {
		return ErrInvalidMetricsInterval
	}
	return nil
}
// ValidateAdaptiveBufferConfig checks that the min/default/max buffer
// sizes are positive, correctly ordered (min < max, min <= default <= max),
// and that max stays under the global socket buffer limit.
func ValidateAdaptiveBufferConfig(minSize, maxSize, defaultSize int) error {
	switch {
	case minSize <= 0, maxSize <= 0, defaultSize <= 0:
		return ErrInvalidBufferSize
	case minSize >= maxSize:
		return ErrInvalidBufferSize
	case defaultSize < minSize, defaultSize > maxSize:
		return ErrInvalidBufferSize
	}
	// Validate against the global socket buffer ceiling.
	globalLimit := 262144 // 256KB fallback when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		globalLimit = cfg.SocketMaxBuffer
	}
	if maxSize > globalLimit {
		return ErrInvalidBufferSize
	}
	return nil
}
// ValidateInputIPCConfig checks the sample rate, channel count and frame
// size used by the input IPC path.
func ValidateInputIPCConfig(sampleRate, channels, frameSize int) error {
	minRate, maxRate, maxCh := 8000, 48000, 8 // fallbacks when config is unavailable
	if cfg := GetConfig(); cfg != nil {
		minRate = cfg.MinSampleRate
		maxRate = cfg.MaxSampleRate
		maxCh = cfg.MaxChannels
	}
	switch {
	case sampleRate < minRate || sampleRate > maxRate:
		return ErrInvalidSampleRate
	case channels < 1 || channels > maxCh:
		return ErrInvalidChannels
	case frameSize <= 0:
		return ErrInvalidFrameSize
	}
	return nil
}

View File

@ -1,290 +0,0 @@
package audio
import (
	"errors"
	"fmt"
	"os"
	"time"
	"unsafe"

	"github.com/rs/zerolog"
)
// Enhanced validation errors with more specific context
//
// Sentinel errors used by the enhanced validators below; they are
// wrapped with fmt.Errorf("%w: ...") so callers can still match them
// with errors.Is while receiving detailed context.
var (
	ErrInvalidFrameLength = errors.New("invalid frame length")
	ErrFrameDataCorrupted = errors.New("frame data appears corrupted")
	ErrBufferAlignment = errors.New("buffer alignment invalid")
	ErrInvalidSampleFormat = errors.New("invalid sample format")
	ErrInvalidTimestamp = errors.New("invalid timestamp")
	ErrConfigurationMismatch = errors.New("configuration mismatch")
	ErrResourceExhaustion = errors.New("resource exhaustion detected")
	ErrInvalidPointer = errors.New("invalid pointer")
	ErrBufferOverflow = errors.New("buffer overflow detected")
	ErrInvalidState = errors.New("invalid state")
)
// ValidationLevel defines the level of validation to perform; higher
// levels enable progressively more expensive checks.
type ValidationLevel int

const (
	ValidationMinimal ValidationLevel = iota // Only critical safety checks
	ValidationStandard // Standard validation for production
	ValidationStrict // Comprehensive validation for debugging (enables data-integrity checks)
)
// ValidationConfig controls validation behavior for the enhanced
// validators in this file.
type ValidationConfig struct {
	Level ValidationLevel // Overall strictness tier
	EnableRangeChecks bool // Check frame sizes against expected ranges
	EnableAlignmentCheck bool // Verify 4-byte buffer alignment (ARM32)
	EnableDataIntegrity bool // Expensive sample-level corruption heuristics
	MaxValidationTime time.Duration // Soft time budget; overruns are logged, not aborted
}
// GetValidationConfig returns the validation settings currently in
// effect: standard level with range and alignment checks on, and the
// costly data-integrity pass off.
func GetValidationConfig() ValidationConfig {
	cfg := ValidationConfig{
		Level:                ValidationStandard,
		EnableRangeChecks:    true,
		EnableAlignmentCheck: true,
		// Disabled by default for performance.
		EnableDataIntegrity: false,
		// Default validation timeout.
		MaxValidationTime: 5 * time.Second,
	}
	return cfg
}
// ValidateAudioFrameFast performs minimal validation for
// performance-critical paths: the frame must be non-empty and within the
// configured maximum frame size.
func ValidateAudioFrameFast(data []byte) error {
	n := len(data)
	if n == 0 {
		return ErrInvalidFrameData
	}
	if limit := GetConfig().MaxAudioFrameSize; n > limit {
		return fmt.Errorf("%w: frame size %d exceeds maximum %d", ErrInvalidFrameSize, n, limit)
	}
	return nil
}
// ValidateAudioFrameComprehensive performs thorough validation of an
// audio frame: fast checks, optional size-range checks against the
// expected sample rate and channel count, optional ARM32 alignment
// checks, and (strict mode only) sample-level integrity checks.
//
// NOTE(review): the deferred duration check below only logs after the
// fact when validation ran longer than MaxValidationTime — it does not
// abort validation early, so "timeout protection" is best-effort.
func ValidateAudioFrameComprehensive(data []byte, expectedSampleRate int, expectedChannels int) error {
	validationConfig := GetValidationConfig()
	start := time.Now()
	// Timeout protection for validation
	defer func() {
		if time.Since(start) > validationConfig.MaxValidationTime {
			// Log validation timeout but don't fail
			getValidationLogger().Warn().Dur("duration", time.Since(start)).Msg("validation timeout exceeded")
		}
	}()
	// Basic validation first
	if err := ValidateAudioFrameFast(data); err != nil {
		return err
	}
	// Range validation
	if validationConfig.EnableRangeChecks {
		config := GetConfig()
		minFrameSize := 64 // Minimum reasonable frame size
		if len(data) < minFrameSize {
			return fmt.Errorf("%w: frame size %d below minimum %d", ErrInvalidFrameSize, len(data), minFrameSize)
		}
		// Expected bytes for 16-bit PCM: rate * channels * 2, scaled by
		// the frame duration taken from config.
		// NOTE(review): dividing by 1000 before multiplying truncates for
		// non-millisecond-aligned rates — presumably the tolerance below
		// absorbs this; confirm.
		expectedFrameSize := (expectedSampleRate * expectedChannels * 2) / 1000 * int(config.AudioQualityMediumFrameSize/time.Millisecond)
		tolerance := 512 // Frame size tolerance in bytes
		if abs(len(data)-expectedFrameSize) > tolerance {
			return fmt.Errorf("%w: frame size %d doesn't match expected %d (±%d)", ErrInvalidFrameLength, len(data), expectedFrameSize, tolerance)
		}
	}
	// Alignment validation for ARM32 compatibility
	if validationConfig.EnableAlignmentCheck {
		if uintptr(unsafe.Pointer(&data[0]))%4 != 0 {
			return fmt.Errorf("%w: buffer not 4-byte aligned for ARM32", ErrBufferAlignment)
		}
	}
	// Data integrity checks (expensive, only for debugging)
	if validationConfig.EnableDataIntegrity && validationConfig.Level == ValidationStrict {
		if err := validateAudioDataIntegrity(data, expectedChannels); err != nil {
			return err
		}
	}
	return nil
}
// ValidateZeroCopyFrameEnhanced performs enhanced validation of a
// zero-copy frame: non-nil pointer, sane reference count and
// length/capacity invariants, then a fast payload check.
func ValidateZeroCopyFrameEnhanced(frame *ZeroCopyAudioFrame) error {
	if frame == nil {
		return fmt.Errorf("%w: frame is nil", ErrInvalidPointer)
	}
	// Snapshot the frame's bookkeeping fields under its read lock.
	frame.mutex.RLock()
	refCount := frame.refCount
	length := frame.length
	capacity := frame.capacity
	frame.mutex.RUnlock()
	switch {
	case refCount <= 0:
		return fmt.Errorf("%w: invalid reference count %d", ErrInvalidState, refCount)
	case length < 0 || capacity < 0:
		return fmt.Errorf("%w: negative length (%d) or capacity (%d)", ErrInvalidState, length, capacity)
	case length > capacity:
		return fmt.Errorf("%w: length %d exceeds capacity %d", ErrBufferOverflow, length, capacity)
	}
	// Finally validate the payload itself.
	return ValidateAudioFrameFast(frame.Data())
}
// ValidateBufferBounds checks that buffer[offset:offset+length] is a
// valid range, guarding against negative inputs and integer overflow.
func ValidateBufferBounds(buffer []byte, offset, length int) error {
	switch {
	case buffer == nil:
		return fmt.Errorf("%w: buffer is nil", ErrInvalidPointer)
	case offset < 0:
		return fmt.Errorf("%w: negative offset %d", ErrInvalidState, offset)
	case length < 0:
		return fmt.Errorf("%w: negative length %d", ErrInvalidState, length)
	}
	size := len(buffer)
	if offset > size {
		return fmt.Errorf("%w: offset %d exceeds buffer length %d", ErrBufferOverflow, offset, size)
	}
	// end < offset detects signed overflow of offset+length.
	if end := offset + length; end < offset || end > size {
		return fmt.Errorf("%w: range [%d:%d] exceeds buffer length %d", ErrBufferOverflow, offset, end, size)
	}
	return nil
}
// ValidateAudioConfiguration performs comprehensive validation of an
// AudioConfig: quality level, Opus bitrate range, supported sample
// rates, channel count and frame duration.
func ValidateAudioConfiguration(config AudioConfig) error {
	if err := ValidateAudioQuality(config.Quality); err != nil {
		return fmt.Errorf("quality validation failed: %w", err)
	}
	configConstants := GetConfig()
	// Opus-defined bitrate limits.
	minBitrate := 6000   // Minimum Opus bitrate
	maxBitrate := 510000 // Maximum Opus bitrate
	if config.Bitrate < minBitrate || config.Bitrate > maxBitrate {
		return fmt.Errorf("%w: bitrate %d outside valid range [%d, %d]", ErrInvalidConfiguration, config.Bitrate, minBitrate, maxBitrate)
	}
	// Only the Opus-supported sample rates are accepted.
	validSampleRates := []int{8000, 12000, 16000, 24000, 48000}
	validSampleRate := false
	for _, rate := range validSampleRates {
		if rate == config.SampleRate {
			validSampleRate = true
			break
		}
	}
	if !validSampleRate {
		return fmt.Errorf("%w: sample rate %d not in supported rates %v", ErrInvalidSampleRate, config.SampleRate, validSampleRates)
	}
	// Channel count must fit within the configured maximum.
	if config.Channels < 1 || config.Channels > configConstants.MaxChannels {
		return fmt.Errorf("%w: channels %d outside valid range [1, %d]", ErrInvalidChannels, config.Channels, configConstants.MaxChannels)
	}
	// Frame duration sanity window.
	minFrameSize := 10 * time.Millisecond  // Minimum frame duration
	maxFrameSize := 100 * time.Millisecond // Maximum frame duration
	if config.FrameSize < minFrameSize || config.FrameSize > maxFrameSize {
		return fmt.Errorf("%w: frame size %v outside valid range [%v, %v]", ErrInvalidConfiguration, config.FrameSize, minFrameSize, maxFrameSize)
	}
	return nil
}
// ValidateResourceLimits checks that buffer-pool and zero-copy pool
// usage stay within safe multiples of the configured pool size.
func ValidateResourceLimits() error {
	config := GetConfig()
	// The frame pool may grow to at most twice the configured size.
	framePoolStats := GetAudioBufferPoolStats()
	if limit := int64(config.MaxPoolSize * 2); framePoolStats.FramePoolSize > limit {
		return fmt.Errorf("%w: frame pool size %d exceeds safe limit %d", ErrResourceExhaustion, framePoolStats.FramePoolSize, limit)
	}
	// Zero-copy allocations may reach at most three times the pool size.
	zeroCopyStats := GetGlobalZeroCopyPoolStats()
	if limit := int64(config.MaxPoolSize * 3); zeroCopyStats.AllocationCount > limit {
		return fmt.Errorf("%w: zero-copy allocations %d exceed safe limit %d", ErrResourceExhaustion, zeroCopyStats.AllocationCount, limit)
	}
	return nil
}
// validateAudioDataIntegrity performs expensive sample-level checks on
// 16-bit little-endian PCM data: alignment with the channel count, and
// heuristics for silence (mostly zero samples) and clipping (too many
// full-scale samples).
func validateAudioDataIntegrity(data []byte, channels int) error {
	if len(data)%2 != 0 {
		return fmt.Errorf("%w: odd number of bytes for 16-bit samples", ErrInvalidSampleFormat)
	}
	if len(data)%(channels*2) != 0 {
		return fmt.Errorf("%w: data length %d not aligned to channel count %d", ErrInvalidSampleFormat, len(data), channels)
	}
	// Tally obviously-suspicious sample values.
	sampleCount := len(data) / 2
	var zeroCount, maxCount int
	for i := 0; i+1 < len(data); i += 2 {
		sample := int16(data[i]) | int16(data[i+1])<<8
		if sample == 0 {
			zeroCount++
		} else if sample == 32767 || sample == -32768 {
			maxCount++
		}
	}
	// More than 90% zeros suggests silence or corruption.
	if zeroCount > sampleCount*9/10 {
		return fmt.Errorf("%w: %d%% zero samples suggests silence or corruption", ErrFrameDataCorrupted, (zeroCount*100)/sampleCount)
	}
	// More than 10% full-scale samples suggests clipping.
	if maxCount > sampleCount/10 {
		return fmt.Errorf("%w: %d%% max-value samples suggests clipping or corruption", ErrFrameDataCorrupted, (maxCount*100)/sampleCount)
	}
	return nil
}
// abs returns the absolute value of x.
//
// Note: for the minimum representable int, -x overflows back to itself
// (two's complement), so the result remains negative in that one case.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// getValidationLogger returns a logger for validation operations.
//
// The writer is nil, which zerolog reportedly maps to a discard writer —
// NOTE(review): confirm against the zerolog version in go.mod.
func getValidationLogger() *zerolog.Logger {
	l := zerolog.New(nil).With().Timestamp().Logger()
	return &l
}

View File

@ -7,38 +7,8 @@ import (
"unsafe"
)
// ZeroCopyAudioFrame represents a reference-counted audio frame for zero-copy operations.
//
// This structure implements a sophisticated memory management system designed to minimize
// allocations and memory copying in the audio pipeline:
//
// Key Features:
//
// 1. Reference Counting: Multiple components can safely share the same frame data
// without copying. The frame is automatically returned to the pool when the last
// reference is released.
//
// 2. Thread Safety: All operations are protected by RWMutex, allowing concurrent
// reads while ensuring exclusive access for modifications.
//
// 3. Pool Integration: Frames are automatically managed by ZeroCopyFramePool,
// enabling efficient reuse and preventing memory fragmentation.
//
// 4. Unsafe Pointer Access: For performance-critical CGO operations, direct
// memory access is provided while maintaining safety through reference counting.
//
// Usage Pattern:
//
// frame := pool.Get() // Acquire frame (refCount = 1)
// frame.AddRef() // Share with another component (refCount = 2)
// data := frame.Data() // Access data safely
// frame.Release() // Release reference (refCount = 1)
// frame.Release() // Final release, returns to pool (refCount = 0)
//
// Memory Safety:
// - Frames cannot be modified while shared (refCount > 1)
// - Data access is bounds-checked to prevent buffer overruns
// - Pool management prevents use-after-free scenarios
// ZeroCopyAudioFrame represents an audio frame that can be passed between
// components without copying the underlying data
type ZeroCopyAudioFrame struct {
data []byte
length int
@ -48,37 +18,7 @@ type ZeroCopyAudioFrame struct {
pooled bool
}
// ZeroCopyFramePool manages a pool of reusable zero-copy audio frames.
//
// This pool implements a three-tier memory management strategy optimized for
// real-time audio processing with minimal allocation overhead:
//
// Tier 1 - Pre-allocated Frames:
//
// A small number of frames are pre-allocated at startup and kept ready
// for immediate use. This provides the fastest possible allocation for
// the most common case and eliminates allocation latency spikes.
//
// Tier 2 - sync.Pool Cache:
//
// The standard Go sync.Pool provides efficient reuse of frames with
// automatic garbage collection integration. Frames are automatically
// returned here when memory pressure is low.
//
// Tier 3 - Memory Guard:
//
// A configurable limit prevents excessive memory usage by limiting
// the total number of allocated frames. When the limit is reached,
// allocation requests are denied to prevent OOM conditions.
//
// Performance Characteristics:
// - Pre-allocated tier: ~10ns allocation time
// - sync.Pool tier: ~50ns allocation time
// - Memory guard: Prevents unbounded growth
// - Metrics tracking: Hit/miss rates for optimization
//
// The pool is designed for embedded systems with limited memory (256MB)
// where predictable memory usage is more important than absolute performance.
// ZeroCopyFramePool manages reusable zero-copy audio frames
type ZeroCopyFramePool struct {
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
counter int64 // Frame counter (atomic)

View File

@ -967,7 +967,9 @@ func rpcSetUsbDevices(usbDevices usbgadget.Devices) error {
// Stop audio output supervisor
if audioSupervisor != nil && audioSupervisor.IsRunning() {
logger.Info().Msg("stopping audio output supervisor")
audioSupervisor.Stop()
if err := audioSupervisor.Stop(); err != nil {
logger.Error().Err(err).Msg("failed to stop audio supervisor")
}
// Wait for audio processes to fully stop before proceeding
for i := 0; i < 50; i++ { // Wait up to 5 seconds
if !audioSupervisor.IsRunning() {
@ -1061,7 +1063,9 @@ func rpcSetUsbDeviceState(device string, enabled bool) error {
// Stop audio output supervisor
if audioSupervisor != nil && audioSupervisor.IsRunning() {
logger.Info().Msg("stopping audio output supervisor")
audioSupervisor.Stop()
if err := audioSupervisor.Stop(); err != nil {
logger.Error().Err(err).Msg("failed to stop audio supervisor")
}
// Wait for audio processes to fully stop
for i := 0; i < 50; i++ { // Wait up to 5 seconds
if !audioSupervisor.IsRunning() {

View File

@ -18,7 +18,7 @@ var (
appCtx context.Context
isAudioServer bool
audioProcessDone chan struct{}
audioSupervisor *audio.AudioOutputSupervisor
audioSupervisor *audio.AudioServerSupervisor
)
// runAudioServer is now handled by audio.RunAudioOutputServer
@ -36,7 +36,7 @@ func startAudioSubprocess() error {
audio.StartAdaptiveBuffering()
// Create audio server supervisor
audioSupervisor = audio.NewAudioOutputSupervisor()
audioSupervisor = audio.NewAudioServerSupervisor()
// Set the global supervisor for access from audio package
audio.SetAudioOutputSupervisor(audioSupervisor)
@ -251,7 +251,9 @@ func Main(audioServer bool, audioInputServer bool) {
if !isAudioServer {
if audioSupervisor != nil {
logger.Info().Msg("stopping audio supervisor")
audioSupervisor.Stop()
if err := audioSupervisor.Stop(); err != nil {
logger.Error().Err(err).Msg("failed to stop audio supervisor")
}
}
<-audioProcessDone
} else {

View File

@ -9,8 +9,6 @@ import { useMicrophone } from "@/hooks/useMicrophone";
import { useAudioLevel } from "@/hooks/useAudioLevel";
import { useAudioEvents } from "@/hooks/useAudioEvents";
import api from "@/api";
import { AUDIO_CONFIG } from "@/config/constants";
import audioQualityService from "@/services/audioQualityService";
interface AudioMetrics {
frames_received: number;
@ -46,8 +44,12 @@ interface AudioConfig {
FrameSize: string;
}
// Quality labels will be managed by the audio quality service
const getQualityLabels = () => audioQualityService.getQualityLabels();
const qualityLabels = {
0: "Low",
1: "Medium",
2: "High",
3: "Ultra"
};
// Format percentage values to 2 decimal places
function formatPercentage(value: number | null | undefined): string {
@ -244,15 +246,22 @@ export default function AudioMetricsDashboard() {
const loadAudioConfig = async () => {
try {
// Use centralized audio quality service
const { audio, microphone } = await audioQualityService.loadAllConfigurations();
if (audio) {
setConfig(audio.current);
// Load config
const configResp = await api.GET("/audio/quality");
if (configResp.ok) {
const configData = await configResp.json();
setConfig(configData.current);
}
if (microphone) {
setMicrophoneConfig(microphone.current);
// Load microphone config
try {
const micConfigResp = await api.GET("/microphone/quality");
if (micConfigResp.ok) {
const micConfigData = await micConfigResp.json();
setMicrophoneConfig(micConfigData.current);
}
} catch {
// Microphone config not available
}
} catch (error) {
console.error("Failed to load audio config:", error);
@ -388,7 +397,7 @@ export default function AudioMetricsDashboard() {
const getDropRate = () => {
if (!metrics || metrics.frames_received === 0) return 0;
return ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER);
return ((metrics.frames_dropped / metrics.frames_received) * 100);
};
@ -440,7 +449,7 @@ export default function AudioMetricsDashboard() {
<div className="flex justify-between">
<span className="text-slate-500 dark:text-slate-400">Quality:</span>
<span className={cx("font-medium", getQualityColor(config.Quality))}>
{getQualityLabels()[config.Quality]}
{qualityLabels[config.Quality as keyof typeof qualityLabels]}
</span>
</div>
<div className="flex justify-between">
@ -477,7 +486,7 @@ export default function AudioMetricsDashboard() {
<div className="flex justify-between">
<span className="text-slate-500 dark:text-slate-400">Quality:</span>
<span className={cx("font-medium", getQualityColor(microphoneConfig.Quality))}>
{getQualityLabels()[microphoneConfig.Quality]}
{qualityLabels[microphoneConfig.Quality as keyof typeof qualityLabels]}
</span>
</div>
<div className="flex justify-between">
@ -659,26 +668,26 @@ export default function AudioMetricsDashboard() {
</span>
<span className={cx(
"font-bold",
getDropRate() > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
getDropRate() > 5
? "text-red-600 dark:text-red-400"
: getDropRate() > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
: getDropRate() > 1
? "text-yellow-600 dark:text-yellow-400"
: "text-green-600 dark:text-green-400"
)}>
{getDropRate().toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES)}%
{getDropRate().toFixed(2)}%
</span>
</div>
<div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-600">
<div
className={cx(
"h-2 rounded-full transition-all duration-300",
getDropRate() > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
getDropRate() > 5
? "bg-red-500"
: getDropRate() > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
: getDropRate() > 1
? "bg-yellow-500"
: "bg-green-500"
)}
style={{ width: `${Math.min(getDropRate(), AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE)}%` }}
style={{ width: `${Math.min(getDropRate(), 100)}%` }}
/>
</div>
</div>
@ -725,27 +734,27 @@ export default function AudioMetricsDashboard() {
</span>
<span className={cx(
"font-bold",
(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 5
? "text-red-600 dark:text-red-400"
: (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
: (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 1
? "text-yellow-600 dark:text-yellow-400"
: "text-green-600 dark:text-green-400"
)}>
{microphoneMetrics.frames_sent > 0 ? ((microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER).toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES) : "0.00"}%
{microphoneMetrics.frames_sent > 0 ? ((microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100).toFixed(2) : "0.00"}%
</span>
</div>
<div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-600">
<div
className={cx(
"h-2 rounded-full transition-all duration-300",
(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 5
? "bg-red-500"
: (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
: (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 1
? "bg-yellow-500"
: "bg-green-500"
)}
style={{
width: `${Math.min(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0, AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE)}%`
width: `${Math.min(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0, 100)}%`
}}
/>
</div>

View File

@ -11,8 +11,6 @@ import { useAudioLevel } from "@/hooks/useAudioLevel";
import { useAudioEvents } from "@/hooks/useAudioEvents";
import api from "@/api";
import notifications from "@/notifications";
import { AUDIO_CONFIG } from "@/config/constants";
import audioQualityService from "@/services/audioQualityService";
// Type for microphone error
interface MicrophoneError {
@ -43,8 +41,12 @@ interface AudioConfig {
FrameSize: string;
}
// Quality labels will be managed by the audio quality service
const getQualityLabels = () => audioQualityService.getQualityLabels();
const qualityLabels = {
0: "Low (32kbps)",
1: "Medium (64kbps)",
2: "High (128kbps)",
3: "Ultra (256kbps)"
};
interface AudioControlPopoverProps {
microphone: MicrophoneHookReturn;
@ -136,15 +138,20 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
const loadAudioConfigurations = async () => {
try {
// Use centralized audio quality service
const { audio, microphone } = await audioQualityService.loadAllConfigurations();
// Parallel loading for better performance
const [qualityResp, micQualityResp] = await Promise.all([
api.GET("/audio/quality"),
api.GET("/microphone/quality")
]);
if (audio) {
setCurrentConfig(audio.current);
if (qualityResp.ok) {
const qualityData = await qualityResp.json();
setCurrentConfig(qualityData.current);
}
if (microphone) {
setCurrentMicrophoneConfig(microphone.current);
if (micQualityResp.ok) {
const micQualityData = await micQualityResp.json();
setCurrentMicrophoneConfig(micQualityData.current);
}
setConfigsLoaded(true);
@ -504,7 +511,7 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
</div>
<div className="grid grid-cols-2 gap-2">
{Object.entries(getQualityLabels()).map(([quality, label]) => (
{Object.entries(qualityLabels).map(([quality, label]) => (
<button
key={`mic-${quality}`}
onClick={() => handleMicrophoneQualityChange(parseInt(quality))}
@ -545,7 +552,7 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
</div>
<div className="grid grid-cols-2 gap-2">
{Object.entries(getQualityLabels()).map(([quality, label]) => (
{Object.entries(qualityLabels).map(([quality, label]) => (
<button
key={quality}
onClick={() => handleQualityChange(parseInt(quality))}
@ -697,13 +704,13 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
<div className="text-xs text-slate-500 dark:text-slate-400">Drop Rate</div>
<div className={cx(
"font-mono text-sm",
((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
((metrics.frames_dropped / metrics.frames_received) * 100) > 5
? "text-red-600 dark:text-red-400"
: ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
: ((metrics.frames_dropped / metrics.frames_received) * 100) > 1
? "text-yellow-600 dark:text-yellow-400"
: "text-green-600 dark:text-green-400"
)}>
{((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER).toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES)}%
{((metrics.frames_dropped / metrics.frames_received) * 100).toFixed(2)}%
</div>
</div>
)}

View File

@ -1,167 +0,0 @@
// Centralized configuration constants
// Network and API Configuration
export const NETWORK_CONFIG = {
WEBSOCKET_RECONNECT_INTERVAL: 3000,
LONG_PRESS_DURATION: 3000,
ERROR_MESSAGE_TIMEOUT: 3000,
AUDIO_TEST_DURATION: 5000,
BACKEND_RETRY_DELAY: 500,
RESET_DELAY: 200,
STATE_CHECK_DELAY: 100,
VERIFICATION_DELAY: 1000,
} as const;
// Default URLs and Endpoints
export const DEFAULT_URLS = {
JETKVM_PROD_API: "https://api.jetkvm.com",
JETKVM_PROD_APP: "https://app.jetkvm.com",
JETKVM_DOCS_TROUBLESHOOTING: "https://jetkvm.com/docs/getting-started/troubleshooting",
JETKVM_DOCS_REMOTE_ACCESS: "https://jetkvm.com/docs/networking/remote-access",
JETKVM_DOCS_LOCAL_ACCESS_RESET: "https://jetkvm.com/docs/networking/local-access#reset-password",
JETKVM_GITHUB: "https://github.com/jetkvm",
CRONTAB_GURU: "https://crontab.guru/examples.html",
} as const;
// Sample ISO URLs for mounting
export const SAMPLE_ISOS = {
UBUNTU_24_04: {
name: "Ubuntu 24.04.2 Desktop",
url: "https://releases.ubuntu.com/24.04.2/ubuntu-24.04.2-desktop-amd64.iso",
},
DEBIAN_13: {
name: "Debian 13.0.0 (Testing)",
url: "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-13.0.0-amd64-netinst.iso",
},
DEBIAN_12: {
name: "Debian 12.11.0 (Stable)",
url: "https://cdimage.debian.org/mirror/cdimage/archive/12.11.0/amd64/iso-cd/debian-12.11.0-amd64-netinst.iso",
},
FEDORA_41: {
name: "Fedora 41 Workstation",
url: "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Workstation/x86_64/iso/Fedora-Workstation-Live-x86_64-41-1.4.iso",
},
OPENSUSE_LEAP: {
name: "openSUSE Leap 15.6",
url: "https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-x86_64-Media.iso",
},
OPENSUSE_TUMBLEWEED: {
name: "openSUSE Tumbleweed",
url: "https://download.opensuse.org/tumbleweed/iso/openSUSE-Tumbleweed-NET-x86_64-Current.iso",
},
ARCH_LINUX: {
name: "Arch Linux",
url: "https://archlinux.doridian.net/iso/2025.02.01/archlinux-2025.02.01-x86_64.iso",
},
NETBOOT_XYZ: {
name: "netboot.xyz",
url: "https://boot.netboot.xyz/ipxe/netboot.xyz.iso",
},
} as const;
// Security and Access Configuration
export const SECURITY_CONFIG = {
LOCALHOST_ONLY_IP: "127.0.0.1",
LOCALHOST_HOSTNAME: "localhost",
HTTPS_PROTOCOL: "https:",
} as const;
// Default Hardware Configuration
export const HARDWARE_CONFIG = {
DEFAULT_OFF_AFTER: 50000,
SAMPLE_EDID: "00FFFFFFFFFFFF00047265058A3F6101101E0104A53420783FC125A8554EA0260D5054BFEF80714F8140818081C081008B009500B300283C80A070B023403020360006442100001A000000FD00304C575716010A202020202020000000FC0042323436574C0A202020202020000000FF0054384E4545303033383532320A01F802031CF14F90020304050607011112131415161F2309070783010000011D8018711C1620582C250006442100009E011D007251D01E206E28550006442100001E8C0AD08A20E02D10103E9600064421000018C344806E70B028401720A80406442100001E00000000000000000000000000000000000000000000000000000096",
} as const;
// Audio Configuration
export const AUDIO_CONFIG = {
// Audio Level Analysis
LEVEL_UPDATE_INTERVAL: 100, // ms - throttle audio level updates for performance
FFT_SIZE: 128, // reduced from 256 for better performance
SMOOTHING_TIME_CONSTANT: 0.8,
RELEVANT_FREQUENCY_BINS: 32, // focus on lower frequencies for voice
RMS_SCALING_FACTOR: 180, // for converting RMS to percentage
MAX_LEVEL_PERCENTAGE: 100,
// Microphone Configuration
SAMPLE_RATE: 48000, // Hz - high quality audio sampling
CHANNEL_COUNT: 1, // mono for microphone input
OPERATION_DEBOUNCE_MS: 1000, // debounce microphone operations
SYNC_DEBOUNCE_MS: 1000, // debounce state synchronization
AUDIO_TEST_TIMEOUT: 100, // ms - timeout for audio testing
// Audio Output Quality Bitrates (matching backend config_constants.go)
OUTPUT_QUALITY_BITRATES: {
LOW: 32, // AudioQualityLowOutputBitrate
MEDIUM: 64, // AudioQualityMediumOutputBitrate
HIGH: 128, // AudioQualityHighOutputBitrate
ULTRA: 192, // AudioQualityUltraOutputBitrate
} as const,
// Audio Input Quality Bitrates (matching backend config_constants.go)
INPUT_QUALITY_BITRATES: {
LOW: 16, // AudioQualityLowInputBitrate
MEDIUM: 32, // AudioQualityMediumInputBitrate
HIGH: 64, // AudioQualityHighInputBitrate
ULTRA: 96, // AudioQualityUltraInputBitrate
} as const,
// Sample Rates (matching backend config_constants.go)
QUALITY_SAMPLE_RATES: {
LOW: 22050, // AudioQualityLowSampleRate
MEDIUM: 44100, // AudioQualityMediumSampleRate
HIGH: 48000, // Default SampleRate
ULTRA: 48000, // Default SampleRate
} as const,
// Microphone Sample Rates
MIC_QUALITY_SAMPLE_RATES: {
LOW: 16000, // AudioQualityMicLowSampleRate
MEDIUM: 44100, // AudioQualityMediumSampleRate
HIGH: 48000, // Default SampleRate
ULTRA: 48000, // Default SampleRate
} as const,
// Channels (matching backend config_constants.go)
QUALITY_CHANNELS: {
LOW: 1, // AudioQualityLowChannels (mono)
MEDIUM: 2, // AudioQualityMediumChannels (stereo)
HIGH: 2, // AudioQualityHighChannels (stereo)
ULTRA: 2, // AudioQualityUltraChannels (stereo)
} as const,
// Frame Sizes in milliseconds (matching backend config_constants.go)
QUALITY_FRAME_SIZES: {
LOW: 40, // AudioQualityLowFrameSize (40ms)
MEDIUM: 20, // AudioQualityMediumFrameSize (20ms)
HIGH: 20, // AudioQualityHighFrameSize (20ms)
ULTRA: 10, // AudioQualityUltraFrameSize (10ms)
} as const,
// Updated Quality Labels with correct output bitrates
QUALITY_LABELS: {
0: "Low (32 kbps)",
1: "Medium (64 kbps)",
2: "High (128 kbps)",
3: "Ultra (192 kbps)",
} as const,
// Legacy support - keeping for backward compatibility
QUALITY_BITRATES: {
LOW: 32,
MEDIUM: 64,
HIGH: 128,
ULTRA: 192, // Updated to match backend
},
// Audio Analysis
ANALYSIS_FFT_SIZE: 256, // for detailed audio analysis
ANALYSIS_UPDATE_INTERVAL: 100, // ms - 10fps for audio level updates
LEVEL_SCALING_FACTOR: 255, // for RMS to percentage conversion
// Audio Metrics Thresholds
DROP_RATE_WARNING_THRESHOLD: 1, // percentage - yellow warning
DROP_RATE_CRITICAL_THRESHOLD: 5, // percentage - red critical
PERCENTAGE_MULTIPLIER: 100, // for converting ratios to percentages
PERCENTAGE_DECIMAL_PLACES: 2, // decimal places for percentage display
} as const;
// Placeholder URLs
export const PLACEHOLDERS = {
ISO_URL: "https://example.com/image.iso",
PROXY_URL: "http://proxy.example.com:8080/",
API_URL: "https://api.example.com",
APP_URL: "https://app.example.com",
} as const;

View File

@ -7,8 +7,6 @@ import {
MAX_KEYS_PER_STEP,
} from "@/constants/macros";
import { devWarn } from '../utils/debug';
// Define the JsonRpc types for better type checking
interface JsonRpcResponse {
jsonrpc: string;
@ -784,7 +782,7 @@ export const useNetworkStateStore = create<NetworkState>((set, get) => ({
setDhcpLeaseExpiry: (expiry: Date) => {
const lease = get().dhcp_lease;
if (!lease) {
devWarn("No lease found");
console.warn("No lease found");
return;
}

View File

@ -2,7 +2,6 @@ import { useNavigate, useParams, NavigateOptions } from "react-router-dom";
import { useCallback, useMemo } from "react";
import { isOnDevice } from "../main";
import { devError } from '../utils/debug';
/**
* Generates the correct path based on whether the app is running on device or in cloud mode
@ -22,7 +21,7 @@ export function getDeviceUiPath(path: string, deviceId?: string): string {
return normalizedPath;
} else {
if (!deviceId) {
devError("No device ID provided when generating path in cloud mode");
console.error("No device ID provided when generating path in cloud mode");
throw new Error("Device ID is required for cloud mode path generation");
}
return `/devices/${deviceId}${normalizedPath}`;

View File

@ -1,7 +1,5 @@
import { useState, useEffect, useCallback } from 'react';
import { devError } from '../utils/debug';
export interface AudioDevice {
deviceId: string;
label: string;
@ -68,7 +66,7 @@ export function useAudioDevices(): UseAudioDevicesReturn {
// Audio devices enumerated
} catch (err) {
devError('Failed to enumerate audio devices:', err);
console.error('Failed to enumerate audio devices:', err);
setError(err instanceof Error ? err.message : 'Failed to access audio devices');
} finally {
setIsLoading(false);

View File

@ -1,9 +1,6 @@
import { useCallback, useEffect, useRef, useState } from 'react';
import useWebSocket, { ReadyState } from 'react-use-websocket';
import { devError, devWarn } from '../utils/debug';
import { NETWORK_CONFIG } from '../config/constants';
// Audio event types matching the backend
export type AudioEventType =
| 'audio-mute-changed'
@ -124,7 +121,7 @@ export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedD
} = useWebSocket(getWebSocketUrl(), {
shouldReconnect: () => true,
reconnectAttempts: 10,
reconnectInterval: NETWORK_CONFIG.WEBSOCKET_RECONNECT_INTERVAL,
reconnectInterval: 3000,
share: true, // Share the WebSocket connection across multiple hooks
onOpen: () => {
// WebSocket connected
@ -140,7 +137,7 @@ export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedD
globalSubscriptionState.connectionId = null;
},
onError: (event) => {
devError('[AudioEvents] WebSocket error:', event);
console.error('[AudioEvents] WebSocket error:', event);
},
});
@ -273,7 +270,7 @@ export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedD
} catch (error) {
// Ignore parsing errors for non-JSON messages (like "pong")
if (lastMessage.data !== 'pong') {
devWarn('[AudioEvents] Failed to parse WebSocket message:', error);
console.warn('[AudioEvents] Failed to parse WebSocket message:', error);
}
}
}

View File

@ -1,7 +1,5 @@
import { useEffect, useRef, useState } from 'react';
import { AUDIO_CONFIG } from '@/config/constants';
interface AudioLevelHookResult {
audioLevel: number; // 0-100 percentage
isAnalyzing: boolean;
@ -9,14 +7,14 @@ interface AudioLevelHookResult {
interface AudioLevelOptions {
enabled?: boolean; // Allow external control of analysis
updateInterval?: number; // Throttle updates (default from AUDIO_CONFIG)
updateInterval?: number; // Throttle updates (default: 100ms for 10fps instead of 60fps)
}
export const useAudioLevel = (
stream: MediaStream | null,
options: AudioLevelOptions = {}
): AudioLevelHookResult => {
const { enabled = true, updateInterval = AUDIO_CONFIG.LEVEL_UPDATE_INTERVAL } = options;
const { enabled = true, updateInterval = 100 } = options;
const [audioLevel, setAudioLevel] = useState(0);
const [isAnalyzing, setIsAnalyzing] = useState(false);
@ -61,8 +59,8 @@ export const useAudioLevel = (
const source = audioContext.createMediaStreamSource(stream);
// Configure analyser - use smaller FFT for better performance
analyser.fftSize = AUDIO_CONFIG.FFT_SIZE;
analyser.smoothingTimeConstant = AUDIO_CONFIG.SMOOTHING_TIME_CONSTANT;
analyser.fftSize = 128; // Reduced from 256 for better performance
analyser.smoothingTimeConstant = 0.8;
// Connect nodes
source.connect(analyser);
@ -89,7 +87,7 @@ export const useAudioLevel = (
// Optimized RMS calculation - process only relevant frequency bands
let sum = 0;
const relevantBins = Math.min(dataArray.length, AUDIO_CONFIG.RELEVANT_FREQUENCY_BINS);
const relevantBins = Math.min(dataArray.length, 32); // Focus on lower frequencies for voice
for (let i = 0; i < relevantBins; i++) {
const value = dataArray[i];
sum += value * value;
@ -97,7 +95,7 @@ export const useAudioLevel = (
const rms = Math.sqrt(sum / relevantBins);
// Convert to percentage (0-100) with better scaling
const level = Math.min(AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE, Math.max(0, (rms / AUDIO_CONFIG.RMS_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE));
const level = Math.min(100, Math.max(0, (rms / 180) * 100)); // Adjusted scaling for better sensitivity
setAudioLevel(Math.round(level));
};

View File

@ -2,8 +2,6 @@ import { useCallback, useEffect } from "react";
import { useRTCStore } from "@/hooks/stores";
import { devError } from '../utils/debug';
export interface JsonRpcRequest {
jsonrpc: string;
method: string;
@ -63,7 +61,7 @@ export function useJsonRpc(onRequest?: (payload: JsonRpcRequest) => void) {
return;
}
if ("error" in payload) devError(payload.error);
if ("error" in payload) console.error(payload.error);
if (!payload.id) return;
const callback = callbackStore.get(payload.id);

View File

@ -2,8 +2,6 @@ import { useCallback, useEffect, useRef, useState } from "react";
import { useRTCStore } from "@/hooks/stores";
import api from "@/api";
import { devLog, devInfo, devWarn, devError, devOnly } from "@/utils/debug";
import { NETWORK_CONFIG, AUDIO_CONFIG } from "@/config/constants";
export interface MicrophoneError {
type: 'permission' | 'device' | 'network' | 'unknown';
@ -33,14 +31,15 @@ export function useMicrophone() {
// Add debouncing refs to prevent rapid operations
const lastOperationRef = useRef<number>(0);
const operationTimeoutRef = useRef<number | null>(null);
const OPERATION_DEBOUNCE_MS = 1000; // 1 second debounce
// Debounced operation wrapper
const debouncedOperation = useCallback((operation: () => Promise<void>, operationType: string) => {
const now = Date.now();
const timeSinceLastOp = now - lastOperationRef.current;
if (timeSinceLastOp < AUDIO_CONFIG.OPERATION_DEBOUNCE_MS) {
devLog(`Debouncing ${operationType} operation - too soon (${timeSinceLastOp}ms since last)`);
if (timeSinceLastOp < OPERATION_DEBOUNCE_MS) {
console.log(`Debouncing ${operationType} operation - too soon (${timeSinceLastOp}ms since last)`);
return;
}
@ -52,7 +51,7 @@ export function useMicrophone() {
lastOperationRef.current = now;
operation().catch(error => {
devError(`Debounced ${operationType} operation failed:`, error);
console.error(`Debounced ${operationType} operation failed:`, error);
});
}, []);
@ -73,7 +72,7 @@ export function useMicrophone() {
try {
await microphoneSender.replaceTrack(null);
} catch (error) {
devWarn("Failed to replace track with null:", error);
console.warn("Failed to replace track with null:", error);
// Fallback to removing the track
peerConnection.removeTrack(microphoneSender);
}
@ -111,14 +110,14 @@ export function useMicrophone() {
} : "No peer connection",
streamMatch: refStream === microphoneStream
};
devLog("Microphone Debug State:", state);
console.log("Microphone Debug State:", state);
// Also check if streams are active
if (refStream) {
devLog("Ref stream active tracks:", refStream.getAudioTracks().filter(t => t.readyState === 'live').length);
console.log("Ref stream active tracks:", refStream.getAudioTracks().filter(t => t.readyState === 'live').length);
}
if (microphoneStream && microphoneStream !== refStream) {
devLog("Store stream active tracks:", microphoneStream.getAudioTracks().filter(t => t.readyState === 'live').length);
console.log("Store stream active tracks:", microphoneStream.getAudioTracks().filter(t => t.readyState === 'live').length);
}
return state;
@ -138,15 +137,15 @@ export function useMicrophone() {
const syncMicrophoneState = useCallback(async () => {
// Debounce sync calls to prevent race conditions
const now = Date.now();
if (now - lastSyncRef.current < AUDIO_CONFIG.SYNC_DEBOUNCE_MS) {
devLog("Skipping sync - too frequent");
if (now - lastSyncRef.current < 1000) { // Increased debounce time
console.log("Skipping sync - too frequent");
return;
}
lastSyncRef.current = now;
// Don't sync if we're in the middle of starting the microphone
if (isStartingRef.current) {
devLog("Skipping sync - microphone is starting");
console.log("Skipping sync - microphone is starting");
return;
}
@ -158,27 +157,27 @@ export function useMicrophone() {
// Only sync if there's a significant state difference and we're not in a transition
if (backendRunning !== isMicrophoneActive) {
devInfo(`Syncing microphone state: backend=${backendRunning}, frontend=${isMicrophoneActive}`);
console.info(`Syncing microphone state: backend=${backendRunning}, frontend=${isMicrophoneActive}`);
// If backend is running but frontend thinks it's not, just update frontend state
if (backendRunning && !isMicrophoneActive) {
devLog("Backend running, updating frontend state to active");
console.log("Backend running, updating frontend state to active");
setMicrophoneActive(true);
}
// If backend is not running but frontend thinks it is, clean up and update state
else if (!backendRunning && isMicrophoneActive) {
devLog("Backend not running, cleaning up frontend state");
console.log("Backend not running, cleaning up frontend state");
setMicrophoneActive(false);
// Only clean up stream if we actually have one
if (microphoneStreamRef.current) {
devLog("Cleaning up orphaned stream");
console.log("Cleaning up orphaned stream");
await stopMicrophoneStream();
}
}
}
}
} catch (error) {
devWarn("Failed to sync microphone state:", error);
console.warn("Failed to sync microphone state:", error);
}
}, [isMicrophoneActive, setMicrophoneActive, stopMicrophoneStream]);
@ -186,7 +185,7 @@ export function useMicrophone() {
const startMicrophone = useCallback(async (deviceId?: string): Promise<{ success: boolean; error?: MicrophoneError }> => {
// Prevent multiple simultaneous start operations
if (isStarting || isStopping || isToggling) {
devLog("Microphone operation already in progress, skipping start");
console.log("Microphone operation already in progress, skipping start");
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
}
@ -199,8 +198,8 @@ export function useMicrophone() {
echoCancellation: true,
noiseSuppression: true,
autoGainControl: true,
sampleRate: AUDIO_CONFIG.SAMPLE_RATE,
channelCount: AUDIO_CONFIG.CHANNEL_COUNT,
sampleRate: 48000,
channelCount: 1,
};
// Add device ID if specified
@ -208,7 +207,7 @@ export function useMicrophone() {
audioConstraints.deviceId = { exact: deviceId };
}
devLog("Requesting microphone with constraints:", audioConstraints);
console.log("Requesting microphone with constraints:", audioConstraints);
const stream = await navigator.mediaDevices.getUserMedia({
audio: audioConstraints
});
@ -220,14 +219,14 @@ export function useMicrophone() {
setMicrophoneStream(stream);
// Verify the stream was stored correctly
devLog("Stream storage verification:", {
console.log("Stream storage verification:", {
refSet: !!microphoneStreamRef.current,
refId: microphoneStreamRef.current?.id,
storeWillBeSet: true // Store update is async
});
// Add audio track to peer connection if available
devLog("Peer connection state:", peerConnection ? {
console.log("Peer connection state:", peerConnection ? {
connectionState: peerConnection.connectionState,
iceConnectionState: peerConnection.iceConnectionState,
signalingState: peerConnection.signalingState
@ -235,11 +234,11 @@ export function useMicrophone() {
if (peerConnection && stream.getAudioTracks().length > 0) {
const audioTrack = stream.getAudioTracks()[0];
devLog("Starting microphone with audio track:", audioTrack.id, "kind:", audioTrack.kind);
console.log("Starting microphone with audio track:", audioTrack.id, "kind:", audioTrack.kind);
// Find the audio transceiver (should already exist with sendrecv direction)
const transceivers = peerConnection.getTransceivers();
devLog("Available transceivers:", transceivers.map(t => ({
console.log("Available transceivers:", transceivers.map(t => ({
direction: t.direction,
mid: t.mid,
senderTrack: t.sender.track?.kind,
@ -265,7 +264,7 @@ export function useMicrophone() {
return false;
});
devLog("Found audio transceiver:", audioTransceiver ? {
console.log("Found audio transceiver:", audioTransceiver ? {
direction: audioTransceiver.direction,
mid: audioTransceiver.mid,
senderTrack: audioTransceiver.sender.track?.kind,
@ -277,10 +276,10 @@ export function useMicrophone() {
// Use the existing audio transceiver's sender
await audioTransceiver.sender.replaceTrack(audioTrack);
sender = audioTransceiver.sender;
devLog("Replaced audio track on existing transceiver");
console.log("Replaced audio track on existing transceiver");
// Verify the track was set correctly
devLog("Transceiver after track replacement:", {
console.log("Transceiver after track replacement:", {
direction: audioTransceiver.direction,
senderTrack: audioTransceiver.sender.track?.id,
senderTrackKind: audioTransceiver.sender.track?.kind,
@ -290,11 +289,11 @@ export function useMicrophone() {
} else {
// Fallback: add new track if no transceiver found
sender = peerConnection.addTrack(audioTrack, stream);
devLog("Added new audio track to peer connection");
console.log("Added new audio track to peer connection");
// Find the transceiver that was created for this track
const newTransceiver = peerConnection.getTransceivers().find(t => t.sender === sender);
devLog("New transceiver created:", newTransceiver ? {
console.log("New transceiver created:", newTransceiver ? {
direction: newTransceiver.direction,
senderTrack: newTransceiver.sender.track?.id,
senderTrackKind: newTransceiver.sender.track?.kind
@ -302,7 +301,7 @@ export function useMicrophone() {
}
setMicrophoneSender(sender);
devLog("Microphone sender set:", {
console.log("Microphone sender set:", {
senderId: sender,
track: sender.track?.id,
trackKind: sender.track?.kind,
@ -311,30 +310,28 @@ export function useMicrophone() {
});
// Check sender stats to verify audio is being transmitted
devOnly(() => {
setTimeout(async () => {
try {
const stats = await sender.getStats();
devLog("Sender stats after 2 seconds:");
stats.forEach((report, id) => {
if (report.type === 'outbound-rtp' && report.kind === 'audio') {
devLog("Outbound audio RTP stats:", {
id,
packetsSent: report.packetsSent,
bytesSent: report.bytesSent,
timestamp: report.timestamp
});
}
});
} catch (error) {
devError("Failed to get sender stats:", error);
}
}, 2000);
});
setTimeout(async () => {
try {
const stats = await sender.getStats();
console.log("Sender stats after 2 seconds:");
stats.forEach((report, id) => {
if (report.type === 'outbound-rtp' && report.kind === 'audio') {
console.log("Outbound audio RTP stats:", {
id,
packetsSent: report.packetsSent,
bytesSent: report.bytesSent,
timestamp: report.timestamp
});
}
});
} catch (error) {
console.error("Failed to get sender stats:", error);
}
}, 2000);
}
// Notify backend that microphone is started
devLog("Notifying backend about microphone start...");
console.log("Notifying backend about microphone start...");
// Retry logic for backend failures
let backendSuccess = false;
@ -344,12 +341,12 @@ export function useMicrophone() {
try {
// If this is a retry, first try to reset the backend microphone state
if (attempt > 1) {
devLog(`Backend start attempt ${attempt}, first trying to reset backend state...`);
console.log(`Backend start attempt ${attempt}, first trying to reset backend state...`);
try {
// Try the new reset endpoint first
const resetResp = await api.POST("/microphone/reset", {});
if (resetResp.ok) {
devLog("Backend reset successful");
console.log("Backend reset successful");
} else {
// Fallback to stop
await api.POST("/microphone/stop", {});
@ -357,59 +354,59 @@ export function useMicrophone() {
// Wait a bit for the backend to reset
await new Promise(resolve => setTimeout(resolve, 200));
} catch (resetError) {
devWarn("Failed to reset backend state:", resetError);
console.warn("Failed to reset backend state:", resetError);
}
}
const backendResp = await api.POST("/microphone/start", {});
devLog(`Backend response status (attempt ${attempt}):`, backendResp.status, "ok:", backendResp.ok);
console.log(`Backend response status (attempt ${attempt}):`, backendResp.status, "ok:", backendResp.ok);
if (!backendResp.ok) {
lastError = `Backend returned status ${backendResp.status}`;
devError(`Backend microphone start failed with status: ${backendResp.status} (attempt ${attempt})`);
console.error(`Backend microphone start failed with status: ${backendResp.status} (attempt ${attempt})`);
// For 500 errors, try again after a short delay
if (backendResp.status === 500 && attempt < 3) {
devLog(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
console.log(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
await new Promise(resolve => setTimeout(resolve, 500));
continue;
}
} else {
// Success!
const responseData = await backendResp.json();
devLog("Backend response data:", responseData);
console.log("Backend response data:", responseData);
if (responseData.status === "already running") {
devInfo("Backend microphone was already running");
console.info("Backend microphone was already running");
// If we're on the first attempt and backend says "already running",
// but frontend thinks it's not active, this might be a stuck state
if (attempt === 1 && !isMicrophoneActive) {
devWarn("Backend reports 'already running' but frontend is not active - possible stuck state");
devLog("Attempting to reset backend state and retry...");
console.warn("Backend reports 'already running' but frontend is not active - possible stuck state");
console.log("Attempting to reset backend state and retry...");
try {
const resetResp = await api.POST("/microphone/reset", {});
if (resetResp.ok) {
devLog("Backend reset successful, retrying start...");
console.log("Backend reset successful, retrying start...");
await new Promise(resolve => setTimeout(resolve, 200));
continue; // Retry the start
}
} catch (resetError) {
devWarn("Failed to reset stuck backend state:", resetError);
console.warn("Failed to reset stuck backend state:", resetError);
}
}
}
devLog("Backend microphone start successful");
console.log("Backend microphone start successful");
backendSuccess = true;
break;
}
} catch (error) {
lastError = error instanceof Error ? error : String(error);
devError(`Backend microphone start threw error (attempt ${attempt}):`, error);
console.error(`Backend microphone start threw error (attempt ${attempt}):`, error);
// For network errors, try again after a short delay
if (attempt < 3) {
devLog(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
console.log(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
await new Promise(resolve => setTimeout(resolve, 500));
continue;
}
@ -418,7 +415,7 @@ export function useMicrophone() {
// If all backend attempts failed, cleanup and return error
if (!backendSuccess) {
devError("All backend start attempts failed, cleaning up stream");
console.error("All backend start attempts failed, cleaning up stream");
await stopMicrophoneStream();
isStartingRef.current = false;
setIsStarting(false);
@ -435,7 +432,7 @@ export function useMicrophone() {
setMicrophoneActive(true);
setMicrophoneMuted(false);
devLog("Microphone state set to active. Verifying state:", {
console.log("Microphone state set to active. Verifying state:", {
streamInRef: !!microphoneStreamRef.current,
streamInStore: !!microphoneStream,
isActive: true,
@ -444,17 +441,15 @@ export function useMicrophone() {
// Don't sync immediately after starting - it causes race conditions
// The sync will happen naturally through other triggers
devOnly(() => {
setTimeout(() => {
// Just verify state after a delay for debugging
devLog("State check after delay:", {
streamInRef: !!microphoneStreamRef.current,
streamInStore: !!microphoneStream,
isActive: isMicrophoneActive,
isMuted: isMicrophoneMuted
});
}, AUDIO_CONFIG.AUDIO_TEST_TIMEOUT);
});
setTimeout(() => {
// Just verify state after a delay for debugging
console.log("State check after delay:", {
streamInRef: !!microphoneStreamRef.current,
streamInStore: !!microphoneStream,
isActive: isMicrophoneActive,
isMuted: isMicrophoneMuted
});
}, 100);
// Clear the starting flag
isStartingRef.current = false;
@ -498,12 +493,12 @@ export function useMicrophone() {
// Reset backend microphone state
const resetBackendMicrophoneState = useCallback(async (): Promise<boolean> => {
try {
devLog("Resetting backend microphone state...");
console.log("Resetting backend microphone state...");
const response = await api.POST("/microphone/reset", {});
if (response.ok) {
const data = await response.json();
devLog("Backend microphone reset successful:", data);
console.log("Backend microphone reset successful:", data);
// Update frontend state to match backend
setMicrophoneActive(false);
@ -511,7 +506,7 @@ export function useMicrophone() {
// Clean up any orphaned streams
if (microphoneStreamRef.current) {
devLog("Cleaning up orphaned stream after reset");
console.log("Cleaning up orphaned stream after reset");
await stopMicrophoneStream();
}
@ -523,19 +518,19 @@ export function useMicrophone() {
return true;
} else {
devError("Backend microphone reset failed:", response.status);
console.error("Backend microphone reset failed:", response.status);
return false;
}
} catch (error) {
devWarn("Failed to reset backend microphone state:", error);
console.warn("Failed to reset backend microphone state:", error);
// Fallback to old method
try {
devLog("Trying fallback reset method...");
console.log("Trying fallback reset method...");
await api.POST("/microphone/stop", {});
await new Promise(resolve => setTimeout(resolve, 300));
return true;
} catch (fallbackError) {
devError("Fallback reset also failed:", fallbackError);
console.error("Fallback reset also failed:", fallbackError);
return false;
}
}
@ -545,7 +540,7 @@ export function useMicrophone() {
const stopMicrophone = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
// Prevent multiple simultaneous stop operations
if (isStarting || isStopping || isToggling) {
devLog("Microphone operation already in progress, skipping stop");
console.log("Microphone operation already in progress, skipping stop");
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
}
@ -557,9 +552,9 @@ export function useMicrophone() {
// Then notify backend that microphone is stopped
try {
await api.POST("/microphone/stop", {});
devLog("Backend notified about microphone stop");
console.log("Backend notified about microphone stop");
} catch (error) {
devWarn("Failed to notify backend about microphone stop:", error);
console.warn("Failed to notify backend about microphone stop:", error);
}
// Update frontend state immediately
@ -572,7 +567,7 @@ export function useMicrophone() {
setIsStopping(false);
return { success: true };
} catch (error) {
devError("Failed to stop microphone:", error);
console.error("Failed to stop microphone:", error);
setIsStopping(false);
return {
success: false,
@ -588,7 +583,7 @@ export function useMicrophone() {
const toggleMicrophoneMute = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
// Prevent multiple simultaneous toggle operations
if (isStarting || isStopping || isToggling) {
devLog("Microphone operation already in progress, skipping toggle");
console.log("Microphone operation already in progress, skipping toggle");
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
}
@ -597,7 +592,7 @@ export function useMicrophone() {
// Use the ref instead of store value to avoid race conditions
const currentStream = microphoneStreamRef.current || microphoneStream;
devLog("Toggle microphone mute - current state:", {
console.log("Toggle microphone mute - current state:", {
hasRefStream: !!microphoneStreamRef.current,
hasStoreStream: !!microphoneStream,
isActive: isMicrophoneActive,
@ -615,7 +610,7 @@ export function useMicrophone() {
streamId: currentStream?.id,
audioTracks: currentStream?.getAudioTracks().length || 0
};
devWarn("Microphone mute failed: stream or active state missing", errorDetails);
console.warn("Microphone mute failed: stream or active state missing", errorDetails);
// Provide more specific error message
let errorMessage = 'Microphone is not active';
@ -652,7 +647,7 @@ export function useMicrophone() {
// Mute/unmute the audio track
audioTracks.forEach(track => {
track.enabled = !newMutedState;
devLog(`Audio track ${track.id} enabled: ${track.enabled}`);
console.log(`Audio track ${track.id} enabled: ${track.enabled}`);
});
setMicrophoneMuted(newMutedState);
@ -661,13 +656,13 @@ export function useMicrophone() {
try {
await api.POST("/microphone/mute", { muted: newMutedState });
} catch (error) {
devWarn("Failed to notify backend about microphone mute:", error);
console.warn("Failed to notify backend about microphone mute:", error);
}
setIsToggling(false);
return { success: true };
} catch (error) {
devError("Failed to toggle microphone mute:", error);
console.error("Failed to toggle microphone mute:", error);
setIsToggling(false);
return {
success: false,
@ -682,7 +677,7 @@ export function useMicrophone() {
// Function to check WebRTC audio transmission stats
const checkAudioTransmissionStats = useCallback(async () => {
if (!microphoneSender) {
devLog("No microphone sender available");
console.log("No microphone sender available");
return null;
}
@ -712,38 +707,38 @@ export function useMicrophone() {
}
});
devLog("Audio transmission stats:", audioStats);
console.log("Audio transmission stats:", audioStats);
return audioStats;
} catch (error) {
devError("Failed to get audio transmission stats:", error);
console.error("Failed to get audio transmission stats:", error);
return null;
}
}, [microphoneSender]);
// Comprehensive test function to diagnose microphone issues
const testMicrophoneAudio = useCallback(async () => {
devLog("=== MICROPHONE AUDIO TEST ===");
console.log("=== MICROPHONE AUDIO TEST ===");
// 1. Check if we have a stream
const stream = microphoneStreamRef.current;
if (!stream) {
devLog("❌ No microphone stream available");
console.log("❌ No microphone stream available");
return;
}
devLog("✅ Microphone stream exists:", stream.id);
console.log("✅ Microphone stream exists:", stream.id);
// 2. Check audio tracks
const audioTracks = stream.getAudioTracks();
devLog("Audio tracks:", audioTracks.length);
console.log("Audio tracks:", audioTracks.length);
if (audioTracks.length === 0) {
devLog("❌ No audio tracks in stream");
console.log("❌ No audio tracks in stream");
return;
}
const track = audioTracks[0];
devLog("✅ Audio track details:", {
console.log("✅ Audio track details:", {
id: track.id,
label: track.label,
enabled: track.enabled,
@ -757,13 +752,13 @@ export function useMicrophone() {
const analyser = audioContext.createAnalyser();
const source = audioContext.createMediaStreamSource(stream);
analyser.fftSize = AUDIO_CONFIG.ANALYSIS_FFT_SIZE;
analyser.fftSize = 256;
source.connect(analyser);
const dataArray = new Uint8Array(analyser.frequencyBinCount);
devLog("🎤 Testing audio level detection for 5 seconds...");
devLog("Please speak into your microphone now!");
console.log("🎤 Testing audio level detection for 5 seconds...");
console.log("Please speak into your microphone now!");
let maxLevel = 0;
let sampleCount = 0;
@ -776,39 +771,39 @@ export function useMicrophone() {
sum += value * value;
}
const rms = Math.sqrt(sum / dataArray.length);
const level = Math.min(AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE, (rms / AUDIO_CONFIG.LEVEL_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE);
const level = Math.min(100, (rms / 255) * 100);
maxLevel = Math.max(maxLevel, level);
sampleCount++;
if (sampleCount % 10 === 0) { // Log every 10th sample
devLog(`Audio level: ${level.toFixed(1)}% (max so far: ${maxLevel.toFixed(1)}%)`);
console.log(`Audio level: ${level.toFixed(1)}% (max so far: ${maxLevel.toFixed(1)}%)`);
}
}, AUDIO_CONFIG.ANALYSIS_UPDATE_INTERVAL);
}, 100);
setTimeout(() => {
clearInterval(testInterval);
source.disconnect();
audioContext.close();
devLog("🎤 Audio test completed!");
devLog(`Maximum audio level detected: ${maxLevel.toFixed(1)}%`);
console.log("🎤 Audio test completed!");
console.log(`Maximum audio level detected: ${maxLevel.toFixed(1)}%`);
if (maxLevel > 5) {
devLog("✅ Microphone is detecting audio!");
console.log("✅ Microphone is detecting audio!");
} else {
devLog("❌ No significant audio detected. Check microphone permissions and hardware.");
console.log("❌ No significant audio detected. Check microphone permissions and hardware.");
}
}, NETWORK_CONFIG.AUDIO_TEST_DURATION);
}, 5000);
} catch (error) {
devError("❌ Failed to test audio level:", error);
console.error("❌ Failed to test audio level:", error);
}
// 4. Check WebRTC sender
if (microphoneSender) {
devLog("✅ WebRTC sender exists");
devLog("Sender track:", {
console.log("✅ WebRTC sender exists");
console.log("Sender track:", {
id: microphoneSender.track?.id,
kind: microphoneSender.track?.kind,
enabled: microphoneSender.track?.enabled,
@ -817,45 +812,45 @@ export function useMicrophone() {
// Check if sender track matches stream track
if (microphoneSender.track === track) {
devLog("✅ Sender track matches stream track");
console.log("✅ Sender track matches stream track");
} else {
devLog("❌ Sender track does NOT match stream track");
console.log("❌ Sender track does NOT match stream track");
}
} else {
devLog("❌ No WebRTC sender available");
console.log("❌ No WebRTC sender available");
}
// 5. Check peer connection
if (peerConnection) {
devLog("✅ Peer connection exists");
devLog("Connection state:", peerConnection.connectionState);
devLog("ICE connection state:", peerConnection.iceConnectionState);
console.log("✅ Peer connection exists");
console.log("Connection state:", peerConnection.connectionState);
console.log("ICE connection state:", peerConnection.iceConnectionState);
const transceivers = peerConnection.getTransceivers();
const audioTransceivers = transceivers.filter(t =>
t.sender.track?.kind === 'audio' || t.receiver.track?.kind === 'audio'
);
devLog("Audio transceivers:", audioTransceivers.map(t => ({
console.log("Audio transceivers:", audioTransceivers.map(t => ({
direction: t.direction,
senderTrack: t.sender.track?.id,
receiverTrack: t.receiver.track?.id
})));
} else {
devLog("❌ No peer connection available");
console.log("❌ No peer connection available");
}
}, [microphoneSender, peerConnection]);
const startMicrophoneDebounced = useCallback((deviceId?: string) => {
debouncedOperation(async () => {
await startMicrophone(deviceId).catch(devError);
await startMicrophone(deviceId).catch(console.error);
}, "start");
}, [startMicrophone, debouncedOperation]);
const stopMicrophoneDebounced = useCallback(() => {
debouncedOperation(async () => {
await stopMicrophone().catch(devError);
await stopMicrophone().catch(console.error);
}, "stop");
}, [stopMicrophone, debouncedOperation]);
@ -924,10 +919,10 @@ export function useMicrophone() {
// Clean up stream directly without depending on the callback
const stream = microphoneStreamRef.current;
if (stream) {
devLog("Cleanup: stopping microphone stream on unmount");
console.log("Cleanup: stopping microphone stream on unmount");
stream.getAudioTracks().forEach(track => {
track.stop();
devLog(`Cleanup: stopped audio track ${track.id}`);
console.log(`Cleanup: stopped audio track ${track.id}`);
});
microphoneStreamRef.current = null;
}

View File

@ -1,7 +1,5 @@
import { useCallback, useEffect, useState } from "react";
import { devError } from '../utils/debug';
import { JsonRpcResponse, useJsonRpc } from "./useJsonRpc";
import { useAudioEvents } from "./useAudioEvents";
@ -27,7 +25,7 @@ export function useUsbDeviceConfig() {
setLoading(false);
if ("error" in resp) {
devError("Failed to load USB devices:", resp.error);
console.error("Failed to load USB devices:", resp.error);
setError(resp.error.data || "Unknown error");
setUsbDeviceConfig(null);
} else {

View File

@ -1,142 +0,0 @@
import api from '@/api';
// AudioConfig mirrors the backend's audio configuration payload.
// PascalCase field names match the backend's JSON serialization.
interface AudioConfig {
  Quality: number;    // quality preset index (0=Low, 1=Medium, 2=High, 3=Ultra per qualityLabels below)
  Bitrate: number;    // bitrate in kbps (interpolated into labels as "(NNNkbps)")
  SampleRate: number; // sample rate in Hz
  Channels: number;   // channel count
  FrameSize: string;  // presumably a frame duration string — TODO confirm format against backend
}
// Maps a quality preset index (0-3) to its full configuration.
type QualityPresets = Record<number, AudioConfig>;

// Response shape of GET /audio/quality and GET /microphone/quality.
interface AudioQualityResponse {
  current: AudioConfig;   // configuration currently in effect
  presets: QualityPresets; // all available presets, keyed by quality index
}
/**
 * Client-side cache and accessor for audio/microphone quality presets.
 *
 * Fetches preset tables from the backend, caches them in memory, and keeps a
 * set of human-readable quality labels (enriched with real bitrates once the
 * audio presets have been loaded). All network failures are logged and
 * reported as null/false rather than thrown.
 */
class AudioQualityService {
  private audioPresets: QualityPresets | null = null;
  private microphonePresets: QualityPresets | null = null;
  // Fallback labels used until real presets (with bitrates) are fetched.
  private qualityLabels: Record<number, string> = {
    0: 'Low',
    1: 'Medium',
    2: 'High',
    3: 'Ultra'
  };

  /**
   * Fetch audio (output) quality presets from the backend.
   *
   * On success, caches the presets, refreshes the quality labels with real
   * bitrates, and returns the full response. Returns null on HTTP or network
   * failure (network errors are logged).
   */
  async fetchAudioQualityPresets(): Promise<AudioQualityResponse | null> {
    try {
      const response = await api.GET('/audio/quality');
      if (response.ok) {
        const data = await response.json();
        this.audioPresets = data.presets;
        this.updateQualityLabels(data.presets);
        return data;
      }
    } catch (error) {
      console.error('Failed to fetch audio quality presets:', error);
    }
    return null;
  }

  /**
   * Fetch microphone (input) quality presets from the backend.
   *
   * On success, caches the presets and returns the full response. Note that,
   * unlike the audio presets, these do not refresh the shared quality labels.
   * Returns null on HTTP or network failure.
   */
  async fetchMicrophoneQualityPresets(): Promise<AudioQualityResponse | null> {
    try {
      const response = await api.GET('/microphone/quality');
      if (response.ok) {
        const data = await response.json();
        this.microphonePresets = data.presets;
        return data;
      }
    } catch (error) {
      console.error('Failed to fetch microphone quality presets:', error);
    }
    return null;
  }

  /**
   * Rebuild the quality labels so each one carries the preset's actual
   * bitrate, e.g. "High (128kbps)". Unknown quality indices fall back to
   * "Quality N".
   */
  private updateQualityLabels(presets: QualityPresets): void {
    const newQualityLabels: Record<number, string> = {};
    Object.entries(presets).forEach(([qualityNum, preset]) => {
      // Explicit radix: Object.entries yields string keys.
      const quality = parseInt(qualityNum, 10);
      const qualityNames = ['Low', 'Medium', 'High', 'Ultra'];
      const name = qualityNames[quality] || `Quality ${quality}`;
      newQualityLabels[quality] = `${name} (${preset.Bitrate}kbps)`;
    });
    this.qualityLabels = newQualityLabels;
  }

  /**
   * Get the current quality labels (with bitrates once presets are loaded).
   */
  getQualityLabels(): Record<number, string> {
    return this.qualityLabels;
  }

  /**
   * Get the cached audio presets, or null if not yet fetched.
   */
  getAudioPresets(): QualityPresets | null {
    return this.audioPresets;
  }

  /**
   * Get the cached microphone presets, or null if not yet fetched.
   */
  getMicrophonePresets(): QualityPresets | null {
    return this.microphonePresets;
  }

  /**
   * Set the audio (output) quality on the backend.
   * Returns true on success, false on HTTP or network failure.
   */
  async setAudioQuality(quality: number): Promise<boolean> {
    try {
      const response = await api.POST('/audio/quality', { quality });
      return response.ok;
    } catch (error) {
      console.error('Failed to set audio quality:', error);
      return false;
    }
  }

  /**
   * Set the microphone (input) quality on the backend.
   * Returns true on success, false on HTTP or network failure.
   */
  async setMicrophoneQuality(quality: number): Promise<boolean> {
    try {
      const response = await api.POST('/microphone/quality', { quality });
      return response.ok;
    } catch (error) {
      console.error('Failed to set microphone quality:', error);
      return false;
    }
  }

  /**
   * Load audio and microphone configurations concurrently.
   * Each result is null if its fetch failed; one failing does not
   * prevent the other from loading.
   */
  async loadAllConfigurations(): Promise<{
    audio: AudioQualityResponse | null;
    microphone: AudioQualityResponse | null;
  }> {
    const [audio, microphone] = await Promise.all([
      this.fetchAudioQualityPresets(),
      this.fetchMicrophoneQualityPresets()
    ]);
    return { audio, microphone };
  }
}
// Export a singleton instance so all consumers share one preset/label cache.
export const audioQualityService = new AudioQualityService();
export default audioQualityService;

View File

@ -1,64 +0,0 @@
/**
* Debug utilities for development mode logging
*/
// True when running under a development build (Vite's DEV flag or an
// explicit "development" mode); gates all dev-only logging below.
const isDevelopment = import.meta.env.DEV || import.meta.env.MODE === 'development';
/**
 * console.log that emits only in development mode; a silent no-op
 * in production builds.
 */
export const devLog = (...args: unknown[]): void => {
  if (!isDevelopment) return;
  console.log(...args);
};
/**
 * console.info that emits only in development mode; a silent no-op
 * in production builds.
 */
export const devInfo = (...args: unknown[]): void => {
  if (!isDevelopment) return;
  console.info(...args);
};
/**
 * console.warn that emits only in development mode; a silent no-op
 * in production builds.
 */
export const devWarn = (...args: unknown[]): void => {
  if (!isDevelopment) return;
  console.warn(...args);
};
/**
 * console.error wrapper: always logs (errors matter in production too),
 * but prefixes messages with "[DEV]" while in development mode.
 */
export const devError = (...args: unknown[]): void => {
  console.error(...(isDevelopment ? ['[DEV]', ...args] : args));
};
/**
 * Run the given function only in development mode.
 * Returns the function's result in development, undefined in production.
 */
export const devOnly = <T>(fn: () => T): T | undefined =>
  isDevelopment ? fn() : undefined;
/**
 * Report whether the app is running in development mode.
 * Mirrors the module-level flag so callers don't read import.meta directly.
 */
export const isDevMode = (): boolean => isDevelopment;