mirror of https://github.com/jetkvm/kvm.git

Compare commits: 8fb0b9f9c6 ... 0d4176cf98

14 Commits

| SHA1 |
|---|
| 0d4176cf98 |
| fe4571956d |
| f9adb4382d |
| 758bbbfff6 |
| 3efe2f2a1d |
| ece36ce5fd |
| cdf0b20bc7 |
| 25363cef90 |
| e3e7b898b5 |
| 9dda569523 |
| 6355dd87be |
| cb20956445 |
| 50e04192bf |
| dc2db8ed2d |

@@ -105,13 +105,20 @@ type AdaptiveBufferManager struct {

// NewAdaptiveBufferManager creates a new adaptive buffer manager
func NewAdaptiveBufferManager(config AdaptiveBufferConfig) *AdaptiveBufferManager {
	logger := logging.GetDefaultLogger().With().Str("component", "adaptive-buffer").Logger()

	if err := ValidateAdaptiveBufferConfig(config.MinBufferSize, config.MaxBufferSize, config.DefaultBufferSize); err != nil {
		logger.Warn().Err(err).Msg("invalid adaptive buffer config, using defaults")
		config = DefaultAdaptiveBufferConfig()
	}

	ctx, cancel := context.WithCancel(context.Background())

	return &AdaptiveBufferManager{
		currentInputBufferSize:  int64(config.DefaultBufferSize),
		currentOutputBufferSize: int64(config.DefaultBufferSize),
		config:                  config,
		logger:                  logging.GetDefaultLogger().With().Str("component", "adaptive-buffer").Logger(),
		logger:                  logger,
		processMonitor:          GetProcessMonitor(),
		ctx:                     ctx,
		cancel:                  cancel,

@@ -123,14 +130,14 @@ func NewAdaptiveBufferManager(config AdaptiveBufferConfig) *AdaptiveBufferManage
func (abm *AdaptiveBufferManager) Start() {
	abm.wg.Add(1)
	go abm.adaptationLoop()
	abm.logger.Info().Msg("Adaptive buffer manager started")
	abm.logger.Info().Msg("adaptive buffer manager started")
}

// Stop stops the adaptive buffer management
func (abm *AdaptiveBufferManager) Stop() {
	abm.cancel()
	abm.wg.Wait()
	abm.logger.Info().Msg("Adaptive buffer manager stopped")
	abm.logger.Info().Msg("adaptive buffer manager stopped")
}

// GetInputBufferSize returns the current recommended input buffer size
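The new constructor validates its configuration and silently falls back to defaults instead of failing, so construction can be treated as infallible by callers. A minimal lifecycle sketch, assuming it lives inside the audio package and that `DefaultAdaptiveBufferConfig` returns a usable config as the hunk above implies (the surrounding function name is illustrative):

```go
// Illustrative only: construct, start, and stop the adaptive buffer manager.
func runAdaptiveBuffering() {
	abm := NewAdaptiveBufferManager(DefaultAdaptiveBufferConfig())
	abm.Start()      // spawns adaptationLoop via the WaitGroup shown above
	defer abm.Stop() // cancels the context and waits for the loop to exit
	// ... stream audio while the manager adjusts buffer sizes ...
}
```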
@@ -72,14 +72,14 @@ func NewAdaptiveOptimizer(latencyMonitor *LatencyMonitor, bufferManager *Adaptiv
func (ao *AdaptiveOptimizer) Start() {
	ao.wg.Add(1)
	go ao.optimizationLoop()
	ao.logger.Info().Msg("Adaptive optimizer started")
	ao.logger.Debug().Msg("adaptive optimizer started")
}

// Stop stops the adaptive optimizer
func (ao *AdaptiveOptimizer) Stop() {
	ao.cancel()
	ao.wg.Wait()
	ao.logger.Info().Msg("Adaptive optimizer stopped")
	ao.logger.Debug().Msg("adaptive optimizer stopped")
}

// initializeStrategies sets up the available optimization strategies

@@ -178,9 +178,9 @@ func (ao *AdaptiveOptimizer) checkStability() {
	if metrics.Current > ao.config.RollbackThreshold {
		currentLevel := int(atomic.LoadInt64(&ao.optimizationLevel))
		if currentLevel > 0 {
			ao.logger.Warn().Dur("current_latency", metrics.Current).Dur("threshold", ao.config.RollbackThreshold).Msg("Rolling back optimizations due to excessive latency")
			ao.logger.Warn().Dur("current_latency", metrics.Current).Dur("threshold", ao.config.RollbackThreshold).Msg("rolling back optimizations due to excessive latency")
			if err := ao.decreaseOptimization(currentLevel - 1); err != nil {
				ao.logger.Error().Err(err).Msg("Failed to decrease optimization level")
				ao.logger.Error().Err(err).Msg("failed to decrease optimization level")
			}
		}
	}
@@ -1,82 +1,10 @@
// Package audio provides a comprehensive real-time audio processing system for JetKVM.
// Package audio provides real-time audio processing for JetKVM with low-latency streaming.
//
// # Architecture Overview
// Key components: output/input pipelines with Opus codec, adaptive buffer management,
// zero-copy frame pools, IPC communication, and process supervision.
//
// The audio package implements a multi-component architecture designed for low-latency,
// high-quality audio streaming in embedded ARM environments. The system consists of:
//
// - Audio Output Pipeline: Receives compressed audio frames, decodes via Opus, and
//   outputs to ALSA-compatible audio devices
// - Audio Input Pipeline: Captures microphone input, encodes via Opus, and streams
//   to connected clients
// - Adaptive Buffer Management: Dynamically adjusts buffer sizes based on system
//   load and latency requirements
// - Zero-Copy Frame Pool: Minimizes memory allocations through frame reuse
// - IPC Communication: Unix domain sockets for inter-process communication
// - Process Supervision: Automatic restart and health monitoring of audio subprocesses
//
// # Key Components
//
// ## Buffer Pool System (buffer_pool.go)
// Implements a two-tier buffer pool with separate pools for audio frames and control
// messages. Uses sync.Pool for efficient memory reuse and tracks allocation statistics.
//
// ## Zero-Copy Frame Management (zero_copy.go)
// Provides reference-counted audio frames that can be shared between components
// without copying data. Includes automatic cleanup and pool-based allocation.
//
// ## Adaptive Buffering Algorithm (adaptive_buffer.go)
// Dynamically adjusts buffer sizes based on:
// - System CPU and memory usage
// - Audio latency measurements
// - Frame drop rates
// - Network conditions
//
// The algorithm uses exponential smoothing and configurable thresholds to balance
// latency and stability. Buffer sizes are adjusted in discrete steps to prevent
// oscillation.
//
// ## Latency Monitoring (latency_monitor.go)
// Tracks end-to-end audio latency using high-resolution timestamps. Implements
// adaptive optimization that adjusts system parameters when latency exceeds
// configured thresholds.
//
// ## Process Supervision (supervisor.go)
// Manages audio subprocess lifecycle with automatic restart capabilities.
// Monitors process health and implements exponential backoff for restart attempts.
//
// # Quality Levels
//
// The system supports four quality presets optimized for different use cases:
// - Low: 32kbps output, 16kbps input - minimal bandwidth, voice-optimized
// - Medium: 96kbps output, 64kbps input - balanced quality and bandwidth
// - High: 192kbps output, 128kbps input - high quality for music
// - Ultra: 320kbps output, 256kbps input - maximum quality
//
// # Configuration System
//
// All configuration is centralized in config_constants.go, allowing runtime
// tuning of performance parameters. Key configuration areas include:
// - Opus codec parameters (bitrate, complexity, VBR settings)
// - Buffer sizes and pool configurations
// - Latency thresholds and optimization parameters
// - Process monitoring and restart policies
//
// # Thread Safety
//
// All public APIs are thread-safe. Internal synchronization uses:
// - atomic operations for performance counters
// - sync.RWMutex for configuration updates
// - sync.Pool for buffer management
// - channel-based communication for IPC
//
// # Error Handling
//
// The system implements comprehensive error handling with:
// - Graceful degradation on component failures
// - Automatic retry with exponential backoff
// - Detailed error context for debugging
// - Metrics collection for monitoring
// Supports four quality presets (Low/Medium/High/Ultra) with configurable bitrates.
// All APIs are thread-safe with comprehensive error handling and metrics collection.
//
// # Performance Characteristics
//

@@ -100,6 +28,8 @@ import (
	"errors"
	"sync/atomic"
	"time"

	"github.com/jetkvm/kvm/internal/logging"
)

var (

@@ -190,13 +120,14 @@ var qualityPresets = map[AudioQuality]struct {
func GetAudioQualityPresets() map[AudioQuality]AudioConfig {
	result := make(map[AudioQuality]AudioConfig)
	for quality, preset := range qualityPresets {
		result[quality] = AudioConfig{
		config := AudioConfig{
			Quality:    quality,
			Bitrate:    preset.outputBitrate,
			SampleRate: preset.sampleRate,
			Channels:   preset.channels,
			FrameSize:  preset.frameSize,
		}
		result[quality] = config
	}
	return result
}
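GetAudioQualityPresets builds a fresh map on every call, which is what the immutability test added later in this change relies on. A small sketch of reading the preset table (the function name is illustrative; the logger construction and the AudioConfig fields are taken from this diff, and Bitrate is in kbps, since SetAudioQuality multiplies it by 1000 before handing it to the encoder):

```go
// Illustrative: log the configured bitrate for every audio quality preset.
func logPresetBitrates() {
	logger := logging.GetDefaultLogger().With().Str("component", "audio").Logger()
	for quality, cfg := range GetAudioQualityPresets() {
		logger.Info().
			Int("quality", int(quality)).
			Int("bitrate_kbps", cfg.Bitrate).
			Int("sample_rate", cfg.SampleRate).
			Int("channels", cfg.Channels).
			Msg("audio quality preset")
	}
}
```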
@@ -205,7 +136,7 @@ func GetAudioQualityPresets() map[AudioQuality]AudioConfig {
func GetMicrophoneQualityPresets() map[AudioQuality]AudioConfig {
	result := make(map[AudioQuality]AudioConfig)
	for quality, preset := range qualityPresets {
		result[quality] = AudioConfig{
		config := AudioConfig{
			Quality:    quality,
			Bitrate:    preset.inputBitrate,
			SampleRate: func() int {

@@ -217,15 +148,67 @@ func GetMicrophoneQualityPresets() map[AudioQuality]AudioConfig {
			Channels:  1, // Microphone is always mono
			FrameSize: preset.frameSize,
		}
		result[quality] = config
	}
	return result
}

// SetAudioQuality updates the current audio quality configuration
func SetAudioQuality(quality AudioQuality) {
	// Validate audio quality parameter
	if err := ValidateAudioQuality(quality); err != nil {
		// Log validation error but don't fail - maintain backward compatibility
		logger := logging.GetDefaultLogger().With().Str("component", "audio").Logger()
		logger.Warn().Err(err).Int("quality", int(quality)).Msg("invalid audio quality, using current config")
		return
	}

	presets := GetAudioQualityPresets()
	if config, exists := presets[quality]; exists {
		currentConfig = config

		// Update CGO OPUS encoder parameters based on quality
		var complexity, vbr, signalType, bandwidth, dtx int
		switch quality {
		case AudioQualityLow:
			complexity = GetConfig().AudioQualityLowOpusComplexity
			vbr = GetConfig().AudioQualityLowOpusVBR
			signalType = GetConfig().AudioQualityLowOpusSignalType
			bandwidth = GetConfig().AudioQualityLowOpusBandwidth
			dtx = GetConfig().AudioQualityLowOpusDTX
		case AudioQualityMedium:
			complexity = GetConfig().AudioQualityMediumOpusComplexity
			vbr = GetConfig().AudioQualityMediumOpusVBR
			signalType = GetConfig().AudioQualityMediumOpusSignalType
			bandwidth = GetConfig().AudioQualityMediumOpusBandwidth
			dtx = GetConfig().AudioQualityMediumOpusDTX
		case AudioQualityHigh:
			complexity = GetConfig().AudioQualityHighOpusComplexity
			vbr = GetConfig().AudioQualityHighOpusVBR
			signalType = GetConfig().AudioQualityHighOpusSignalType
			bandwidth = GetConfig().AudioQualityHighOpusBandwidth
			dtx = GetConfig().AudioQualityHighOpusDTX
		case AudioQualityUltra:
			complexity = GetConfig().AudioQualityUltraOpusComplexity
			vbr = GetConfig().AudioQualityUltraOpusVBR
			signalType = GetConfig().AudioQualityUltraOpusSignalType
			bandwidth = GetConfig().AudioQualityUltraOpusBandwidth
			dtx = GetConfig().AudioQualityUltraOpusDTX
		default:
			// Use medium quality as fallback
			complexity = GetConfig().AudioQualityMediumOpusComplexity
			vbr = GetConfig().AudioQualityMediumOpusVBR
			signalType = GetConfig().AudioQualityMediumOpusSignalType
			bandwidth = GetConfig().AudioQualityMediumOpusBandwidth
			dtx = GetConfig().AudioQualityMediumOpusDTX
		}

		// Dynamically update CGO OPUS encoder parameters
		// Use current VBR constraint setting from config
		vbrConstraint := GetConfig().CGOOpusVBRConstraint
		if err := updateOpusEncoderParams(config.Bitrate*1000, complexity, vbr, vbrConstraint, signalType, bandwidth, dtx); err != nil {
			logging.GetDefaultLogger().Error().Err(err).Msg("Failed to update OPUS encoder parameters")
		}
	}
}
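SetAudioQuality both swaps the active AudioConfig and pushes the matching Opus settings through the CGO layer; encoder-update failures are logged inside the function, so callers only observe the resulting config. A package-internal usage sketch (function name is illustrative):

```go
// Switch to the high-quality preset and read back the active configuration.
func applyHighQuality() AudioConfig {
	SetAudioQuality(AudioQualityHigh) // updates currentConfig and the Opus encoder
	return GetAudioConfig()           // reflects the preset that was just applied
}
```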
@@ -236,6 +219,14 @@ func GetAudioConfig() AudioConfig {

// SetMicrophoneQuality updates the current microphone quality configuration
func SetMicrophoneQuality(quality AudioQuality) {
	// Validate audio quality parameter
	if err := ValidateAudioQuality(quality); err != nil {
		// Log validation error but don't fail - maintain backward compatibility
		logger := logging.GetDefaultLogger().With().Str("component", "audio").Logger()
		logger.Warn().Err(err).Int("quality", int(quality)).Msg("invalid microphone quality, using current config")
		return
	}

	presets := GetMicrophoneQualityPresets()
	if config, exists := presets[quality]; exists {
		currentMicrophoneConfig = config
@@ -0,0 +1,317 @@
//go:build cgo
// +build cgo

package audio

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestAudioQualityEdgeCases tests edge cases for audio quality functions
// These tests ensure the recent validation removal doesn't introduce regressions
func TestAudioQualityEdgeCases(t *testing.T) {
	tests := []struct {
		name     string
		testFunc func(t *testing.T)
	}{
		{"AudioQualityBoundaryValues", testAudioQualityBoundaryValues},
		{"MicrophoneQualityBoundaryValues", testMicrophoneQualityBoundaryValues},
		{"AudioQualityPresetsConsistency", testAudioQualityPresetsConsistency},
		{"MicrophoneQualityPresetsConsistency", testMicrophoneQualityPresetsConsistency},
		{"QualitySettingsThreadSafety", testQualitySettingsThreadSafety},
		{"QualityPresetsImmutability", testQualityPresetsImmutability},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.testFunc(t)
		})
	}
}

// testAudioQualityBoundaryValues tests boundary values for audio quality
func testAudioQualityBoundaryValues(t *testing.T) {
	// Test minimum valid quality (0)
	originalConfig := GetAudioConfig()
	SetAudioQuality(AudioQualityLow)
	assert.Equal(t, AudioQualityLow, GetAudioConfig().Quality, "Should accept minimum quality value")

	// Test maximum valid quality (3)
	SetAudioQuality(AudioQualityUltra)
	assert.Equal(t, AudioQualityUltra, GetAudioConfig().Quality, "Should accept maximum quality value")

	// Test that quality settings work correctly
	SetAudioQuality(AudioQualityMedium)
	currentConfig := GetAudioConfig()
	assert.Equal(t, AudioQualityMedium, currentConfig.Quality, "Should set medium quality")
	t.Logf("Medium quality config: %+v", currentConfig)

	SetAudioQuality(AudioQualityHigh)
	currentConfig = GetAudioConfig()
	assert.Equal(t, AudioQualityHigh, currentConfig.Quality, "Should set high quality")
	t.Logf("High quality config: %+v", currentConfig)

	// Restore original quality
	SetAudioQuality(originalConfig.Quality)
}

// testMicrophoneQualityBoundaryValues tests boundary values for microphone quality
func testMicrophoneQualityBoundaryValues(t *testing.T) {
	// Test minimum valid quality
	originalConfig := GetMicrophoneConfig()
	SetMicrophoneQuality(AudioQualityLow)
	assert.Equal(t, AudioQualityLow, GetMicrophoneConfig().Quality, "Should accept minimum microphone quality value")

	// Test maximum valid quality
	SetMicrophoneQuality(AudioQualityUltra)
	assert.Equal(t, AudioQualityUltra, GetMicrophoneConfig().Quality, "Should accept maximum microphone quality value")

	// Test that quality settings work correctly
	SetMicrophoneQuality(AudioQualityMedium)
	currentConfig := GetMicrophoneConfig()
	assert.Equal(t, AudioQualityMedium, currentConfig.Quality, "Should set medium microphone quality")
	t.Logf("Medium microphone quality config: %+v", currentConfig)

	SetMicrophoneQuality(AudioQualityHigh)
	currentConfig = GetMicrophoneConfig()
	assert.Equal(t, AudioQualityHigh, currentConfig.Quality, "Should set high microphone quality")
	t.Logf("High microphone quality config: %+v", currentConfig)

	// Restore original quality
	SetMicrophoneQuality(originalConfig.Quality)
}

// testAudioQualityPresetsConsistency tests consistency of audio quality presets
func testAudioQualityPresetsConsistency(t *testing.T) {
	presets := GetAudioQualityPresets()
	require.NotNil(t, presets, "Audio quality presets should not be nil")
	require.NotEmpty(t, presets, "Audio quality presets should not be empty")

	// Verify presets have expected structure
	for i, preset := range presets {
		t.Logf("Audio preset %d: %+v", i, preset)

		// Each preset should have reasonable values
		assert.GreaterOrEqual(t, preset.Bitrate, 0, "Bitrate should be non-negative")
		assert.Greater(t, preset.SampleRate, 0, "Sample rate should be positive")
		assert.Greater(t, preset.Channels, 0, "Channels should be positive")
	}

	// Test that presets are accessible by valid quality levels
	qualityLevels := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
	for _, quality := range qualityLevels {
		preset, exists := presets[quality]
		assert.True(t, exists, "Preset should exist for quality %v", quality)
		assert.Greater(t, preset.Bitrate, 0, "Preset bitrate should be positive for quality %v", quality)
	}
}

// testMicrophoneQualityPresetsConsistency tests consistency of microphone quality presets
func testMicrophoneQualityPresetsConsistency(t *testing.T) {
	presets := GetMicrophoneQualityPresets()
	require.NotNil(t, presets, "Microphone quality presets should not be nil")
	require.NotEmpty(t, presets, "Microphone quality presets should not be empty")

	// Verify presets have expected structure
	for i, preset := range presets {
		t.Logf("Microphone preset %d: %+v", i, preset)

		// Each preset should have reasonable values
		assert.GreaterOrEqual(t, preset.Bitrate, 0, "Bitrate should be non-negative")
		assert.Greater(t, preset.SampleRate, 0, "Sample rate should be positive")
		assert.Greater(t, preset.Channels, 0, "Channels should be positive")
	}

	// Test that presets are accessible by valid quality levels
	qualityLevels := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
	for _, quality := range qualityLevels {
		preset, exists := presets[quality]
		assert.True(t, exists, "Microphone preset should exist for quality %v", quality)
		assert.Greater(t, preset.Bitrate, 0, "Microphone preset bitrate should be positive for quality %v", quality)
	}
}

// testQualitySettingsThreadSafety tests thread safety of quality settings
func testQualitySettingsThreadSafety(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping thread safety test in short mode")
	}

	originalAudioConfig := GetAudioConfig()
	originalMicConfig := GetMicrophoneConfig()

	// Test concurrent access to quality settings
	const numGoroutines = 50
	const numOperations = 100

	done := make(chan bool, numGoroutines*2)

	// Audio quality goroutines
	for i := 0; i < numGoroutines; i++ {
		go func(id int) {
			for j := 0; j < numOperations; j++ {
				// Cycle through valid quality values
				qualityIndex := j % 4
				var quality AudioQuality
				switch qualityIndex {
				case 0:
					quality = AudioQualityLow
				case 1:
					quality = AudioQualityMedium
				case 2:
					quality = AudioQualityHigh
				case 3:
					quality = AudioQualityUltra
				}
				SetAudioQuality(quality)
				_ = GetAudioConfig()
			}
			done <- true
		}(i)
	}

	// Microphone quality goroutines
	for i := 0; i < numGoroutines; i++ {
		go func(id int) {
			for j := 0; j < numOperations; j++ {
				// Cycle through valid quality values
				qualityIndex := j % 4
				var quality AudioQuality
				switch qualityIndex {
				case 0:
					quality = AudioQualityLow
				case 1:
					quality = AudioQualityMedium
				case 2:
					quality = AudioQualityHigh
				case 3:
					quality = AudioQualityUltra
				}
				SetMicrophoneQuality(quality)
				_ = GetMicrophoneConfig()
			}
			done <- true
		}(i)
	}

	// Wait for all goroutines to complete
	for i := 0; i < numGoroutines*2; i++ {
		<-done
	}

	// Verify system is still functional
	SetAudioQuality(AudioQualityHigh)
	assert.Equal(t, AudioQualityHigh, GetAudioConfig().Quality, "Audio quality should be settable after concurrent access")

	SetMicrophoneQuality(AudioQualityMedium)
	assert.Equal(t, AudioQualityMedium, GetMicrophoneConfig().Quality, "Microphone quality should be settable after concurrent access")

	// Restore original values
	SetAudioQuality(originalAudioConfig.Quality)
	SetMicrophoneQuality(originalMicConfig.Quality)
}

// testQualityPresetsImmutability tests that quality presets are not accidentally modified
func testQualityPresetsImmutability(t *testing.T) {
	// Get presets multiple times and verify they're consistent
	presets1 := GetAudioQualityPresets()
	presets2 := GetAudioQualityPresets()

	require.Equal(t, len(presets1), len(presets2), "Preset count should be consistent")

	// Verify each preset is identical
	for quality := range presets1 {
		assert.Equal(t, presets1[quality].Bitrate, presets2[quality].Bitrate,
			"Preset %v bitrate should be consistent", quality)
		assert.Equal(t, presets1[quality].SampleRate, presets2[quality].SampleRate,
			"Preset %v sample rate should be consistent", quality)
		assert.Equal(t, presets1[quality].Channels, presets2[quality].Channels,
			"Preset %v channels should be consistent", quality)
	}

	// Test microphone presets as well
	micPresets1 := GetMicrophoneQualityPresets()
	micPresets2 := GetMicrophoneQualityPresets()

	require.Equal(t, len(micPresets1), len(micPresets2), "Microphone preset count should be consistent")

	for quality := range micPresets1 {
		assert.Equal(t, micPresets1[quality].Bitrate, micPresets2[quality].Bitrate,
			"Microphone preset %v bitrate should be consistent", quality)
		assert.Equal(t, micPresets1[quality].SampleRate, micPresets2[quality].SampleRate,
			"Microphone preset %v sample rate should be consistent", quality)
		assert.Equal(t, micPresets1[quality].Channels, micPresets2[quality].Channels,
			"Microphone preset %v channels should be consistent", quality)
	}
}

// TestQualityValidationRemovalRegression tests that validation removal doesn't cause regressions
func TestQualityValidationRemovalRegression(t *testing.T) {
	// This test ensures that removing validation from GET endpoints doesn't break functionality

	// Test that presets are still accessible
	audioPresets := GetAudioQualityPresets()
	assert.NotNil(t, audioPresets, "Audio presets should be accessible after validation removal")
	assert.NotEmpty(t, audioPresets, "Audio presets should not be empty")

	micPresets := GetMicrophoneQualityPresets()
	assert.NotNil(t, micPresets, "Microphone presets should be accessible after validation removal")
	assert.NotEmpty(t, micPresets, "Microphone presets should not be empty")

	// Test that quality getters still work
	audioConfig := GetAudioConfig()
	assert.GreaterOrEqual(t, int(audioConfig.Quality), 0, "Audio quality should be non-negative")

	micConfig := GetMicrophoneConfig()
	assert.GreaterOrEqual(t, int(micConfig.Quality), 0, "Microphone quality should be non-negative")

	// Test that setters still work (for valid values)
	originalAudio := GetAudioConfig()
	originalMic := GetMicrophoneConfig()

	SetAudioQuality(AudioQualityMedium)
	assert.Equal(t, AudioQualityMedium, GetAudioConfig().Quality, "Audio quality setter should work")

	SetMicrophoneQuality(AudioQualityHigh)
	assert.Equal(t, AudioQualityHigh, GetMicrophoneConfig().Quality, "Microphone quality setter should work")

	// Restore original values
	SetAudioQuality(originalAudio.Quality)
	SetMicrophoneQuality(originalMic.Quality)
}

// TestPerformanceAfterValidationRemoval tests that performance improved after validation removal
func TestPerformanceAfterValidationRemoval(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping performance test in short mode")
	}

	// Benchmark preset access (should be faster without validation)
	const iterations = 10000

	// Time audio preset access
	start := time.Now()
	for i := 0; i < iterations; i++ {
		_ = GetAudioQualityPresets()
	}
	audioDuration := time.Since(start)

	// Time microphone preset access
	start = time.Now()
	for i := 0; i < iterations; i++ {
		_ = GetMicrophoneQualityPresets()
	}
	micDuration := time.Since(start)

	t.Logf("Audio presets access time for %d iterations: %v", iterations, audioDuration)
	t.Logf("Microphone presets access time for %d iterations: %v", iterations, micDuration)

	// Verify reasonable performance (should complete quickly without validation overhead)
	maxExpectedDuration := time.Second // Very generous limit
	assert.Less(t, audioDuration, maxExpectedDuration, "Audio preset access should be fast")
	assert.Less(t, micDuration, maxExpectedDuration, "Microphone preset access should be fast")
}
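The thread-safety test above fans out 100 goroutines and joins them with a counted `done` channel. An equivalent pattern with `sync.WaitGroup` avoids sizing the channel by hand; this is only an alternative sketch, not part of the change, and it assumes the `sync` import plus the fact (stated in the test comments) that the four presets are the consecutive values 0-3:

```go
// Sketch: the same stress loop using sync.WaitGroup instead of a counted channel.
func stressQualitySettings(numGoroutines, numOperations int) {
	var wg sync.WaitGroup
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < numOperations; j++ {
				SetAudioQuality(AudioQuality(j % 4)) // cycles Low..Ultra
				_ = GetAudioConfig()
			}
		}()
	}
	wg.Wait()
}
```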
@@ -0,0 +1,120 @@
package audio

import (
	"sync/atomic"
	"time"

	"github.com/rs/zerolog"
)

// BaseAudioMetrics provides common metrics fields for both input and output
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
type BaseAudioMetrics struct {
	// Atomic int64 fields first for proper ARM32 alignment
	FramesProcessed int64 `json:"frames_processed"`
	FramesDropped   int64 `json:"frames_dropped"`
	BytesProcessed  int64 `json:"bytes_processed"`
	ConnectionDrops int64 `json:"connection_drops"`

	// Non-atomic fields after atomic fields
	LastFrameTime  time.Time     `json:"last_frame_time"`
	AverageLatency time.Duration `json:"average_latency"`
}

// BaseAudioManager provides common functionality for audio managers
type BaseAudioManager struct {
	metrics BaseAudioMetrics
	logger  zerolog.Logger
	running int32
}

// NewBaseAudioManager creates a new base audio manager
func NewBaseAudioManager(logger zerolog.Logger) *BaseAudioManager {
	return &BaseAudioManager{
		logger: logger,
	}
}

// IsRunning returns whether the manager is running
func (bam *BaseAudioManager) IsRunning() bool {
	return atomic.LoadInt32(&bam.running) == 1
}

// setRunning atomically sets the running state
func (bam *BaseAudioManager) setRunning(running bool) bool {
	if running {
		return atomic.CompareAndSwapInt32(&bam.running, 0, 1)
	}
	return atomic.CompareAndSwapInt32(&bam.running, 1, 0)
}

// resetMetrics resets all metrics to zero
func (bam *BaseAudioManager) resetMetrics() {
	atomic.StoreInt64(&bam.metrics.FramesProcessed, 0)
	atomic.StoreInt64(&bam.metrics.FramesDropped, 0)
	atomic.StoreInt64(&bam.metrics.BytesProcessed, 0)
	atomic.StoreInt64(&bam.metrics.ConnectionDrops, 0)
	bam.metrics.LastFrameTime = time.Time{}
	bam.metrics.AverageLatency = 0
}

// getBaseMetrics returns a copy of the base metrics
func (bam *BaseAudioManager) getBaseMetrics() BaseAudioMetrics {
	return BaseAudioMetrics{
		FramesProcessed: atomic.LoadInt64(&bam.metrics.FramesProcessed),
		FramesDropped:   atomic.LoadInt64(&bam.metrics.FramesDropped),
		BytesProcessed:  atomic.LoadInt64(&bam.metrics.BytesProcessed),
		ConnectionDrops: atomic.LoadInt64(&bam.metrics.ConnectionDrops),
		LastFrameTime:   bam.metrics.LastFrameTime,
		AverageLatency:  bam.metrics.AverageLatency,
	}
}

// recordFrameProcessed records a processed frame
func (bam *BaseAudioManager) recordFrameProcessed(bytes int) {
	atomic.AddInt64(&bam.metrics.FramesProcessed, 1)
	atomic.AddInt64(&bam.metrics.BytesProcessed, int64(bytes))
	bam.metrics.LastFrameTime = time.Now()
}

// recordFrameDropped records a dropped frame
func (bam *BaseAudioManager) recordFrameDropped() {
	atomic.AddInt64(&bam.metrics.FramesDropped, 1)
}

// updateLatency updates the average latency
func (bam *BaseAudioManager) updateLatency(latency time.Duration) {
	// Simple moving average - could be enhanced with more sophisticated algorithms
	currentAvg := bam.metrics.AverageLatency
	if currentAvg == 0 {
		bam.metrics.AverageLatency = latency
	} else {
		// Weighted average: 90% old + 10% new
		bam.metrics.AverageLatency = time.Duration(float64(currentAvg)*0.9 + float64(latency)*0.1)
	}
}

// logComponentStart logs component start with consistent format
func (bam *BaseAudioManager) logComponentStart(component string) {
	bam.logger.Debug().Str("component", component).Msg("starting component")
}

// logComponentStarted logs component started with consistent format
func (bam *BaseAudioManager) logComponentStarted(component string) {
	bam.logger.Debug().Str("component", component).Msg("component started successfully")
}

// logComponentStop logs component stop with consistent format
func (bam *BaseAudioManager) logComponentStop(component string) {
	bam.logger.Debug().Str("component", component).Msg("stopping component")
}

// logComponentStopped logs component stopped with consistent format
func (bam *BaseAudioManager) logComponentStopped(component string) {
	bam.logger.Debug().Str("component", component).Msg("component stopped")
}

// logComponentError logs component error with consistent format
func (bam *BaseAudioManager) logComponentError(component string, err error, msg string) {
	bam.logger.Error().Err(err).Str("component", component).Msg(msg)
}
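BaseAudioManager is clearly meant to be embedded by the concrete input and output managers so they share metrics and lifecycle bookkeeping; updateLatency keeps a 90/10 exponentially weighted moving average. A minimal embedding sketch (the outputStreamer type and its process method are hypothetical; only the embedded methods come from the file above):

```go
// Hypothetical consumer of BaseAudioManager, for illustration only.
type outputStreamer struct {
	*BaseAudioManager
}

func (o *outputStreamer) process(frame []byte, latency time.Duration) {
	if !o.IsRunning() {
		o.recordFrameDropped() // count frames rejected while stopped
		return
	}
	o.recordFrameProcessed(len(frame)) // bumps FramesProcessed and BytesProcessed atomically
	o.updateLatency(latency)           // 90% old + 10% new weighted average
}
```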
@@ -0,0 +1,133 @@
//go:build cgo
// +build cgo

package audio

import (
	"context"
	"os/exec"
	"sync"
	"sync/atomic"
	"time"

	"github.com/jetkvm/kvm/internal/logging"
	"github.com/rs/zerolog"
)

// BaseSupervisor provides common functionality for audio supervisors
type BaseSupervisor struct {
	ctx     context.Context
	cancel  context.CancelFunc
	logger  *zerolog.Logger
	mutex   sync.RWMutex
	running int32

	// Process management
	cmd        *exec.Cmd
	processPID int

	// Process monitoring
	processMonitor *ProcessMonitor

	// Exit tracking
	lastExitCode int
	lastExitTime time.Time
}

// NewBaseSupervisor creates a new base supervisor
func NewBaseSupervisor(componentName string) *BaseSupervisor {
	logger := logging.GetDefaultLogger().With().Str("component", componentName).Logger()
	return &BaseSupervisor{
		logger:         &logger,
		processMonitor: GetProcessMonitor(),
	}
}

// IsRunning returns whether the supervisor is currently running
func (bs *BaseSupervisor) IsRunning() bool {
	return atomic.LoadInt32(&bs.running) == 1
}

// setRunning atomically sets the running state
func (bs *BaseSupervisor) setRunning(running bool) {
	if running {
		atomic.StoreInt32(&bs.running, 1)
	} else {
		atomic.StoreInt32(&bs.running, 0)
	}
}

// GetProcessPID returns the current process PID
func (bs *BaseSupervisor) GetProcessPID() int {
	bs.mutex.RLock()
	defer bs.mutex.RUnlock()
	return bs.processPID
}

// GetLastExitInfo returns the last exit code and time
func (bs *BaseSupervisor) GetLastExitInfo() (exitCode int, exitTime time.Time) {
	bs.mutex.RLock()
	defer bs.mutex.RUnlock()
	return bs.lastExitCode, bs.lastExitTime
}

// GetProcessMetrics returns process metrics if available
func (bs *BaseSupervisor) GetProcessMetrics() *ProcessMetrics {
	bs.mutex.RLock()
	defer bs.mutex.RUnlock()

	if bs.cmd == nil || bs.cmd.Process == nil {
		return &ProcessMetrics{
			PID:           0,
			CPUPercent:    0.0,
			MemoryRSS:     0,
			MemoryVMS:     0,
			MemoryPercent: 0.0,
			Timestamp:     time.Now(),
			ProcessName:   "audio-server",
		}
	}

	pid := bs.cmd.Process.Pid
	if bs.processMonitor != nil {
		metrics := bs.processMonitor.GetCurrentMetrics()
		for _, metric := range metrics {
			if metric.PID == pid {
				return &metric
			}
		}
	}

	// Return default metrics if process not found in monitor
	return &ProcessMetrics{
		PID:           pid,
		CPUPercent:    0.0,
		MemoryRSS:     0,
		MemoryVMS:     0,
		MemoryPercent: 0.0,
		Timestamp:     time.Now(),
		ProcessName:   "audio-server",
	}
}

// logSupervisorStart logs supervisor start event
func (bs *BaseSupervisor) logSupervisorStart() {
	bs.logger.Info().Msg("Supervisor starting")
}

// logSupervisorStop logs supervisor stop event
func (bs *BaseSupervisor) logSupervisorStop() {
	bs.logger.Info().Msg("Supervisor stopping")
}

// createContext creates a new context for the supervisor
func (bs *BaseSupervisor) createContext() {
	bs.ctx, bs.cancel = context.WithCancel(context.Background())
}

// cancelContext cancels the supervisor context
func (bs *BaseSupervisor) cancelContext() {
	if bs.cancel != nil {
		bs.cancel()
	}
}
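Concrete supervisors are presumably expected to wrap these helpers in their own Start/Stop methods. A rough wiring sketch under that assumption (the outputSupervisor type and the commented-out supervise loop are placeholders for logic that lives in the concrete supervisor, not in this file):

```go
// Hypothetical concrete supervisor built on BaseSupervisor, illustration only.
type outputSupervisor struct {
	*BaseSupervisor
}

func (s *outputSupervisor) Start() {
	s.logSupervisorStart()
	s.createContext() // fresh ctx/cancel pair for this run
	s.setRunning(true)
	// go s.superviseLoop() // restart-with-backoff loop would live here
}

func (s *outputSupervisor) Stop() {
	s.logSupervisorStop()
	s.setRunning(false)
	s.cancelContext() // unblocks anything waiting on the supervisor context
}
```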
@@ -60,6 +60,18 @@ type batchReadResult struct {

// NewBatchAudioProcessor creates a new batch audio processor
func NewBatchAudioProcessor(batchSize int, batchDuration time.Duration) *BatchAudioProcessor {
	// Validate input parameters
	if err := ValidateBufferSize(batchSize); err != nil {
		logger := logging.GetDefaultLogger().With().Str("component", "batch-audio").Logger()
		logger.Warn().Err(err).Int("batchSize", batchSize).Msg("invalid batch size, using default")
		batchSize = GetConfig().BatchProcessorFramesPerBatch
	}
	if batchDuration <= 0 {
		logger := logging.GetDefaultLogger().With().Str("component", "batch-audio").Logger()
		logger.Warn().Dur("batchDuration", batchDuration).Msg("invalid batch duration, using default")
		batchDuration = GetConfig().BatchProcessingDelay
	}

	ctx, cancel := context.WithCancel(context.Background())
	logger := logging.GetDefaultLogger().With().Str("component", "batch-audio").Logger()

@@ -117,6 +129,12 @@ func (bap *BatchAudioProcessor) Stop() {

// BatchReadEncode performs batched audio read and encode operations
func (bap *BatchAudioProcessor) BatchReadEncode(buffer []byte) (int, error) {
	// Validate buffer before processing
	if err := ValidateBufferSize(len(buffer)); err != nil {
		bap.logger.Debug().Err(err).Msg("invalid buffer for batch processing")
		return 0, err
	}

	if atomic.LoadInt32(&bap.running) == 0 {
		// Fallback to single operation if batch processor is not running
		atomic.AddInt64(&bap.stats.SingleReads, 1)

@@ -202,12 +220,12 @@ func (bap *BatchAudioProcessor) processBatchRead(batch []batchReadRequest) {

	// Set high priority for batch audio processing
	if err := SetAudioThreadPriority(); err != nil {
		bap.logger.Warn().Err(err).Msg("Failed to set batch audio processing priority")
		bap.logger.Warn().Err(err).Msg("failed to set batch audio processing priority")
	}

	defer func() {
		if err := ResetThreadPriority(); err != nil {
			bap.logger.Warn().Err(err).Msg("Failed to reset thread priority")
			bap.logger.Warn().Err(err).Msg("failed to reset thread priority")
		}
		runtime.UnlockOSThread()
		atomic.StoreInt32(&bap.threadPinned, 0)
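BatchReadEncode now validates its buffer up front and falls back to a single read when the batch loop is not running, so a caller can use one code path either way. A usage sketch; the buffer sizing via `GetConfig().MaxAudioFrameSize` is an assumption based on the constants struct later in this diff, and the wrapper function itself is illustrative:

```go
// Read and encode one frame, tolerating a stopped batch processor.
func readOneFrame(bap *BatchAudioProcessor) ([]byte, error) {
	// Non-empty buffer so ValidateBufferSize passes (size constant assumed from config).
	buf := make([]byte, GetConfig().MaxAudioFrameSize)
	n, err := bap.BatchReadEncode(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
```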
@@ -1,11 +1,43 @@
package audio

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/jetkvm/kvm/internal/logging"
)

// Lock-free buffer cache for per-goroutine optimization
type lockFreeBufferCache struct {
	buffers [4]*[]byte // Small fixed-size array for lock-free access
}

// Per-goroutine buffer cache using goroutine-local storage
var goroutineBufferCache = make(map[int64]*lockFreeBufferCache)
var goroutineCacheMutex sync.RWMutex

// getGoroutineID extracts goroutine ID from runtime stack for cache key
func getGoroutineID() int64 {
	b := make([]byte, 64)
	b = b[:runtime.Stack(b, false)]
	// Parse "goroutine 123 [running]:" format
	for i := 10; i < len(b); i++ {
		if b[i] == ' ' {
			id := int64(0)
			for j := 10; j < i; j++ {
				if b[j] >= '0' && b[j] <= '9' {
					id = id*10 + int64(b[j]-'0')
				}
			}
			return id
		}
	}
	return 0
}

type AudioBufferPool struct {
	// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
	currentSize int64 // Current pool size (atomic)
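getGoroutineID leans on the textual format of runtime.Stack, whose header line is "goroutine <id> [state]:"; the hard-coded start index 10 is simply len("goroutine "). A throwaway test sketch that checks that assumption (not part of the change; it needs the runtime, strings and testing imports):

```go
// Verifies the stack-header prefix that getGoroutineID's parser depends on.
func TestGoroutineStackPrefix(t *testing.T) {
	b := make([]byte, 64)
	b = b[:runtime.Stack(b, false)]
	if !strings.HasPrefix(string(b), "goroutine ") {
		t.Fatalf("unexpected stack header: %q", b)
	}
}
```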
@@ -23,23 +55,42 @@ type AudioBufferPool struct {
}

func NewAudioBufferPool(bufferSize int) *AudioBufferPool {
	// Pre-allocate 20% of max pool size for immediate availability
	preallocSize := GetConfig().PreallocPercentage
	// Validate buffer size parameter
	if err := ValidateBufferSize(bufferSize); err != nil {
		// Log validation error and use default value
		logger := logging.GetDefaultLogger().With().Str("component", "AudioBufferPool").Logger()
		logger.Warn().Err(err).Int("bufferSize", bufferSize).Msg("invalid buffer size, using default")
		bufferSize = GetConfig().AudioFramePoolSize
	}

	// Optimize preallocation based on buffer size to reduce memory footprint
	var preallocSize int
	if bufferSize <= GetConfig().AudioFramePoolSize {
		// For frame buffers, use configured percentage
		preallocSize = GetConfig().PreallocPercentage
	} else {
		// For larger buffers, reduce preallocation to save memory
		preallocSize = GetConfig().PreallocPercentage / 2
	}

	// Pre-allocate with exact capacity to avoid slice growth
	preallocated := make([]*[]byte, 0, preallocSize)

	// Pre-allocate buffers to reduce initial allocation overhead
	// Pre-allocate buffers with optimized capacity
	for i := 0; i < preallocSize; i++ {
		// Use exact buffer size to prevent over-allocation
		buf := make([]byte, 0, bufferSize)
		preallocated = append(preallocated, &buf)
	}

	return &AudioBufferPool{
		bufferSize:   bufferSize,
		maxPoolSize:  GetConfig().MaxPoolSize, // Limit pool size to prevent excessive memory usage
		maxPoolSize:  GetConfig().MaxPoolSize,
		preallocated: preallocated,
		preallocSize: preallocSize,
		pool: sync.Pool{
			New: func() interface{} {
				// Allocate exact size to minimize memory waste
				buf := make([]byte, 0, bufferSize)
				return &buf
			},
@@ -49,41 +100,72 @@ func NewAudioBufferPool(bufferSize int) *AudioBufferPool {

func (p *AudioBufferPool) Get() []byte {
	start := time.Now()
	wasHit := false
	defer func() {
		latency := time.Since(start)
		// Record metrics for frame pool (assuming this is the main usage)
		if p.bufferSize >= GetConfig().AudioFramePoolSize {
			GetGranularMetricsCollector().RecordFramePoolGet(latency, atomic.LoadInt64(&p.hitCount) > 0)
			GetGranularMetricsCollector().RecordFramePoolGet(latency, wasHit)
		} else {
			GetGranularMetricsCollector().RecordControlPoolGet(latency, atomic.LoadInt64(&p.hitCount) > 0)
			GetGranularMetricsCollector().RecordControlPoolGet(latency, wasHit)
		}
	}()

	// First try pre-allocated buffers for fastest access
	// Fast path: Try lock-free per-goroutine cache first
	gid := getGoroutineID()
	goroutineCacheMutex.RLock()
	cache, exists := goroutineBufferCache[gid]
	goroutineCacheMutex.RUnlock()

	if exists && cache != nil {
		// Try to get buffer from lock-free cache
		for i := 0; i < len(cache.buffers); i++ {
			bufPtr := (*unsafe.Pointer)(unsafe.Pointer(&cache.buffers[i]))
			buf := (*[]byte)(atomic.LoadPointer(bufPtr))
			if buf != nil && atomic.CompareAndSwapPointer(bufPtr, unsafe.Pointer(buf), nil) {
				atomic.AddInt64(&p.hitCount, 1)
				wasHit = true
				*buf = (*buf)[:0]
				return *buf
			}
		}
	}

	// Fallback: Try pre-allocated pool with mutex
	p.mutex.Lock()
	if len(p.preallocated) > 0 {
		buf := p.preallocated[len(p.preallocated)-1]
		p.preallocated = p.preallocated[:len(p.preallocated)-1]
		lastIdx := len(p.preallocated) - 1
		buf := p.preallocated[lastIdx]
		p.preallocated = p.preallocated[:lastIdx]
		p.mutex.Unlock()

		// Update hit counter
		atomic.AddInt64(&p.hitCount, 1)
		return (*buf)[:0] // Reset length but keep capacity
		wasHit = true
		// Ensure buffer is properly reset
		*buf = (*buf)[:0]
		return *buf
	}
	p.mutex.Unlock()

	// Try sync.Pool next
	if buf := p.pool.Get(); buf != nil {
		bufPtr := buf.(*[]byte)
		// Update pool size counter when retrieving from pool
		p.mutex.Lock()
		if p.currentSize > 0 {
			p.currentSize--
		}
		p.mutex.Unlock()
	if poolBuf := p.pool.Get(); poolBuf != nil {
		buf := poolBuf.(*[]byte)
		// Update hit counter
		atomic.AddInt64(&p.hitCount, 1)
		return (*bufPtr)[:0] // Reset length but keep capacity
		// Ensure buffer is properly reset and check capacity
		if cap(*buf) >= p.bufferSize {
			wasHit = true
			*buf = (*buf)[:0]
			return *buf
		} else {
			// Buffer too small, allocate new one
			atomic.AddInt64(&p.missCount, 1)
			return make([]byte, 0, p.bufferSize)
		}
	}

	// Last resort: allocate new buffer
	// Pool miss - allocate new buffer with exact capacity
	atomic.AddInt64(&p.missCount, 1)
	return make([]byte, 0, p.bufferSize)
}
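Every path in Get returns a zero-length slice with at least bufferSize capacity, so callers append into it and hand it back when finished. A typical call-site sketch (the surrounding function and the encoder hand-off are illustrative):

```go
// Borrow a buffer from the pool, fill it, and return it.
func encodeWithPool(p *AudioBufferPool, frame []byte) {
	buf := p.Get()              // len 0, cap >= the pool's configured buffer size
	buf = append(buf, frame...) // stays in place as long as frame fits the capacity
	// ... hand buf to the encoder ...
	p.Put(buf) // Put re-pools it only if the capacity still matches the pool
}
```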
@@ -100,14 +182,40 @@ func (p *AudioBufferPool) Put(buf []byte) {
		}
	}()

	if cap(buf) < p.bufferSize {
		return // Buffer too small, don't pool it
	// Validate buffer capacity - reject buffers that are too small or too large
	bufCap := cap(buf)
	if bufCap < p.bufferSize || bufCap > p.bufferSize*2 {
		return // Buffer size mismatch, don't pool it to prevent memory bloat
	}

	// Reset buffer for reuse
	// Reset buffer for reuse - clear any sensitive data
	resetBuf := buf[:0]

	// First try to return to pre-allocated pool for fastest reuse
	// Fast path: Try to put in lock-free per-goroutine cache
	gid := getGoroutineID()
	goroutineCacheMutex.RLock()
	cache, exists := goroutineBufferCache[gid]
	goroutineCacheMutex.RUnlock()

	if !exists {
		// Create new cache for this goroutine
		cache = &lockFreeBufferCache{}
		goroutineCacheMutex.Lock()
		goroutineBufferCache[gid] = cache
		goroutineCacheMutex.Unlock()
	}

	if cache != nil {
		// Try to store in lock-free cache
		for i := 0; i < len(cache.buffers); i++ {
			bufPtr := (*unsafe.Pointer)(unsafe.Pointer(&cache.buffers[i]))
			if atomic.CompareAndSwapPointer(bufPtr, nil, unsafe.Pointer(&buf)) {
				return // Successfully cached
			}
		}
	}

	// Fallback: Try to return to pre-allocated pool for fastest reuse
	p.mutex.Lock()
	if len(p.preallocated) < p.preallocSize {
		p.preallocated = append(p.preallocated, &resetBuf)

@@ -117,10 +225,7 @@ func (p *AudioBufferPool) Put(buf []byte) {
	p.mutex.Unlock()

	// Check sync.Pool size limit to prevent excessive memory usage
	p.mutex.RLock()
	currentSize := p.currentSize
	p.mutex.RUnlock()

	currentSize := atomic.LoadInt64(&p.currentSize)
	if currentSize >= int64(p.maxPoolSize) {
		return // Pool is full, let GC handle this buffer
	}

@@ -128,10 +233,8 @@ func (p *AudioBufferPool) Put(buf []byte) {
	// Return to sync.Pool
	p.pool.Put(&resetBuf)

	// Update pool size counter
	p.mutex.Lock()
	p.currentSize++
	p.mutex.Unlock()
	// Update pool size counter atomically
	atomic.AddInt64(&p.currentSize, 1)
}

var (
@@ -36,11 +36,13 @@ static int channels = 2; // Will be set from GetConfig().CGOChann
static int frame_size = 960;          // Will be set from GetConfig().CGOFrameSize
static int max_packet_size = 1500;    // Will be set from GetConfig().CGOMaxPacketSize
static int sleep_microseconds = 1000; // Will be set from GetConfig().CGOUsleepMicroseconds
static int max_attempts_global = 5;        // Will be set from GetConfig().CGOMaxAttempts
static int max_backoff_us_global = 500000; // Will be set from GetConfig().CGOMaxBackoffMicroseconds

// Function to update constants from Go configuration
void update_audio_constants(int bitrate, int complexity, int vbr, int vbr_constraint,
                            int signal_type, int bandwidth, int dtx, int sr, int ch,
                            int fs, int max_pkt, int sleep_us) {
                            int fs, int max_pkt, int sleep_us, int max_attempts, int max_backoff) {
    opus_bitrate = bitrate;
    opus_complexity = complexity;
    opus_vbr = vbr;

@@ -53,6 +55,8 @@ void update_audio_constants(int bitrate, int complexity, int vbr, int vbr_constr
    frame_size = fs;
    max_packet_size = max_pkt;
    sleep_microseconds = sleep_us;
    max_attempts_global = max_attempts;
    max_backoff_us_global = max_backoff;
}

// State tracking to prevent race conditions during rapid start/stop

@@ -61,15 +65,42 @@ static volatile int capture_initialized = 0;
static volatile int playback_initializing = 0;
static volatile int playback_initialized = 0;

// Function to dynamically update Opus encoder parameters
int update_opus_encoder_params(int bitrate, int complexity, int vbr, int vbr_constraint,
                               int signal_type, int bandwidth, int dtx) {
    if (!encoder || !capture_initialized) {
        return -1; // Encoder not initialized
    }

    // Update the static variables
    opus_bitrate = bitrate;
    opus_complexity = complexity;
    opus_vbr = vbr;
    opus_vbr_constraint = vbr_constraint;
    opus_signal_type = signal_type;
    opus_bandwidth = bandwidth;
    opus_dtx = dtx;

    // Apply the new settings to the encoder
    int result = 0;
    result |= opus_encoder_ctl(encoder, OPUS_SET_BITRATE(opus_bitrate));
    result |= opus_encoder_ctl(encoder, OPUS_SET_COMPLEXITY(opus_complexity));
    result |= opus_encoder_ctl(encoder, OPUS_SET_VBR(opus_vbr));
    result |= opus_encoder_ctl(encoder, OPUS_SET_VBR_CONSTRAINT(opus_vbr_constraint));
    result |= opus_encoder_ctl(encoder, OPUS_SET_SIGNAL(opus_signal_type));
    result |= opus_encoder_ctl(encoder, OPUS_SET_BANDWIDTH(opus_bandwidth));
    result |= opus_encoder_ctl(encoder, OPUS_SET_DTX(opus_dtx));

    return result; // 0 on success, non-zero on error
}

// Enhanced ALSA device opening with exponential backoff retry logic
static int safe_alsa_open(snd_pcm_t **handle, const char *device, snd_pcm_stream_t stream) {
    int max_attempts = 5; // Increased from 3 to 5
    int attempt = 0;
    int err;
    int backoff_us = sleep_microseconds; // Start with base sleep time
    const int max_backoff_us = 500000; // Max 500ms backoff

    while (attempt < max_attempts) {
    while (attempt < max_attempts_global) {
        err = snd_pcm_open(handle, device, stream, SND_PCM_NONBLOCK);
        if (err >= 0) {
            // Switch to blocking mode after successful open

@@ -78,24 +109,24 @@ static int safe_alsa_open(snd_pcm_t **handle, const char *device, snd_pcm_stream
        }

        attempt++;
        if (attempt >= max_attempts) break;
        if (attempt >= max_attempts_global) break;

        // Enhanced error handling with specific retry strategies
        if (err == -EBUSY || err == -EAGAIN) {
            // Device busy or temporarily unavailable - retry with backoff
            usleep(backoff_us);
            backoff_us = (backoff_us * 2 < max_backoff_us) ? backoff_us * 2 : max_backoff_us;
            backoff_us = (backoff_us * 2 < max_backoff_us_global) ? backoff_us * 2 : max_backoff_us_global;
        } else if (err == -ENODEV || err == -ENOENT) {
            // Device not found - longer wait as device might be initializing
            usleep(backoff_us * 2);
            backoff_us = (backoff_us * 2 < max_backoff_us) ? backoff_us * 2 : max_backoff_us;
            backoff_us = (backoff_us * 2 < max_backoff_us_global) ? backoff_us * 2 : max_backoff_us_global;
        } else if (err == -EPERM || err == -EACCES) {
            // Permission denied - shorter wait, likely persistent issue
            usleep(backoff_us / 2);
        } else {
            // Other errors - standard backoff
            usleep(backoff_us);
            backoff_us = (backoff_us * 2 < max_backoff_us) ? backoff_us * 2 : max_backoff_us;
            backoff_us = (backoff_us * 2 < max_backoff_us_global) ? backoff_us * 2 : max_backoff_us_global;
        }
    }
    return err;
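The retry loop doubles its sleep after each failure and clamps it at the configured maximum, which this change sources from CGOMaxBackoffMicroseconds instead of a hard-coded 500 ms. The same arithmetic expressed in Go, as a small sketch (function name is illustrative; it only mirrors the C ternary above):

```go
// nextBackoff mirrors the C retry loop: double the delay, but never exceed the cap.
func nextBackoff(current, maxBackoff time.Duration) time.Duration {
	if current*2 < maxBackoff {
		return current * 2
	}
	return maxBackoff
}
```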
@@ -234,31 +265,9 @@ int jetkvm_audio_init() {
    return 0;
}

// jetkvm_audio_read_encode reads one audio frame from ALSA, encodes it with Opus, and handles errors.
//
// This function implements a robust audio capture pipeline with the following features:
// - ALSA PCM capture with automatic device recovery
// - Opus encoding with optimized settings for real-time processing
// - Progressive error recovery with exponential backoff
// - Buffer underrun and device suspension handling
//
// Error Recovery Strategy:
// 1. EPIPE (buffer underrun): Prepare device and retry with progressive delays
// 2. ESTRPIPE (device suspended): Resume device with timeout and fallback to prepare
// 3. Other errors: Log and attempt recovery up to max_recovery_attempts
//
// Performance Optimizations:
// - Stack-allocated PCM buffer to avoid heap allocations
// - Direct memory access for Opus encoding
// - Minimal system calls in the hot path
//
// Parameters:
//   opus_buf: Output buffer for encoded Opus data (must be at least max_packet_size bytes)
//
// Returns:
//   >0: Number of bytes written to opus_buf
//   -1: Initialization error or safety check failure
//   -2: Unrecoverable ALSA or Opus error after all retry attempts
// jetkvm_audio_read_encode captures audio from ALSA, encodes with Opus, and handles errors.
// Implements robust error recovery for buffer underruns and device suspension.
// Returns: >0 (bytes written), -1 (init error), -2 (unrecoverable error)
int jetkvm_audio_read_encode(void *opus_buf) {
    short pcm_buffer[1920]; // max 2ch*960
    unsigned char *out = (unsigned char*)opus_buf;
@ -608,29 +617,59 @@ var (
errInvalidBufferPtr = errors.New("invalid buffer pointer")
)

// Error creation functions with context
// Error creation functions with enhanced context
func newBufferTooSmallError(actual, required int) error {
return fmt.Errorf("buffer too small: got %d bytes, need at least %d bytes", actual, required)
baseErr := fmt.Errorf("buffer too small: got %d bytes, need at least %d bytes", actual, required)
return WrapWithMetadata(baseErr, "cgo_audio", "buffer_validation", map[string]interface{}{
"actual_size": actual,
"required_size": required,
"error_type": "buffer_undersize",
})
}

func newBufferTooLargeError(actual, max int) error {
return fmt.Errorf("buffer too large: got %d bytes, maximum allowed %d bytes", actual, max)
baseErr := fmt.Errorf("buffer too large: got %d bytes, maximum allowed %d bytes", actual, max)
return WrapWithMetadata(baseErr, "cgo_audio", "buffer_validation", map[string]interface{}{
"actual_size": actual,
"max_size": max,
"error_type": "buffer_oversize",
})
}

func newAudioInitError(cErrorCode int) error {
return fmt.Errorf("%w: C error code %d", errAudioInitFailed, cErrorCode)
baseErr := fmt.Errorf("%w: C error code %d", errAudioInitFailed, cErrorCode)
return WrapWithMetadata(baseErr, "cgo_audio", "initialization", map[string]interface{}{
"c_error_code": cErrorCode,
"error_type": "init_failure",
"severity": "critical",
})
}

func newAudioPlaybackInitError(cErrorCode int) error {
return fmt.Errorf("%w: C error code %d", errAudioPlaybackInit, cErrorCode)
baseErr := fmt.Errorf("%w: C error code %d", errAudioPlaybackInit, cErrorCode)
return WrapWithMetadata(baseErr, "cgo_audio", "playback_init", map[string]interface{}{
"c_error_code": cErrorCode,
"error_type": "playback_init_failure",
"severity": "high",
})
}

func newAudioReadEncodeError(cErrorCode int) error {
return fmt.Errorf("%w: C error code %d", errAudioReadEncode, cErrorCode)
baseErr := fmt.Errorf("%w: C error code %d", errAudioReadEncode, cErrorCode)
return WrapWithMetadata(baseErr, "cgo_audio", "read_encode", map[string]interface{}{
"c_error_code": cErrorCode,
"error_type": "read_encode_failure",
"severity": "medium",
})
}

func newAudioDecodeWriteError(cErrorCode int) error {
return fmt.Errorf("%w: C error code %d", errAudioDecodeWrite, cErrorCode)
baseErr := fmt.Errorf("%w: C error code %d", errAudioDecodeWrite, cErrorCode)
return WrapWithMetadata(baseErr, "cgo_audio", "decode_write", map[string]interface{}{
"c_error_code": cErrorCode,
"error_type": "decode_write_failure",
"severity": "medium",
})
}
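Because each helper above builds its base error with %w, the original sentinel (errAudioInitFailed, errAudioReadEncode, and so on) should still be reachable with errors.Is, provided WrapWithMetadata preserves the wrapped chain; that function is not shown in this compare, so treat it as an assumption. A minimal sketch under that assumption:

// Sketch: sentinel checks keep working after the metadata wrapping (assumed behaviour).
func isInitFailure(code int) bool {
	err := newAudioInitError(code)
	return errors.Is(err, errAudioInitFailed)
}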

func cgoAudioInit() error {

@ -649,6 +688,8 @@ func cgoAudioInit() error {
C.int(config.CGOFrameSize),
C.int(config.CGOMaxPacketSize),
C.int(config.CGOUsleepMicroseconds),
C.int(config.CGOMaxAttempts),
C.int(config.CGOMaxBackoffMicroseconds),
)

result := C.jetkvm_audio_init()
@ -721,12 +762,30 @@ func cgoAudioDecodeWrite(buf []byte) (int, error) {
return int(n), nil
}

// updateOpusEncoderParams dynamically updates OPUS encoder parameters
func updateOpusEncoderParams(bitrate, complexity, vbr, vbrConstraint, signalType, bandwidth, dtx int) error {
result := C.update_opus_encoder_params(
C.int(bitrate),
C.int(complexity),
C.int(vbr),
C.int(vbrConstraint),
C.int(signalType),
C.int(bandwidth),
C.int(dtx),
)
if result != 0 {
return fmt.Errorf("failed to update OPUS encoder parameters: C error code %d", result)
}
return nil
}

// CGO function aliases
var (
CGOAudioInit = cgoAudioInit
CGOAudioClose = cgoAudioClose
CGOAudioReadEncode = cgoAudioReadEncode
CGOAudioPlaybackInit = cgoAudioPlaybackInit
CGOAudioPlaybackClose = cgoAudioPlaybackClose
CGOAudioDecodeWrite = cgoAudioDecodeWrite
CGOAudioInit = cgoAudioInit
CGOAudioClose = cgoAudioClose
CGOAudioReadEncode = cgoAudioReadEncode
CGOAudioPlaybackInit = cgoAudioPlaybackInit
CGOAudioPlaybackClose = cgoAudioPlaybackClose
CGOAudioDecodeWrite = cgoAudioDecodeWrite
CGOUpdateOpusEncoderParams = updateOpusEncoderParams
)
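The new CGOUpdateOpusEncoderParams alias is the hook that lets higher-level code push one of the quality presets from config_constants.go (the next file in this compare) down to the live encoder. A hedged sketch of such a call follows; the wrapper function and the kbps-to-bps conversion are assumptions, while the parameter order and preset fields come from this diff:

// Sketch: applying the medium-quality preset to the running Opus encoder.
func applyMediumPresetSketch() error {
	cfg := GetConfig()
	return CGOUpdateOpusEncoderParams(
		cfg.AudioQualityMediumOutputBitrate*1000, // preset bitrates are in kbps; encoder wants bps (assumed conversion)
		cfg.AudioQualityMediumOpusComplexity,     // 5: balanced CPU vs quality
		cfg.AudioQualityMediumOpusVBR,            // 1: variable bitrate
		cfg.OpusVBRConstraint,                    // 0: unconstrained VBR
		cfg.AudioQualityMediumOpusSignalType,     // 3002: OPUS_SIGNAL_MUSIC
		cfg.AudioQualityMediumOpusBandwidth,      // 1103: OPUS_BANDWIDTH_WIDEBAND
		cfg.AudioQualityMediumOpusDTX,            // 0: DTX disabled
	)
}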
@ -1,213 +1,85 @@
package audio

import "time"
import (
"time"

"github.com/jetkvm/kvm/internal/logging"
)

// AudioConfigConstants centralizes all hardcoded values used across audio components.
// This configuration system allows runtime tuning of audio performance, quality, and resource usage.
// Each constant is documented with its purpose, usage location, and impact on system behavior.
type AudioConfigConstants struct {
// Audio Quality Presets
// MaxAudioFrameSize defines the maximum size of an audio frame in bytes.
// Used in: buffer_pool.go, adaptive_buffer.go
// Impact: Higher values allow larger audio chunks but increase memory usage and latency.
// Typical range: 1024-8192 bytes. Default 4096 provides good balance.
MaxAudioFrameSize int
MaxAudioFrameSize int // Maximum audio frame size in bytes (default: 4096)

// Opus Encoding Parameters - Core codec settings for audio compression
// OpusBitrate sets the target bitrate for Opus encoding in bits per second.
// Used in: cgo_audio.go for encoder initialization
// Impact: Higher bitrates improve audio quality but increase bandwidth usage.
// Range: 6000-510000 bps. 128000 (128kbps) provides high quality for most use cases.
OpusBitrate int
// Opus Encoding Parameters
OpusBitrate int // Target bitrate for Opus encoding in bps (default: 128000)
OpusComplexity int // Computational complexity 0-10 (default: 10 for best quality)
OpusVBR int // Variable Bit Rate: 0=CBR, 1=VBR (default: 1)
OpusVBRConstraint int // VBR constraint: 0=unconstrained, 1=constrained (default: 0)
OpusDTX int // Discontinuous Transmission: 0=disabled, 1=enabled (default: 0)

// OpusComplexity controls the computational complexity of Opus encoding (0-10).
// Used in: cgo_audio.go for encoder configuration
// Impact: Higher values improve quality but increase CPU usage and encoding latency.
// Range: 0-10. Value 10 provides best quality, 0 fastest encoding.
OpusComplexity int
// Audio Parameters
SampleRate int // Audio sampling frequency in Hz (default: 48000)
Channels int // Number of audio channels: 1=mono, 2=stereo (default: 2)
FrameSize int // Samples per audio frame (default: 960 for 20ms at 48kHz)
MaxPacketSize int // Maximum encoded packet size in bytes (default: 4000)

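// Worked example for the defaults above (illustration added here, not in the original source):
// FrameSize 960 samples at SampleRate 48000 Hz is 960/48000 = 0.020 s, i.e. a 20 ms frame.
// With Channels = 2 and 16-bit PCM that is 960 * 2 * 2 = 3840 bytes of raw audio per frame,
// which fits inside MaxAudioFrameSize (4096) before Opus compresses it to at most
// MaxPacketSize (4000) bytes.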
// OpusVBR enables Variable Bit Rate encoding (0=CBR, 1=VBR).
|
||||
// Used in: cgo_audio.go for encoder mode selection
|
||||
// Impact: VBR (1) adapts bitrate to content complexity, improving efficiency.
|
||||
// CBR (0) maintains constant bitrate for predictable bandwidth usage.
|
||||
OpusVBR int
|
||||
// Audio Quality Bitrates (kbps)
|
||||
AudioQualityLowOutputBitrate int // Low-quality output bitrate (default: 32)
|
||||
AudioQualityLowInputBitrate int // Low-quality input bitrate (default: 16)
|
||||
AudioQualityMediumOutputBitrate int // Medium-quality output bitrate (default: 64)
|
||||
AudioQualityMediumInputBitrate int // Medium-quality input bitrate (default: 32)
|
||||
AudioQualityHighOutputBitrate int // High-quality output bitrate (default: 128)
|
||||
AudioQualityHighInputBitrate int // High-quality input bitrate (default: 64)
|
||||
AudioQualityUltraOutputBitrate int // Ultra-quality output bitrate (default: 192)
|
||||
AudioQualityUltraInputBitrate int // Ultra-quality input bitrate (default: 96)
|
||||
|
||||
// OpusVBRConstraint enables constrained VBR mode (0=unconstrained, 1=constrained).
|
||||
// Used in: cgo_audio.go when VBR is enabled
|
||||
// Impact: Constrained VBR (1) limits bitrate variation for more predictable bandwidth.
|
||||
// Unconstrained (0) allows full bitrate adaptation for optimal quality.
|
||||
OpusVBRConstraint int
|
||||
// Audio Quality Sample Rates (Hz)
|
||||
AudioQualityLowSampleRate int // Low-quality sample rate (default: 22050)
|
||||
AudioQualityMediumSampleRate int // Medium-quality sample rate (default: 44100)
|
||||
AudioQualityMicLowSampleRate int // Low-quality microphone sample rate (default: 16000)
|
||||
|
||||
// OpusDTX enables Discontinuous Transmission (0=disabled, 1=enabled).
|
||||
// Used in: cgo_audio.go for encoder optimization
|
||||
// Impact: DTX (1) reduces bandwidth during silence but may cause audio artifacts.
|
||||
// Disabled (0) maintains constant transmission for consistent quality.
|
||||
OpusDTX int
|
||||
// Audio Quality Frame Sizes
|
||||
AudioQualityLowFrameSize time.Duration // Low-quality frame duration (default: 40ms)
|
||||
AudioQualityMediumFrameSize time.Duration // Medium-quality frame duration (default: 20ms)
|
||||
AudioQualityHighFrameSize time.Duration // High-quality frame duration (default: 20ms)
|
||||
|
||||
// Audio Parameters - Fundamental audio stream characteristics
|
||||
// SampleRate defines the number of audio samples per second in Hz.
|
||||
// Used in: All audio processing components
|
||||
// Impact: Higher rates improve frequency response but increase processing load.
|
||||
// Common values: 16000 (voice), 44100 (CD quality), 48000 (professional).
|
||||
SampleRate int
|
||||
AudioQualityUltraFrameSize time.Duration // Ultra-quality frame duration (default: 10ms)
|
||||
|
||||
// Channels specifies the number of audio channels (1=mono, 2=stereo).
|
||||
// Used in: All audio processing and encoding/decoding operations
|
||||
// Impact: Stereo (2) provides spatial audio but doubles bandwidth and processing.
|
||||
// Mono (1) reduces resource usage but loses spatial information.
|
||||
Channels int
|
||||
// Audio Quality Channels
|
||||
AudioQualityLowChannels int // Low-quality channel count (default: 1)
|
||||
AudioQualityMediumChannels int // Medium-quality channel count (default: 2)
|
||||
AudioQualityHighChannels int // High-quality channel count (default: 2)
|
||||
AudioQualityUltraChannels int // Ultra-quality channel count (default: 2)
|
||||
|
||||
// FrameSize defines the number of samples per audio frame.
|
||||
// Used in: Opus encoding/decoding, buffer management
|
||||
// Impact: Larger frames reduce overhead but increase latency.
|
||||
// Must match Opus frame sizes: 120, 240, 480, 960, 1920, 2880 samples.
|
||||
FrameSize int
|
||||
// Audio Quality OPUS Encoder Parameters
|
||||
AudioQualityLowOpusComplexity int // Low-quality OPUS complexity (default: 1)
|
||||
AudioQualityLowOpusVBR int // Low-quality OPUS VBR setting (default: 0)
|
||||
AudioQualityLowOpusSignalType int // Low-quality OPUS signal type (default: 3001)
|
||||
AudioQualityLowOpusBandwidth int // Low-quality OPUS bandwidth (default: 1101)
|
||||
AudioQualityLowOpusDTX int // Low-quality OPUS DTX setting (default: 1)
|
||||
|
||||
// MaxPacketSize sets the maximum size of encoded audio packets in bytes.
|
||||
// Used in: Network transmission, buffer allocation
|
||||
// Impact: Larger packets reduce network overhead but increase burst bandwidth.
|
||||
// Should accommodate worst-case Opus output plus protocol headers.
|
||||
MaxPacketSize int
|
||||
AudioQualityMediumOpusComplexity int // Medium-quality OPUS complexity (default: 5)
|
||||
AudioQualityMediumOpusVBR int // Medium-quality OPUS VBR setting (default: 1)
|
||||
AudioQualityMediumOpusSignalType int // Medium-quality OPUS signal type (default: 3002)
|
||||
AudioQualityMediumOpusBandwidth int // Medium-quality OPUS bandwidth (default: 1103)
|
||||
AudioQualityMediumOpusDTX int // Medium-quality OPUS DTX setting (default: 0)
|
||||
|
||||
// Audio Quality Bitrates - Predefined quality presets for different use cases
|
||||
// These bitrates are used in audio.go for quality level selection
|
||||
// Impact: Higher bitrates improve audio fidelity but increase bandwidth usage
|
||||
AudioQualityHighOpusComplexity int // High-quality OPUS complexity (default: 8)
|
||||
AudioQualityHighOpusVBR int // High-quality OPUS VBR setting (default: 1)
|
||||
AudioQualityHighOpusSignalType int // High-quality OPUS signal type (default: 3002)
|
||||
AudioQualityHighOpusBandwidth int // High-quality OPUS bandwidth (default: 1104)
|
||||
AudioQualityHighOpusDTX int // High-quality OPUS DTX setting (default: 0)
|
||||
|
||||
// AudioQualityLowOutputBitrate defines bitrate for low-quality audio output (kbps).
|
||||
// Used in: audio.go for bandwidth-constrained scenarios
|
||||
// Impact: Minimal bandwidth usage but reduced audio quality. Suitable for voice-only.
|
||||
// Default 32kbps provides acceptable voice quality with very low bandwidth.
|
||||
AudioQualityLowOutputBitrate int
|
||||
AudioQualityUltraOpusComplexity int // Ultra-quality OPUS complexity (default: 10)
|
||||
AudioQualityUltraOpusVBR int // Ultra-quality OPUS VBR setting (default: 1)
|
||||
AudioQualityUltraOpusSignalType int // Ultra-quality OPUS signal type (default: 3002)
|
||||
AudioQualityUltraOpusBandwidth int // Ultra-quality OPUS bandwidth (default: 1105)
|
||||
AudioQualityUltraOpusDTX int // Ultra-quality OPUS DTX setting (default: 0)
|
||||
|
||||
// AudioQualityLowInputBitrate defines bitrate for low-quality audio input (kbps).
|
||||
// Used in: audio.go for microphone input in low-bandwidth scenarios
|
||||
// Impact: Reduces upload bandwidth but may affect voice clarity.
|
||||
// Default 16kbps suitable for basic voice communication.
|
||||
AudioQualityLowInputBitrate int
|
||||
|
||||
// AudioQualityMediumOutputBitrate defines bitrate for medium-quality audio output (kbps).
|
||||
// Used in: audio.go for balanced quality/bandwidth scenarios
|
||||
// Impact: Good balance between quality and bandwidth usage.
|
||||
// Default 64kbps provides clear voice and acceptable music quality.
|
||||
AudioQualityMediumOutputBitrate int
|
||||
|
||||
// AudioQualityMediumInputBitrate defines bitrate for medium-quality audio input (kbps).
|
||||
// Used in: audio.go for microphone input with balanced quality
|
||||
// Impact: Better voice quality than low setting with moderate bandwidth usage.
|
||||
// Default 32kbps suitable for clear voice communication.
|
||||
AudioQualityMediumInputBitrate int
|
||||
|
||||
// AudioQualityHighOutputBitrate defines bitrate for high-quality audio output (kbps).
|
||||
// Used in: audio.go for high-fidelity audio scenarios
|
||||
// Impact: Excellent audio quality but higher bandwidth requirements.
|
||||
// Default 128kbps provides near-CD quality for music and crystal-clear voice.
|
||||
AudioQualityHighOutputBitrate int
|
||||
|
||||
// AudioQualityHighInputBitrate defines bitrate for high-quality audio input (kbps).
|
||||
// Used in: audio.go for high-quality microphone capture
|
||||
// Impact: Superior voice quality but increased upload bandwidth usage.
|
||||
// Default 64kbps suitable for professional voice communication.
|
||||
AudioQualityHighInputBitrate int
|
||||
|
||||
// AudioQualityUltraOutputBitrate defines bitrate for ultra-high-quality audio output (kbps).
|
||||
// Used in: audio.go for maximum quality scenarios
|
||||
// Impact: Maximum audio fidelity but highest bandwidth consumption.
|
||||
// Default 192kbps provides studio-quality audio for critical applications.
|
||||
AudioQualityUltraOutputBitrate int
|
||||
|
||||
// AudioQualityUltraInputBitrate defines bitrate for ultra-high-quality audio input (kbps).
|
||||
// Used in: audio.go for maximum quality microphone capture
|
||||
// Impact: Best possible voice quality but maximum upload bandwidth usage.
|
||||
// Default 96kbps suitable for broadcast-quality voice communication.
|
||||
AudioQualityUltraInputBitrate int
|
||||
|
||||
// Audio Quality Sample Rates - Frequency sampling rates for different quality levels
|
||||
// Used in: audio.go for configuring audio capture and playback sample rates
|
||||
// Impact: Higher sample rates capture more frequency detail but increase processing load
|
||||
|
||||
// AudioQualityLowSampleRate defines sample rate for low-quality audio (Hz).
|
||||
// Used in: audio.go for bandwidth-constrained scenarios
|
||||
// Impact: Reduces frequency response but minimizes processing and bandwidth.
|
||||
// Default 22050Hz captures frequencies up to 11kHz, adequate for voice.
|
||||
AudioQualityLowSampleRate int
|
||||
|
||||
// AudioQualityMediumSampleRate defines sample rate for medium-quality audio (Hz).
|
||||
// Used in: audio.go for balanced quality scenarios
|
||||
// Impact: Good frequency response with moderate processing requirements.
|
||||
// Default 44100Hz (CD quality) captures frequencies up to 22kHz.
|
||||
AudioQualityMediumSampleRate int
|
||||
|
||||
// AudioQualityMicLowSampleRate defines sample rate for low-quality microphone input (Hz).
|
||||
// Used in: audio.go for microphone capture in constrained scenarios
|
||||
// Impact: Optimized for voice communication with minimal processing overhead.
|
||||
// Default 16000Hz captures voice frequencies (300-3400Hz) efficiently.
|
||||
AudioQualityMicLowSampleRate int
|
||||
|
||||
// Audio Quality Frame Sizes - Duration of audio frames for different quality levels
|
||||
// Used in: audio.go for configuring Opus frame duration
|
||||
// Impact: Larger frames reduce overhead but increase latency and memory usage
|
||||
|
||||
// AudioQualityLowFrameSize defines frame duration for low-quality audio.
|
||||
// Used in: audio.go for low-latency scenarios with minimal processing
|
||||
// Impact: Longer frames reduce CPU overhead but increase audio latency.
|
||||
// Default 40ms provides good efficiency for voice communication.
|
||||
AudioQualityLowFrameSize time.Duration
|
||||
|
||||
// AudioQualityMediumFrameSize defines frame duration for medium-quality audio.
|
||||
// Used in: audio.go for balanced latency and efficiency
|
||||
// Impact: Moderate frame size balances latency and processing efficiency.
|
||||
// Default 20ms provides good balance for most applications.
|
||||
AudioQualityMediumFrameSize time.Duration
|
||||
|
||||
// AudioQualityHighFrameSize defines frame duration for high-quality audio.
|
||||
// Used in: audio.go for high-quality scenarios
|
||||
// Impact: Optimized frame size for high-quality encoding efficiency.
|
||||
// Default 20ms maintains low latency while supporting high bitrates.
|
||||
AudioQualityHighFrameSize time.Duration
|
||||
|
||||
// AudioQualityUltraFrameSize defines frame duration for ultra-quality audio.
|
||||
// Used in: audio.go for maximum quality scenarios
|
||||
// Impact: Smaller frames reduce latency but increase processing overhead.
|
||||
// Default 10ms provides minimal latency for real-time applications.
|
||||
AudioQualityUltraFrameSize time.Duration
|
||||
|
||||
// Audio Quality Channels - Channel configuration for different quality levels
|
||||
// Used in: audio.go for configuring mono/stereo audio
|
||||
// Impact: Stereo doubles bandwidth and processing but provides spatial audio
|
||||
|
||||
// AudioQualityLowChannels defines channel count for low-quality audio.
|
||||
// Used in: audio.go for bandwidth-constrained scenarios
|
||||
// Impact: Mono (1) minimizes bandwidth and processing for voice communication.
|
||||
// Default 1 (mono) suitable for voice-only applications.
|
||||
AudioQualityLowChannels int
|
||||
|
||||
// AudioQualityMediumChannels defines channel count for medium-quality audio.
|
||||
// Used in: audio.go for balanced quality scenarios
|
||||
// Impact: Stereo (2) provides spatial audio with moderate bandwidth increase.
|
||||
// Default 2 (stereo) suitable for general audio applications.
|
||||
AudioQualityMediumChannels int
|
||||
|
||||
// AudioQualityHighChannels defines channel count for high-quality audio.
|
||||
// Used in: audio.go for high-fidelity scenarios
|
||||
// Impact: Stereo (2) essential for high-quality music and spatial audio.
|
||||
// Default 2 (stereo) required for full audio experience.
|
||||
AudioQualityHighChannels int
|
||||
|
||||
// AudioQualityUltraChannels defines channel count for ultra-quality audio.
|
||||
// Used in: audio.go for maximum quality scenarios
|
||||
// Impact: Stereo (2) mandatory for studio-quality audio reproduction.
|
||||
// Default 2 (stereo) provides full spatial audio fidelity.
|
||||
AudioQualityUltraChannels int
|
||||
|
||||
// CGO Audio Constants - Low-level C library configuration for audio processing
|
||||
// These constants are passed to C code in cgo_audio.go for native audio operations
|
||||
// Impact: Direct control over native audio library behavior and performance
|
||||
|
||||
// CGOOpusBitrate sets the bitrate for native Opus encoder (bits per second).
|
||||
// Used in: cgo_audio.go update_audio_constants() function
|
||||
// Impact: Controls quality vs bandwidth tradeoff in native encoding.
|
||||
// Default 96000 (96kbps) provides good quality for real-time applications.
|
||||
CGOOpusBitrate int
|
||||
// CGO Audio Constants
|
||||
CGOOpusBitrate int // Native Opus encoder bitrate in bps (default: 96000)
|
||||
|
||||
// CGOOpusComplexity sets computational complexity for native Opus encoder (0-10).
|
||||
// Used in: cgo_audio.go for native encoder configuration
|
||||
|
@ -1541,6 +1413,82 @@ type AudioConfigConstants struct {
|
|||
// Default 8 channels provides reasonable upper bound for multi-channel audio.
|
||||
MaxChannels int
|
||||
|
||||
// CGO Constants
|
||||
// Used in: cgo_audio.go for CGO operation limits and retry logic
|
||||
// Impact: Controls CGO retry behavior and backoff timing
|
||||
|
||||
// CGOMaxBackoffMicroseconds defines maximum backoff time in microseconds for CGO operations.
|
||||
// Used in: safe_alsa_open for exponential backoff retry logic
|
||||
// Impact: Prevents excessive wait times while allowing device recovery.
|
||||
// Default 500000 microseconds (500ms) provides reasonable maximum wait time.
|
||||
CGOMaxBackoffMicroseconds int
|
||||
|
||||
// CGOMaxAttempts defines maximum retry attempts for CGO operations.
|
||||
// Used in: safe_alsa_open for retry limit enforcement
|
||||
// Impact: Prevents infinite retry loops while allowing transient error recovery.
|
||||
// Default 5 attempts provides good balance between reliability and performance.
|
||||
CGOMaxAttempts int
|
||||
|
||||
// Validation Frame Size Limits
|
||||
// Used in: validation_enhanced.go for frame duration validation
|
||||
// Impact: Ensures frame sizes are within acceptable bounds for real-time audio
|
||||
|
||||
// MinFrameDuration defines minimum acceptable frame duration.
|
||||
// Used in: ValidateAudioConfiguration for frame size validation
|
||||
// Impact: Prevents excessively small frames that could impact performance.
|
||||
// Default 10ms provides minimum viable frame duration for real-time audio.
|
||||
MinFrameDuration time.Duration
|
||||
|
||||
// MaxFrameDuration defines maximum acceptable frame duration.
|
||||
// Used in: ValidateAudioConfiguration for frame size validation
|
||||
// Impact: Prevents excessively large frames that could impact latency.
|
||||
// Default 100ms provides reasonable maximum frame duration.
|
||||
MaxFrameDuration time.Duration
|
||||
|
||||
// Valid Sample Rates
|
||||
// Used in: validation_enhanced.go for sample rate validation
|
||||
// Impact: Defines the set of supported sample rates for audio processing
|
||||
|
||||
// ValidSampleRates defines the list of supported sample rates.
|
||||
// Used in: ValidateAudioConfiguration for sample rate validation
|
||||
// Impact: Ensures only supported sample rates are used in audio processing.
|
||||
// Default rates support common audio standards from voice (8kHz) to professional (48kHz).
|
||||
ValidSampleRates []int
|
||||
|
||||
// Opus Bitrate Validation Constants
|
||||
// Used in: validation_enhanced.go for bitrate range validation
|
||||
// Impact: Ensures bitrate values are within Opus codec specifications
|
||||
|
||||
// MinOpusBitrate defines the minimum valid Opus bitrate in bits per second.
|
||||
// Used in: ValidateAudioConfiguration for bitrate validation
|
||||
// Impact: Prevents bitrates below Opus codec minimum specification.
|
||||
// Default 6000 bps is the minimum supported by Opus codec.
|
||||
MinOpusBitrate int
|
||||
|
||||
// MaxOpusBitrate defines the maximum valid Opus bitrate in bits per second.
|
||||
// Used in: ValidateAudioConfiguration for bitrate validation
|
||||
// Impact: Prevents bitrates above Opus codec maximum specification.
|
||||
// Default 510000 bps is the maximum supported by Opus codec.
|
||||
MaxOpusBitrate int
|
||||
|
||||
// MaxValidationTime defines the maximum time allowed for validation operations.
|
||||
// Used in: GetValidationConfig for timeout control
|
||||
// Impact: Prevents validation operations from blocking indefinitely.
|
||||
// Default 5s provides reasonable timeout for validation operations.
|
||||
MaxValidationTime time.Duration
|
||||
|
||||
// MinFrameSize defines the minimum reasonable audio frame size in bytes.
|
||||
// Used in: ValidateAudioFrameComprehensive for frame size validation
|
||||
// Impact: Prevents processing of unreasonably small audio frames.
|
||||
// Default 64 bytes ensures minimum viable audio data.
|
||||
MinFrameSize int
|
||||
|
||||
// FrameSizeTolerance defines the tolerance for frame size validation in bytes.
|
||||
// Used in: ValidateAudioFrameComprehensive for frame size matching
|
||||
// Impact: Allows reasonable variation in frame sizes due to encoding.
|
||||
// Default 512 bytes accommodates typical encoding variations.
|
||||
FrameSizeTolerance int
|
||||
|
||||
// Device Health Monitoring Configuration
|
||||
// Used in: device_health.go for proactive device monitoring and recovery
|
||||
// Impact: Controls health check frequency and recovery thresholds
|
||||
|
@ -1590,105 +1538,27 @@ type AudioConfigConstants struct {
|
|||
// real-time audio requirements, and extensive testing for optimal performance.
|
||||
func DefaultAudioConfig() *AudioConfigConstants {
|
||||
return &AudioConfigConstants{
|
||||
// Audio Quality Presets - Core audio frame and packet size configuration
|
||||
// Used in: Throughout audio pipeline for buffer allocation and frame processing
|
||||
// Impact: Controls memory usage and prevents buffer overruns
|
||||
|
||||
// MaxAudioFrameSize defines maximum size for audio frames.
|
||||
// Used in: Buffer allocation throughout audio pipeline
|
||||
// Impact: Prevents buffer overruns while accommodating high-quality audio.
|
||||
// Default 4096 bytes provides safety margin for largest expected frames.
|
||||
// Audio Quality Presets
|
||||
MaxAudioFrameSize: 4096,
|
||||
|
||||
// Opus Encoding Parameters - Configuration for Opus audio codec
|
||||
// Used in: Audio encoding/decoding pipeline for quality control
|
||||
// Impact: Controls audio quality, bandwidth usage, and encoding performance
|
||||
|
||||
// OpusBitrate defines target bitrate for Opus encoding.
|
||||
// Used in: Opus encoder initialization and quality control
|
||||
// Impact: Higher bitrates improve quality but increase bandwidth usage.
|
||||
// Default 128kbps provides excellent quality with reasonable bandwidth.
|
||||
OpusBitrate: 128000,
|
||||
|
||||
// OpusComplexity defines computational complexity for Opus encoding.
|
||||
// Used in: Opus encoder for quality vs CPU usage balance
|
||||
// Impact: Higher complexity improves quality but increases CPU usage.
|
||||
// Default 10 (maximum) ensures best quality on modern ARM processors.
|
||||
OpusComplexity: 10,
|
||||
|
||||
// OpusVBR enables variable bitrate encoding.
|
||||
// Used in: Opus encoder for adaptive bitrate control
|
||||
// Impact: Optimizes bandwidth based on audio content complexity.
|
||||
// Default 1 (enabled) reduces bandwidth for simple audio content.
|
||||
OpusVBR: 1,
|
||||
|
||||
// OpusVBRConstraint controls VBR constraint mode.
|
||||
// Used in: Opus encoder for bitrate variation control
|
||||
// Impact: 0=unconstrained allows maximum flexibility for quality.
|
||||
// Default 0 provides optimal quality-bandwidth balance.
|
||||
// Opus Encoding Parameters
|
||||
OpusBitrate: 128000,
|
||||
OpusComplexity: 10,
|
||||
OpusVBR: 1,
|
||||
OpusVBRConstraint: 0,
|
||||
OpusDTX: 0,
|
||||
|
||||
// OpusDTX controls discontinuous transmission.
|
||||
// Used in: Opus encoder for silence detection and transmission
|
||||
// Impact: Can interfere with system audio monitoring in KVM applications.
|
||||
// Default 0 (disabled) ensures consistent audio stream.
|
||||
OpusDTX: 0,
|
||||
|
||||
// Audio Parameters - Core audio format configuration
|
||||
// Used in: Audio processing pipeline for format consistency
|
||||
// Impact: Controls audio quality, compatibility, and processing requirements
|
||||
|
||||
// SampleRate defines audio sampling frequency.
|
||||
// Used in: Audio capture, processing, and playback throughout pipeline
|
||||
// Impact: Higher rates improve quality but increase processing and bandwidth.
|
||||
// Default 48kHz provides professional audio quality with full frequency range.
|
||||
SampleRate: 48000,
|
||||
|
||||
// Channels defines number of audio channels.
|
||||
// Used in: Audio processing pipeline for channel handling
|
||||
// Impact: Stereo preserves spatial information but doubles bandwidth.
|
||||
// Default 2 (stereo) captures full system audio including spatial effects.
|
||||
Channels: 2,
|
||||
|
||||
// FrameSize defines number of samples per audio frame.
|
||||
// Used in: Audio processing for frame-based operations
|
||||
// Impact: Larger frames improve efficiency but increase latency.
|
||||
// Default 960 samples (20ms at 48kHz) balances latency and efficiency.
|
||||
FrameSize: 960,
|
||||
|
||||
// MaxPacketSize defines maximum size for audio packets.
|
||||
// Used in: Network transmission and buffer allocation
|
||||
// Impact: Must accommodate compressed frames with overhead.
|
||||
// Default 4000 bytes prevents fragmentation while allowing quality variations.
|
||||
// Audio Parameters
|
||||
SampleRate: 48000,
|
||||
Channels: 2,
|
||||
FrameSize: 960,
|
||||
MaxPacketSize: 4000,
|
||||
|
||||
// Audio Quality Bitrates - Preset bitrates for different quality levels
|
||||
// Used in: Audio quality management and adaptive bitrate control
|
||||
// Impact: Controls bandwidth usage and audio quality for different scenarios
|
||||
|
||||
// AudioQualityLowOutputBitrate defines bitrate for low-quality output audio.
|
||||
// Used in: Bandwidth-constrained connections and basic audio monitoring
|
||||
// Impact: Minimizes bandwidth while maintaining acceptable quality.
|
||||
// Default 32kbps optimized for constrained connections, higher than input.
|
||||
AudioQualityLowOutputBitrate: 32,
|
||||
|
||||
// AudioQualityLowInputBitrate defines bitrate for low-quality input audio.
|
||||
// Used in: Microphone input in bandwidth-constrained scenarios
|
||||
// Impact: Reduces bandwidth for microphone audio which is typically simpler.
|
||||
// Default 16kbps sufficient for basic voice input.
|
||||
AudioQualityLowInputBitrate: 16,
|
||||
|
||||
// AudioQualityMediumOutputBitrate defines bitrate for medium-quality output.
|
||||
// Used in: Typical KVM scenarios with reasonable network connections
|
||||
// Impact: Balances bandwidth and quality for most use cases.
|
||||
// Default 64kbps provides good quality for standard usage.
|
||||
// Audio Quality Bitrates
|
||||
AudioQualityLowOutputBitrate: 32,
|
||||
AudioQualityLowInputBitrate: 16,
|
||||
AudioQualityMediumOutputBitrate: 64,
|
||||
|
||||
// AudioQualityMediumInputBitrate defines bitrate for medium-quality input.
|
||||
// Used in: Standard microphone input scenarios
|
||||
// Impact: Provides good voice quality without excessive bandwidth.
|
||||
// Default 32kbps suitable for clear voice communication.
|
||||
AudioQualityMediumInputBitrate: 32,
|
||||
AudioQualityMediumInputBitrate: 32,
|
||||
|
||||
// AudioQualityHighOutputBitrate defines bitrate for high-quality output.
|
||||
// Used in: Professional applications requiring excellent audio fidelity
|
||||
|
@ -1766,106 +1636,57 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
|
||||
// Audio Quality Channels - Channel configuration for different quality levels
|
||||
// Used in: Audio processing pipeline for channel handling and bandwidth control
|
||||
// Impact: Controls spatial audio information and bandwidth requirements
|
||||
|
||||
// AudioQualityLowChannels defines channel count for low-quality audio.
|
||||
// Used in: Basic audio monitoring in bandwidth-constrained scenarios
|
||||
// Impact: Reduces bandwidth by 50% with acceptable quality trade-off.
|
||||
// Default 1 (mono) suitable where stereo separation not critical.
|
||||
AudioQualityLowChannels: 1,
|
||||
|
||||
// AudioQualityMediumChannels defines channel count for medium-quality audio.
|
||||
// Used in: Standard audio scenarios requiring spatial information
|
||||
// Impact: Preserves spatial audio information essential for modern systems.
|
||||
// Default 2 (stereo) maintains spatial audio for medium quality.
|
||||
AudioQualityLowChannels: 1,
|
||||
AudioQualityMediumChannels: 2,
|
||||
AudioQualityHighChannels: 2,
|
||||
AudioQualityUltraChannels: 2,
|
||||
|
||||
// AudioQualityHighChannels defines channel count for high-quality audio.
|
||||
// Used in: High-quality audio scenarios requiring full spatial reproduction
|
||||
// Impact: Ensures complete spatial audio information for quality scenarios.
|
||||
// Default 2 (stereo) preserves spatial information for high quality.
|
||||
AudioQualityHighChannels: 2,
|
||||
// Audio Quality OPUS Encoder Parameters - Quality-specific encoder settings
|
||||
// Used in: Dynamic OPUS encoder configuration based on quality presets
|
||||
// Impact: Controls encoding complexity, VBR, signal type, bandwidth, and DTX
|
||||
|
||||
// AudioQualityUltraChannels defines channel count for ultra-quality audio.
|
||||
// Used in: Ultra-quality scenarios requiring maximum spatial fidelity
|
||||
// Impact: Provides complete spatial audio reproduction for audiophile use.
|
||||
// Default 2 (stereo) ensures maximum spatial fidelity for ultra quality.
|
||||
AudioQualityUltraChannels: 2,
|
||||
// Low Quality OPUS Parameters - Optimized for bandwidth conservation
|
||||
AudioQualityLowOpusComplexity: 1, // Low complexity for minimal CPU usage
|
||||
AudioQualityLowOpusVBR: 0, // CBR for predictable bandwidth
|
||||
AudioQualityLowOpusSignalType: 3001, // OPUS_SIGNAL_VOICE
|
||||
AudioQualityLowOpusBandwidth: 1101, // OPUS_BANDWIDTH_NARROWBAND
|
||||
AudioQualityLowOpusDTX: 1, // Enable DTX for silence suppression
|
||||
|
||||
// CGO Audio Constants - Configuration for C interop audio processing
|
||||
// Used in: CGO audio operations and C library compatibility
|
||||
// Impact: Controls quality, performance, and compatibility for C-side processing
|
||||
// Medium Quality OPUS Parameters - Balanced performance and quality
|
||||
AudioQualityMediumOpusComplexity: 5, // Medium complexity for balanced performance
|
||||
AudioQualityMediumOpusVBR: 1, // VBR for better quality
|
||||
AudioQualityMediumOpusSignalType: 3002, // OPUS_SIGNAL_MUSIC
|
||||
AudioQualityMediumOpusBandwidth: 1103, // OPUS_BANDWIDTH_WIDEBAND
|
||||
AudioQualityMediumOpusDTX: 0, // Disable DTX for consistent quality
|
||||
|
||||
// CGOOpusBitrate defines bitrate for CGO Opus operations.
|
||||
// Used in: CGO audio encoding with embedded processing constraints
|
||||
// Impact: Conservative bitrate reduces processing load while maintaining quality.
|
||||
// Default 96kbps provides good quality suitable for embedded processing.
|
||||
CGOOpusBitrate: 96000,
|
||||
// High Quality OPUS Parameters - High quality with good performance
|
||||
AudioQualityHighOpusComplexity: 8, // High complexity for better quality
|
||||
AudioQualityHighOpusVBR: 1, // VBR for optimal quality
|
||||
AudioQualityHighOpusSignalType: 3002, // OPUS_SIGNAL_MUSIC
|
||||
AudioQualityHighOpusBandwidth: 1104, // OPUS_BANDWIDTH_SUPERWIDEBAND
|
||||
AudioQualityHighOpusDTX: 0, // Disable DTX for consistent quality
|
||||
|
||||
// CGOOpusComplexity defines complexity for CGO Opus operations.
|
||||
// Used in: CGO audio encoding for CPU load management
|
||||
// Impact: Lower complexity reduces CPU load while maintaining acceptable quality.
|
||||
// Default 3 balances quality and real-time processing requirements.
|
||||
CGOOpusComplexity: 3,
|
||||
// Ultra Quality OPUS Parameters - Maximum quality settings
|
||||
AudioQualityUltraOpusComplexity: 10, // Maximum complexity for best quality
|
||||
AudioQualityUltraOpusVBR: 1, // VBR for optimal quality
|
||||
AudioQualityUltraOpusSignalType: 3002, // OPUS_SIGNAL_MUSIC
|
||||
AudioQualityUltraOpusBandwidth: 1105, // OPUS_BANDWIDTH_FULLBAND
|
||||
AudioQualityUltraOpusDTX: 0, // Disable DTX for maximum quality
|
||||
|
||||
// CGOOpusVBR enables variable bitrate for CGO operations.
|
||||
// Used in: CGO audio encoding for adaptive bandwidth optimization
|
||||
// Impact: Allows bitrate adaptation based on content complexity.
|
||||
// Default 1 (enabled) optimizes bandwidth usage in CGO processing.
|
||||
CGOOpusVBR: 1,
|
||||
|
||||
// CGOOpusVBRConstraint controls VBR constraint for CGO operations.
|
||||
// Used in: CGO audio encoding for predictable processing load
|
||||
// Impact: Limits bitrate variations for more predictable embedded performance.
|
||||
// Default 1 (constrained) ensures predictable processing in embedded environment.
|
||||
// CGO Audio Constants
|
||||
CGOOpusBitrate: 96000,
|
||||
CGOOpusComplexity: 3,
|
||||
CGOOpusVBR: 1,
|
||||
CGOOpusVBRConstraint: 1,
|
||||
CGOOpusSignalType: 3, // OPUS_SIGNAL_MUSIC
|
||||
CGOOpusBandwidth: 1105, // OPUS_BANDWIDTH_FULLBAND
|
||||
CGOOpusDTX: 0,
|
||||
CGOSampleRate: 48000,
|
||||
CGOChannels: 2,
|
||||
CGOFrameSize: 960,
|
||||
CGOMaxPacketSize: 1500,
|
||||
|
||||
// CGOOpusSignalType defines signal type for CGO Opus operations.
|
||||
// Used in: CGO audio encoding for content-optimized processing
|
||||
// Impact: Optimizes encoding for general audio content types.
|
||||
// Default 3 (OPUS_SIGNAL_MUSIC) handles system sounds, music, and mixed audio.
|
||||
CGOOpusSignalType: 3, // OPUS_SIGNAL_MUSIC
|
||||
|
||||
// CGOOpusBandwidth defines bandwidth for CGO Opus operations.
|
||||
// Used in: CGO audio encoding for frequency range control
|
||||
// Impact: Enables full audio spectrum reproduction up to 20kHz.
|
||||
// Default 1105 (OPUS_BANDWIDTH_FULLBAND) provides complete spectrum coverage.
|
||||
CGOOpusBandwidth: 1105, // OPUS_BANDWIDTH_FULLBAND
|
||||
|
||||
// CGOOpusDTX controls discontinuous transmission for CGO operations.
|
||||
// Used in: CGO audio encoding for silence detection control
|
||||
// Impact: Prevents silence detection interference with system audio monitoring.
|
||||
// Default 0 (disabled) ensures consistent audio stream.
|
||||
CGOOpusDTX: 0,
|
||||
|
||||
// CGOSampleRate defines sample rate for CGO audio operations.
|
||||
// Used in: CGO audio processing for format consistency
|
||||
// Impact: Matches main audio parameters for pipeline consistency.
|
||||
// Default 48kHz provides professional audio quality and consistency.
|
||||
CGOSampleRate: 48000,
|
||||
|
||||
// CGOChannels defines channel count for CGO audio operations.
|
||||
// Used in: CGO audio processing for spatial audio handling
|
||||
// Impact: Maintains spatial audio information throughout CGO pipeline.
|
||||
// Default 2 (stereo) preserves spatial information in CGO processing.
|
||||
CGOChannels: 2,
|
||||
|
||||
// CGOFrameSize defines frame size for CGO audio operations.
|
||||
// Used in: CGO audio processing for timing consistency
|
||||
// Impact: Matches main frame size for consistent timing and efficiency.
|
||||
// Default 960 samples (20ms at 48kHz) ensures consistent processing timing.
|
||||
CGOFrameSize: 960,
|
||||
|
||||
// CGOMaxPacketSize defines maximum packet size for CGO operations.
|
||||
// Used in: CGO audio transmission and buffer allocation
|
||||
// Impact: Accommodates Ethernet MTU while providing sufficient packet space.
|
||||
// Default 1500 bytes fits Ethernet MTU constraints with compressed audio.
|
||||
CGOMaxPacketSize: 1500,
|
||||
|
||||
// Input IPC Constants - Configuration for microphone input IPC
|
||||
// Used in: Microphone input processing and IPC communication
|
||||
// Impact: Controls quality and compatibility for input audio processing
|
||||
|
||||
// Input IPC Constants
|
||||
// InputIPCSampleRate defines sample rate for input IPC operations.
|
||||
// Used in: Microphone input capture and processing
|
||||
// Impact: Ensures high-quality input matching system audio output.
|
||||
|
@ -2607,6 +2428,26 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
MaxSampleRate: 48000, // 48kHz maximum sample rate
|
||||
MaxChannels: 8, // 8 maximum audio channels
|
||||
|
||||
// CGO Constants
|
||||
CGOMaxBackoffMicroseconds: 500000, // 500ms maximum backoff in microseconds
|
||||
CGOMaxAttempts: 5, // 5 maximum retry attempts
|
||||
|
||||
// Validation Frame Size Limits
|
||||
MinFrameDuration: 10 * time.Millisecond, // 10ms minimum frame duration
|
||||
MaxFrameDuration: 100 * time.Millisecond, // 100ms maximum frame duration
|
||||
|
||||
// Valid Sample Rates
|
||||
ValidSampleRates: []int{8000, 12000, 16000, 22050, 24000, 44100, 48000}, // Supported sample rates
|
||||
|
||||
// Opus Bitrate Validation Constants
|
||||
MinOpusBitrate: 6000, // 6000 bps minimum Opus bitrate
|
||||
MaxOpusBitrate: 510000, // 510000 bps maximum Opus bitrate
|
||||
|
||||
// Validation Configuration
|
||||
MaxValidationTime: 5 * time.Second, // 5s maximum validation timeout
|
||||
MinFrameSize: 1, // 1 byte minimum frame size (allow small frames)
|
||||
FrameSizeTolerance: 512, // 512 bytes frame size tolerance
|
||||
|
||||
// Device Health Monitoring Configuration
|
||||
HealthCheckIntervalMS: 5000, // 5000ms (5s) health check interval
|
||||
HealthRecoveryThreshold: 3, // 3 consecutive successes for recovery
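The validation limits set above (ValidSampleRates, MinOpusBitrate/MaxOpusBitrate, MinFrameDuration/MaxFrameDuration) are consumed by validation_enhanced.go, which this compare does not include. A rough sketch of the kind of check they enable; the function name and exact rules are assumptions, and fmt would need to be imported:

// Hypothetical sketch; the real ValidateAudioConfiguration may differ.
func validateOpusSettingsSketch(cfg *AudioConfigConstants) error {
	if cfg.OpusBitrate < cfg.MinOpusBitrate || cfg.OpusBitrate > cfg.MaxOpusBitrate {
		return fmt.Errorf("opus bitrate %d outside [%d, %d]", cfg.OpusBitrate, cfg.MinOpusBitrate, cfg.MaxOpusBitrate)
	}
	frameDuration := time.Duration(cfg.FrameSize) * time.Second / time.Duration(cfg.SampleRate)
	if frameDuration < cfg.MinFrameDuration || frameDuration > cfg.MaxFrameDuration {
		return fmt.Errorf("frame duration %v outside [%v, %v]", frameDuration, cfg.MinFrameDuration, cfg.MaxFrameDuration)
	}
	for _, rate := range cfg.ValidSampleRates {
		if cfg.SampleRate == rate {
			return nil
		}
	}
	return fmt.Errorf("unsupported sample rate %d", cfg.SampleRate)
}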
|
||||
|
@ -2630,7 +2471,17 @@ var audioConfigInstance = DefaultAudioConfig()

// UpdateConfig allows runtime configuration updates
func UpdateConfig(newConfig *AudioConfigConstants) {
// Validate the new configuration before applying it
if err := ValidateAudioConfigConstants(newConfig); err != nil {
// Log validation error and keep current configuration
logger := logging.GetDefaultLogger().With().Str("component", "AudioConfig").Logger()
logger.Error().Err(err).Msg("Configuration validation failed, keeping current configuration")
return
}

audioConfigInstance = newConfig
logger := logging.GetDefaultLogger().With().Str("component", "AudioConfig").Logger()
logger.Info().Msg("Audio configuration updated successfully")
}

// GetConfig returns the current configuration
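GetConfig (continuing beyond this hunk) hands out the active instance, and UpdateConfig above only swaps it after ValidateAudioConfigConstants passes, so a failed update leaves the previous configuration in place. A small usage sketch; the copy-and-modify pattern and the pointer return of GetConfig are assumptions:

// Sketch: raising the Opus bitrate at runtime; an invalid value is rejected and logged.
func bumpOpusBitrateSketch() {
	cfg := *GetConfig()      // copy the current constants
	cfg.OpusBitrate = 192000 // still inside the 6000-510000 bps Opus range
	UpdateConfig(&cfg)       // validated before it replaces audioConfigInstance
}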
@ -130,7 +130,7 @@ func (dhm *DeviceHealthMonitor) Start() error {
|
|||
return fmt.Errorf("device health monitor already running")
|
||||
}
|
||||
|
||||
dhm.logger.Info().Msg("starting device health monitor")
|
||||
dhm.logger.Debug().Msg("device health monitor starting")
|
||||
atomic.StoreInt32(&dhm.monitoringEnabled, 1)
|
||||
|
||||
go dhm.monitoringLoop()
|
||||
|
@ -143,7 +143,7 @@ func (dhm *DeviceHealthMonitor) Stop() {
|
|||
return
|
||||
}
|
||||
|
||||
dhm.logger.Info().Msg("stopping device health monitor")
|
||||
dhm.logger.Debug().Msg("device health monitor stopping")
|
||||
atomic.StoreInt32(&dhm.monitoringEnabled, 0)
|
||||
|
||||
close(dhm.stopChan)
|
||||
|
@ -152,7 +152,7 @@ func (dhm *DeviceHealthMonitor) Stop() {
|
|||
// Wait for monitoring loop to finish
|
||||
select {
|
||||
case <-dhm.doneChan:
|
||||
dhm.logger.Info().Msg("device health monitor stopped")
|
||||
dhm.logger.Debug().Msg("device health monitor stopped")
|
||||
case <-time.After(time.Duration(dhm.config.SupervisorTimeout)):
|
||||
dhm.logger.Warn().Msg("device health monitor stop timeout")
|
||||
}
|
||||
|
@ -163,7 +163,7 @@ func (dhm *DeviceHealthMonitor) RegisterRecoveryCallback(component string, callb
|
|||
dhm.callbackMutex.Lock()
|
||||
defer dhm.callbackMutex.Unlock()
|
||||
dhm.recoveryCallbacks[component] = callback
|
||||
dhm.logger.Info().Str("component", component).Msg("registered recovery callback")
|
||||
dhm.logger.Debug().Str("component", component).Msg("registered recovery callback")
|
||||
}
|
||||
|
||||
// RecordError records an error for health tracking
|
||||
|
|
|
@ -144,7 +144,7 @@ func (aeb *AudioEventBroadcaster) Subscribe(connectionID string, conn *websocket
|
|||
logger: logger,
|
||||
}
|
||||
|
||||
aeb.logger.Info().Str("connectionID", connectionID).Msg("audio events subscription added")
|
||||
aeb.logger.Debug().Str("connectionID", connectionID).Msg("audio events subscription added")
|
||||
|
||||
// Send initial state to new subscriber
|
||||
go aeb.sendInitialState(connectionID)
|
||||
|
@ -156,7 +156,7 @@ func (aeb *AudioEventBroadcaster) Unsubscribe(connectionID string) {
|
|||
defer aeb.mutex.Unlock()
|
||||
|
||||
delete(aeb.subscribers, connectionID)
|
||||
aeb.logger.Info().Str("connectionID", connectionID).Msg("audio events subscription removed")
|
||||
aeb.logger.Debug().Str("connectionID", connectionID).Msg("audio events subscription removed")
|
||||
}
|
||||
|
||||
// BroadcastAudioMuteChanged broadcasts audio mute state changes
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -10,24 +9,6 @@ import (
|
|||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// LatencyHistogram tracks latency distribution with percentile calculations
|
||||
type LatencyHistogram struct {
|
||||
// Atomic fields MUST be first for ARM32 alignment
|
||||
sampleCount int64 // Total number of samples (atomic)
|
||||
totalLatency int64 // Sum of all latencies in nanoseconds (atomic)
|
||||
|
||||
// Latency buckets for histogram (in nanoseconds)
|
||||
buckets []int64 // Bucket boundaries
|
||||
counts []int64 // Count for each bucket (atomic)
|
||||
|
||||
// Recent samples for percentile calculation
|
||||
recentSamples []time.Duration
|
||||
samplesMutex sync.RWMutex
|
||||
maxSamples int
|
||||
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
// LatencyPercentiles holds calculated percentile values
|
||||
type LatencyPercentiles struct {
|
||||
P50 time.Duration `json:"p50"`
|
||||
|
@ -59,11 +40,6 @@ type BufferPoolEfficiencyMetrics struct {
|
|||
|
||||
// GranularMetricsCollector aggregates all granular metrics
|
||||
type GranularMetricsCollector struct {
|
||||
// Latency histograms by source
|
||||
inputLatencyHist *LatencyHistogram
|
||||
outputLatencyHist *LatencyHistogram
|
||||
processingLatencyHist *LatencyHistogram
|
||||
|
||||
// Buffer pool efficiency tracking
|
||||
framePoolMetrics *BufferPoolEfficiencyTracker
|
||||
controlPoolMetrics *BufferPoolEfficiencyTracker
|
||||
|
@ -91,92 +67,6 @@ type BufferPoolEfficiencyTracker struct {
|
|||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
// NewLatencyHistogram creates a new latency histogram with predefined buckets
|
||||
func NewLatencyHistogram(maxSamples int, logger zerolog.Logger) *LatencyHistogram {
|
||||
// Define latency buckets using configuration constants
|
||||
buckets := []int64{
|
||||
int64(1 * time.Millisecond),
|
||||
int64(5 * time.Millisecond),
|
||||
int64(GetConfig().LatencyBucket10ms),
|
||||
int64(GetConfig().LatencyBucket25ms),
|
||||
int64(GetConfig().LatencyBucket50ms),
|
||||
int64(GetConfig().LatencyBucket100ms),
|
||||
int64(GetConfig().LatencyBucket250ms),
|
||||
int64(GetConfig().LatencyBucket500ms),
|
||||
int64(GetConfig().LatencyBucket1s),
|
||||
int64(GetConfig().LatencyBucket2s),
|
||||
}
|
||||
|
||||
return &LatencyHistogram{
|
||||
buckets: buckets,
|
||||
counts: make([]int64, len(buckets)+1), // +1 for overflow bucket
|
||||
recentSamples: make([]time.Duration, 0, maxSamples),
|
||||
maxSamples: maxSamples,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// RecordLatency adds a latency measurement to the histogram
|
||||
func (lh *LatencyHistogram) RecordLatency(latency time.Duration) {
|
||||
latencyNs := latency.Nanoseconds()
|
||||
atomic.AddInt64(&lh.sampleCount, 1)
|
||||
atomic.AddInt64(&lh.totalLatency, latencyNs)
|
||||
|
||||
// Find appropriate bucket
|
||||
bucketIndex := len(lh.buckets) // Default to overflow bucket
|
||||
for i, boundary := range lh.buckets {
|
||||
if latencyNs <= boundary {
|
||||
bucketIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
atomic.AddInt64(&lh.counts[bucketIndex], 1)
|
||||
|
||||
// Store recent sample for percentile calculation
|
||||
lh.samplesMutex.Lock()
|
||||
if len(lh.recentSamples) >= lh.maxSamples {
|
||||
// Remove oldest sample
|
||||
lh.recentSamples = lh.recentSamples[1:]
|
||||
}
|
||||
lh.recentSamples = append(lh.recentSamples, latency)
|
||||
lh.samplesMutex.Unlock()
|
||||
}
|
||||
|
||||
// GetPercentiles calculates latency percentiles from recent samples
func (lh *LatencyHistogram) GetPercentiles() LatencyPercentiles {
lh.samplesMutex.RLock()
samples := make([]time.Duration, len(lh.recentSamples))
copy(samples, lh.recentSamples)
lh.samplesMutex.RUnlock()

if len(samples) == 0 {
return LatencyPercentiles{}
}

// Sort samples for percentile calculation
sort.Slice(samples, func(i, j int) bool {
return samples[i] < samples[j]
})

n := len(samples)
totalLatency := atomic.LoadInt64(&lh.totalLatency)
sampleCount := atomic.LoadInt64(&lh.sampleCount)

var avg time.Duration
if sampleCount > 0 {
avg = time.Duration(totalLatency / sampleCount)
}

return LatencyPercentiles{
P50: samples[n*50/100],
P95: samples[n*95/100],
P99: samples[n*99/100],
Min: samples[0],
Max: samples[n-1],
Avg: avg,
}
}
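Note the percentile indexing above: with the samples sorted ascending, samples[n*50/100] is a nearest-rank style pick, so with n = 200 recent samples P95 is samples[190]. A short usage sketch of the histogram as defined above; the zerolog logger construction via the logging package is the only assumption:

// Sketch: feeding a histogram and reading the percentiles it derives.
func latencyHistogramExample() LatencyPercentiles {
	logger := logging.GetDefaultLogger().With().Str("histogram", "example").Logger()
	hist := NewLatencyHistogram(1000, logger)
	for i := 1; i <= 100; i++ {
		hist.RecordLatency(time.Duration(i) * time.Millisecond) // 1ms..100ms
	}
	return hist.GetPercentiles() // P50 ≈ 50ms, P95 ≈ 95ms, Max = 100ms
}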
||||
// NewBufferPoolEfficiencyTracker creates a new efficiency tracker
|
||||
func NewBufferPoolEfficiencyTracker(poolName string, logger zerolog.Logger) *BufferPoolEfficiencyTracker {
|
||||
return &BufferPoolEfficiencyTracker{
|
||||
|
@ -274,34 +164,14 @@ func (bpet *BufferPoolEfficiencyTracker) GetEfficiencyMetrics() BufferPoolEffici
|
|||
|
||||
// NewGranularMetricsCollector creates a new granular metrics collector
|
||||
func NewGranularMetricsCollector(logger zerolog.Logger) *GranularMetricsCollector {
|
||||
maxSamples := GetConfig().LatencyHistorySize
|
||||
|
||||
return &GranularMetricsCollector{
|
||||
inputLatencyHist: NewLatencyHistogram(maxSamples, logger.With().Str("histogram", "input").Logger()),
|
||||
outputLatencyHist: NewLatencyHistogram(maxSamples, logger.With().Str("histogram", "output").Logger()),
|
||||
processingLatencyHist: NewLatencyHistogram(maxSamples, logger.With().Str("histogram", "processing").Logger()),
|
||||
framePoolMetrics: NewBufferPoolEfficiencyTracker("frame_pool", logger.With().Str("pool", "frame").Logger()),
|
||||
controlPoolMetrics: NewBufferPoolEfficiencyTracker("control_pool", logger.With().Str("pool", "control").Logger()),
|
||||
zeroCopyMetrics: NewBufferPoolEfficiencyTracker("zero_copy_pool", logger.With().Str("pool", "zero_copy").Logger()),
|
||||
logger: logger,
|
||||
framePoolMetrics: NewBufferPoolEfficiencyTracker("frame_pool", logger.With().Str("pool", "frame").Logger()),
|
||||
controlPoolMetrics: NewBufferPoolEfficiencyTracker("control_pool", logger.With().Str("pool", "control").Logger()),
|
||||
zeroCopyMetrics: NewBufferPoolEfficiencyTracker("zero_copy_pool", logger.With().Str("pool", "zero_copy").Logger()),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// RecordInputLatency records latency for input operations
|
||||
func (gmc *GranularMetricsCollector) RecordInputLatency(latency time.Duration) {
|
||||
gmc.inputLatencyHist.RecordLatency(latency)
|
||||
}
|
||||
|
||||
// RecordOutputLatency records latency for output operations
|
||||
func (gmc *GranularMetricsCollector) RecordOutputLatency(latency time.Duration) {
|
||||
gmc.outputLatencyHist.RecordLatency(latency)
|
||||
}
|
||||
|
||||
// RecordProcessingLatency records latency for processing operations
|
||||
func (gmc *GranularMetricsCollector) RecordProcessingLatency(latency time.Duration) {
|
||||
gmc.processingLatencyHist.RecordLatency(latency)
|
||||
}
|
||||
|
||||
// RecordFramePoolOperation records frame pool operations
|
||||
func (gmc *GranularMetricsCollector) RecordFramePoolGet(latency time.Duration, wasHit bool) {
|
||||
gmc.framePoolMetrics.RecordGetOperation(latency, wasHit)
|
||||
|
@ -329,18 +199,6 @@ func (gmc *GranularMetricsCollector) RecordZeroCopyPut(latency time.Duration, bu
|
|||
gmc.zeroCopyMetrics.RecordPutOperation(latency, bufferSize)
|
||||
}
|
||||
|
||||
// GetLatencyPercentiles returns percentiles for all latency types
|
||||
func (gmc *GranularMetricsCollector) GetLatencyPercentiles() map[string]LatencyPercentiles {
|
||||
gmc.mutex.RLock()
|
||||
defer gmc.mutex.RUnlock()
|
||||
|
||||
return map[string]LatencyPercentiles{
|
||||
"input": gmc.inputLatencyHist.GetPercentiles(),
|
||||
"output": gmc.outputLatencyHist.GetPercentiles(),
|
||||
"processing": gmc.processingLatencyHist.GetPercentiles(),
|
||||
}
|
||||
}
|
||||
|
||||
// GetBufferPoolEfficiency returns efficiency metrics for all buffer pools
|
||||
func (gmc *GranularMetricsCollector) GetBufferPoolEfficiency() map[string]BufferPoolEfficiencyMetrics {
|
||||
gmc.mutex.RLock()
|
||||
|
@ -355,22 +213,8 @@ func (gmc *GranularMetricsCollector) GetBufferPoolEfficiency() map[string]Buffer
|
|||
|
||||
// LogGranularMetrics logs comprehensive granular metrics
|
||||
func (gmc *GranularMetricsCollector) LogGranularMetrics() {
|
||||
latencyPercentiles := gmc.GetLatencyPercentiles()
|
||||
bufferEfficiency := gmc.GetBufferPoolEfficiency()
|
||||
|
||||
// Log latency percentiles
|
||||
for source, percentiles := range latencyPercentiles {
|
||||
gmc.logger.Info().
|
||||
Str("source", source).
|
||||
Dur("p50", percentiles.P50).
|
||||
Dur("p95", percentiles.P95).
|
||||
Dur("p99", percentiles.P99).
|
||||
Dur("min", percentiles.Min).
|
||||
Dur("max", percentiles.Max).
|
||||
Dur("avg", percentiles.Avg).
|
||||
Msg("Latency percentiles")
|
||||
}
|
||||
|
||||
// Log buffer pool efficiency
|
||||
for poolName, efficiency := range bufferEfficiency {
|
||||
gmc.logger.Info().
|
||||
|
|
|
@ -0,0 +1,100 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestGranularMetricsCollector tests the GranularMetricsCollector functionality
|
||||
func TestGranularMetricsCollector(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"GetGranularMetricsCollector", testGetGranularMetricsCollector},
|
||||
{"ConcurrentCollectorAccess", testConcurrentCollectorAccess},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testGetGranularMetricsCollector tests singleton behavior
|
||||
func testGetGranularMetricsCollector(t *testing.T) {
|
||||
collector1 := GetGranularMetricsCollector()
|
||||
collector2 := GetGranularMetricsCollector()
|
||||
|
||||
require.NotNil(t, collector1)
|
||||
require.NotNil(t, collector2)
|
||||
assert.Same(t, collector1, collector2, "Should return the same singleton instance")
|
||||
}
|
||||
|
||||
// testConcurrentCollectorAccess tests thread safety of the collector
|
||||
func testConcurrentCollectorAccess(t *testing.T) {
|
||||
collector := GetGranularMetricsCollector()
|
||||
require.NotNil(t, collector)
|
||||
|
||||
const numGoroutines = 10
|
||||
const operationsPerGoroutine = 50
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numGoroutines)
|
||||
|
||||
// Concurrent buffer pool operations
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < operationsPerGoroutine; j++ {
|
||||
// Test buffer pool operations
|
||||
latency := time.Duration(id*operationsPerGoroutine+j) * time.Microsecond
|
||||
collector.RecordFramePoolGet(latency, true)
|
||||
collector.RecordFramePoolPut(latency, 1024)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify collector is still functional
|
||||
efficiency := collector.GetBufferPoolEfficiency()
|
||||
assert.NotNil(t, efficiency)
|
||||
}
|
||||
|
||||
func BenchmarkGranularMetricsCollector(b *testing.B) {
|
||||
collector := GetGranularMetricsCollector()
|
||||
|
||||
b.Run("RecordFramePoolGet", func(b *testing.B) {
|
||||
latency := 5 * time.Millisecond
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
collector.RecordFramePoolGet(latency, true)
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("RecordFramePoolPut", func(b *testing.B) {
|
||||
latency := 5 * time.Millisecond
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
collector.RecordFramePoolPut(latency, 1024)
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("GetBufferPoolEfficiency", func(b *testing.B) {
|
||||
// Pre-populate with some data
|
||||
for i := 0; i < 100; i++ {
|
||||
collector.RecordFramePoolGet(time.Duration(i)*time.Microsecond, true)
|
||||
collector.RecordFramePoolPut(time.Duration(i)*time.Microsecond, 1024)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = collector.GetBufferPoolEfficiency()
|
||||
}
|
||||
})
|
||||
}
|
|
@ -6,79 +6,75 @@ import (
"time"

"github.com/jetkvm/kvm/internal/logging"
"github.com/rs/zerolog"
)

// AudioInputMetrics holds metrics for microphone input
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
type AudioInputMetrics struct {
FramesSent int64 // Total frames sent
FramesDropped int64 // Total frames dropped
BytesProcessed int64 // Total bytes processed
ConnectionDrops int64 // Connection drops
AverageLatency time.Duration // time.Duration is int64
LastFrameTime time.Time
// Atomic int64 field first for proper ARM32 alignment
FramesSent int64 `json:"frames_sent"` // Total frames sent (input-specific)

// Embedded struct with atomic fields properly aligned
BaseAudioMetrics
}
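The field-ordering comment above matters on the 32-bit ARM targets this device runs on: sync/atomic's 64-bit operations require the addressed word to be 8-byte aligned, and Go only guarantees that for the first word of an allocated struct, so FramesSent has to stay ahead of the embedded BaseAudioMetrics. A small illustration; the helper function is hypothetical:

// Sketch: the input-specific counter is safe for 64-bit atomics because it is the
// first field of AudioInputMetrics; placing it after BaseAudioMetrics could break
// the 8-byte alignment guarantee on ARM32 and panic at runtime.
func recordSentFrameSketch(m *AudioInputMetrics) int64 {
	return atomic.AddInt64(&m.FramesSent, 1)
}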
|
||||
|
||||
// AudioInputManager manages microphone input stream using IPC mode only
|
||||
type AudioInputManager struct {
|
||||
metrics AudioInputMetrics
|
||||
|
||||
*BaseAudioManager
|
||||
ipcManager *AudioInputIPCManager
|
||||
logger zerolog.Logger
|
||||
running int32
|
||||
framesSent int64 // Input-specific metric
|
||||
}
|
||||
|
||||
// NewAudioInputManager creates a new audio input manager (IPC mode only)
|
||||
// NewAudioInputManager creates a new audio input manager
|
||||
func NewAudioInputManager() *AudioInputManager {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputManagerComponent).Logger()
|
||||
return &AudioInputManager{
|
||||
ipcManager: NewAudioInputIPCManager(),
|
||||
logger: logging.GetDefaultLogger().With().Str("component", AudioInputManagerComponent).Logger(),
|
||||
BaseAudioManager: NewBaseAudioManager(logger),
|
||||
ipcManager: NewAudioInputIPCManager(),
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins processing microphone input
|
||||
func (aim *AudioInputManager) Start() error {
|
||||
if !atomic.CompareAndSwapInt32(&aim.running, 0, 1) {
|
||||
if !aim.setRunning(true) {
|
||||
return fmt.Errorf("audio input manager is already running")
|
||||
}
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("starting component")
|
||||
aim.logComponentStart(AudioInputManagerComponent)
|
||||
|
||||
// Start the IPC-based audio input
|
||||
err := aim.ipcManager.Start()
|
||||
if err != nil {
|
||||
aim.logger.Error().Err(err).Str("component", AudioInputManagerComponent).Msg("failed to start component")
|
||||
aim.logComponentError(AudioInputManagerComponent, err, "failed to start component")
|
||||
// Ensure proper cleanup on error
|
||||
atomic.StoreInt32(&aim.running, 0)
|
||||
aim.setRunning(false)
|
||||
// Reset metrics on failed start
|
||||
aim.resetMetrics()
|
||||
return err
|
||||
}
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("component started successfully")
|
||||
aim.logComponentStarted(AudioInputManagerComponent)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops processing microphone input
|
||||
func (aim *AudioInputManager) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&aim.running, 1, 0) {
|
||||
if !aim.setRunning(false) {
|
||||
return // Already stopped
|
||||
}
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("stopping component")
|
||||
aim.logComponentStop(AudioInputManagerComponent)
|
||||
|
||||
// Stop the IPC-based audio input
|
||||
aim.ipcManager.Stop()
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputManagerComponent).Msg("component stopped")
|
||||
aim.logComponentStopped(AudioInputManagerComponent)
|
||||
}
|
||||
|
||||
// resetMetrics resets all metrics to zero
|
||||
func (aim *AudioInputManager) resetMetrics() {
|
||||
atomic.StoreInt64(&aim.metrics.FramesSent, 0)
|
||||
atomic.StoreInt64(&aim.metrics.FramesDropped, 0)
|
||||
atomic.StoreInt64(&aim.metrics.BytesProcessed, 0)
|
||||
atomic.StoreInt64(&aim.metrics.ConnectionDrops, 0)
|
||||
aim.BaseAudioManager.resetMetrics()
|
||||
atomic.StoreInt64(&aim.framesSent, 0)
|
||||
}
|
||||
|
||||
// WriteOpusFrame writes an Opus frame to the audio input system with latency tracking
|
||||
|
@ -87,6 +83,12 @@ func (aim *AudioInputManager) WriteOpusFrame(frame []byte) error {
|
|||
return nil // Not running, silently drop
|
||||
}
|
||||
|
||||
// Use ultra-fast validation for critical audio path
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
aim.logComponentError(AudioInputManagerComponent, err, "Frame validation failed")
|
||||
return fmt.Errorf("input frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Track end-to-end latency from WebRTC to IPC
|
||||
startTime := time.Now()
|
||||
err := aim.ipcManager.WriteOpusFrame(frame)
|
||||
|
@ -105,10 +107,10 @@ func (aim *AudioInputManager) WriteOpusFrame(frame []byte) error {
|
|||
}
|
||||
|
||||
// Update metrics
|
||||
atomic.AddInt64(&aim.metrics.FramesSent, 1)
|
||||
atomic.AddInt64(&aim.metrics.BytesProcessed, int64(len(frame)))
|
||||
aim.metrics.LastFrameTime = time.Now()
|
||||
aim.metrics.AverageLatency = processingTime
|
||||
atomic.AddInt64(&aim.framesSent, 1)
|
||||
aim.recordFrameProcessed(len(frame))
|
||||
aim.updateLatency(processingTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -141,21 +143,18 @@ func (aim *AudioInputManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFrame)
|
|||
}
|
||||
|
||||
// Update metrics
|
||||
atomic.AddInt64(&aim.metrics.FramesSent, 1)
|
||||
atomic.AddInt64(&aim.metrics.BytesProcessed, int64(frame.Length()))
|
||||
aim.metrics.LastFrameTime = time.Now()
|
||||
aim.metrics.AverageLatency = processingTime
|
||||
atomic.AddInt64(&aim.framesSent, 1)
|
||||
aim.recordFrameProcessed(frame.Length())
|
||||
aim.updateLatency(processingTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMetrics returns current audio input metrics
|
||||
// GetMetrics returns current metrics
|
||||
func (aim *AudioInputManager) GetMetrics() AudioInputMetrics {
|
||||
return AudioInputMetrics{
|
||||
FramesSent: atomic.LoadInt64(&aim.metrics.FramesSent),
|
||||
FramesDropped: atomic.LoadInt64(&aim.metrics.FramesDropped),
|
||||
BytesProcessed: atomic.LoadInt64(&aim.metrics.BytesProcessed),
|
||||
AverageLatency: aim.metrics.AverageLatency,
|
||||
LastFrameTime: aim.metrics.LastFrameTime,
|
||||
FramesSent: atomic.LoadInt64(&aim.framesSent),
|
||||
BaseAudioMetrics: aim.getBaseMetrics(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -209,10 +208,7 @@ func (aim *AudioInputManager) LogPerformanceStats() {
|
|||
Msg("Audio input performance metrics")
|
||||
}
|
||||
|
||||
// IsRunning returns whether the audio input manager is running
|
||||
func (aim *AudioInputManager) IsRunning() bool {
|
||||
return atomic.LoadInt32(&aim.running) == 1
|
||||
}
|
||||
// Note: IsRunning() is inherited from BaseAudioManager
|
||||
|
||||
// IsReady returns whether the audio input manager is ready to receive frames
|
||||
// This checks both that it's running and that the IPC connection is established
|
||||
|
|
|
@ -135,6 +135,9 @@ func (mp *MessagePool) Get() *OptimizedIPCMessage {
|
|||
mp.preallocated = mp.preallocated[:len(mp.preallocated)-1]
|
||||
mp.mutex.Unlock()
|
||||
atomic.AddInt64(&mp.hitCount, 1)
|
||||
// Reset message for reuse
|
||||
msg.data = msg.data[:0]
|
||||
msg.msg = InputIPCMessage{}
|
||||
return msg
|
||||
}
|
||||
mp.mutex.Unlock()
|
||||
|
@ -143,9 +146,16 @@ func (mp *MessagePool) Get() *OptimizedIPCMessage {
|
|||
select {
|
||||
case msg := <-mp.pool:
|
||||
atomic.AddInt64(&mp.hitCount, 1)
|
||||
// Reset message for reuse and ensure proper capacity
|
||||
msg.data = msg.data[:0]
|
||||
msg.msg = InputIPCMessage{}
|
||||
// Ensure data buffer has sufficient capacity
|
||||
if cap(msg.data) < maxFrameSize {
|
||||
msg.data = make([]byte, 0, maxFrameSize)
|
||||
}
|
||||
return msg
|
||||
default:
|
||||
// Pool exhausted, create new message
|
||||
// Pool exhausted, create new message with exact capacity
|
||||
atomic.AddInt64(&mp.missCount, 1)
|
||||
return &OptimizedIPCMessage{
|
||||
data: make([]byte, 0, maxFrameSize),
|
||||
|
@ -155,6 +165,15 @@ func (mp *MessagePool) Get() *OptimizedIPCMessage {
|
|||
|
||||
// Put returns a message to the pool
|
||||
func (mp *MessagePool) Put(msg *OptimizedIPCMessage) {
|
||||
if msg == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Validate buffer capacity - reject if too small or too large
|
||||
if cap(msg.data) < maxFrameSize/2 || cap(msg.data) > maxFrameSize*2 {
|
||||
return // Let GC handle oversized or undersized buffers
|
||||
}
|
||||
|
||||
// Reset the message for reuse
|
||||
msg.data = msg.data[:0]
|
||||
msg.msg = InputIPCMessage{}
|
||||
|
@ -301,8 +320,8 @@ func (ais *AudioInputServer) acceptConnections() {
|
|||
if err != nil {
|
||||
if ais.running {
|
||||
// Log error and continue accepting
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-server").Logger()
|
||||
logger.Warn().Err(err).Msg("Failed to accept connection, retrying")
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input").Logger()
|
||||
logger.Warn().Err(err).Msg("failed to accept connection, retrying")
|
||||
continue
|
||||
}
|
||||
return
|
||||
|
@ -311,8 +330,8 @@ func (ais *AudioInputServer) acceptConnections() {
|
|||
// Configure socket buffers for optimal performance
|
||||
if err := ConfigureSocketBuffers(conn, ais.socketBufferConfig); err != nil {
|
||||
// Log warning but don't fail - socket buffer optimization is not critical
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-server").Logger()
|
||||
logger.Warn().Err(err).Msg("Failed to configure socket buffers, continuing with defaults")
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input").Logger()
|
||||
logger.Warn().Err(err).Msg("failed to configure socket buffers, using defaults")
|
||||
} else {
|
||||
// Record socket buffer metrics for monitoring
|
||||
RecordSocketBufferMetrics(conn, "audio-input")
|
||||
|
@ -458,6 +477,13 @@ func (ais *AudioInputServer) processOpusFrame(data []byte) error {
|
|||
return nil // Empty frame, ignore
|
||||
}
|
||||
|
||||
// Use ultra-fast validation for critical audio path
|
||||
if err := ValidateAudioFrame(data); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputServerComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Frame validation failed")
|
||||
return fmt.Errorf("input frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Process the Opus frame using CGO
|
||||
_, err := CGOAudioDecodeWrite(data)
|
||||
return err
|
||||
|
@ -465,6 +491,18 @@ func (ais *AudioInputServer) processOpusFrame(data []byte) error {
|
|||
|
||||
// processConfig processes a configuration update
|
||||
func (ais *AudioInputServer) processConfig(data []byte) error {
|
||||
// Validate configuration data
|
||||
if len(data) == 0 {
|
||||
return fmt.Errorf("empty configuration data")
|
||||
}
|
||||
|
||||
// Basic validation for configuration size
|
||||
if err := ValidateBufferSize(len(data)); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputServerComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Configuration buffer validation failed")
|
||||
return fmt.Errorf("configuration validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Acknowledge configuration receipt
|
||||
return ais.sendAck()
|
||||
}
|
||||
|
@ -596,6 +634,13 @@ func (aic *AudioInputClient) SendFrame(frame []byte) error {
|
|||
return nil // Empty frame, ignore
|
||||
}
|
||||
|
||||
// Validate frame data before sending
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Frame validation failed")
|
||||
return fmt.Errorf("input frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
if len(frame) > maxFrameSize {
|
||||
return fmt.Errorf("frame too large: got %d bytes, maximum allowed %d bytes", len(frame), maxFrameSize)
|
||||
}
|
||||
|
@ -624,6 +669,13 @@ func (aic *AudioInputClient) SendFrameZeroCopy(frame *ZeroCopyAudioFrame) error
|
|||
return nil // Empty frame, ignore
|
||||
}
|
||||
|
||||
// Validate zero-copy frame before sending
|
||||
if err := ValidateZeroCopyFrame(frame); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Zero-copy frame validation failed")
|
||||
return fmt.Errorf("input frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
if frame.Length() > maxFrameSize {
|
||||
return fmt.Errorf("frame too large: got %d bytes, maximum allowed %d bytes", frame.Length(), maxFrameSize)
|
||||
}
|
||||
|
@ -649,6 +701,13 @@ func (aic *AudioInputClient) SendConfig(config InputIPCConfig) error {
|
|||
return fmt.Errorf("not connected to audio input server")
|
||||
}
|
||||
|
||||
// Validate configuration parameters
|
||||
if err := ValidateInputIPCConfig(config.SampleRate, config.Channels, config.FrameSize); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Configuration validation failed")
|
||||
return fmt.Errorf("input configuration validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Serialize config (simple binary format)
|
||||
data := make([]byte, 12) // 3 * int32
|
||||
binary.LittleEndian.PutUint32(data[0:4], uint32(config.SampleRate))
|
||||
|
@ -735,7 +794,7 @@ func (ais *AudioInputServer) startReaderGoroutine() {
|
|||
baseBackoffDelay := GetConfig().RetryDelay
|
||||
maxBackoffDelay := GetConfig().MaxRetryDelay
|
||||
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-reader").Logger()
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
|
||||
for {
|
||||
select {
|
||||
|
@ -820,7 +879,7 @@ func (ais *AudioInputServer) startProcessorGoroutine() {
|
|||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Set high priority for audio processing
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-processor").Logger()
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
if err := SetAudioThreadPriority(); err != nil {
|
||||
logger.Warn().Err(err).Msg("Failed to set audio processing priority")
|
||||
}
|
||||
|
@ -937,7 +996,7 @@ func (ais *AudioInputServer) startMonitorGoroutine() {
|
|||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Set I/O priority for monitoring
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-monitor").Logger()
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
if err := SetAudioIOThreadPriority(); err != nil {
|
||||
logger.Warn().Err(err).Msg("Failed to set audio I/O priority")
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@ func (aim *AudioInputIPCManager) Start() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("starting component")
|
||||
aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("starting component")
|
||||
|
||||
err := aim.supervisor.Start()
|
||||
if err != nil {
|
||||
|
@ -49,6 +49,17 @@ func (aim *AudioInputIPCManager) Start() error {
|
|||
FrameSize: GetConfig().InputIPCFrameSize,
|
||||
}
|
||||
|
||||
// Validate configuration before using it
|
||||
if err := ValidateInputIPCConfig(config.SampleRate, config.Channels, config.FrameSize); err != nil {
|
||||
aim.logger.Warn().Err(err).Msg("invalid input IPC config from constants, using defaults")
|
||||
// Use safe defaults if config validation fails
|
||||
config = InputIPCConfig{
|
||||
SampleRate: 48000,
|
||||
Channels: 2,
|
||||
FrameSize: 960,
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for subprocess readiness
|
||||
time.Sleep(GetConfig().LongSleepDuration)
|
||||
|
||||
|
@ -58,7 +69,7 @@ func (aim *AudioInputIPCManager) Start() error {
|
|||
aim.logger.Warn().Err(err).Str("component", AudioInputIPCComponent).Msg("failed to send initial config, will retry later")
|
||||
}
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("component started successfully")
|
||||
aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("component started successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -68,9 +79,9 @@ func (aim *AudioInputIPCManager) Stop() {
|
|||
return
|
||||
}
|
||||
|
||||
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("stopping component")
|
||||
aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("stopping component")
|
||||
aim.supervisor.Stop()
|
||||
aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("component stopped")
|
||||
aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("component stopped")
|
||||
}
|
||||
|
||||
// resetMetrics resets all metrics to zero
|
||||
|
@ -91,6 +102,13 @@ func (aim *AudioInputIPCManager) WriteOpusFrame(frame []byte) error {
|
|||
return nil // Empty frame, ignore
|
||||
}
|
||||
|
||||
// Validate frame data
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
atomic.AddInt64(&aim.metrics.FramesDropped, 1)
|
||||
aim.logger.Debug().Err(err).Msg("invalid frame data")
|
||||
return err
|
||||
}
|
||||
|
||||
// Start latency measurement
|
||||
startTime := time.Now()
|
||||
|
||||
|
@ -104,7 +122,7 @@ func (aim *AudioInputIPCManager) WriteOpusFrame(frame []byte) error {
|
|||
if err != nil {
|
||||
// Count as dropped frame
|
||||
atomic.AddInt64(&aim.metrics.FramesDropped, 1)
|
||||
aim.logger.Debug().Err(err).Msg("Failed to send frame via IPC")
|
||||
aim.logger.Debug().Err(err).Msg("failed to send frame via IPC")
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -125,6 +143,13 @@ func (aim *AudioInputIPCManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFram
|
|||
return nil // Empty frame, ignore
|
||||
}
|
||||
|
||||
// Validate zero-copy frame
|
||||
if err := ValidateZeroCopyFrame(frame); err != nil {
|
||||
atomic.AddInt64(&aim.metrics.FramesDropped, 1)
|
||||
aim.logger.Debug().Err(err).Msg("invalid zero-copy frame")
|
||||
return err
|
||||
}
|
||||
|
||||
// Start latency measurement
|
||||
startTime := time.Now()
|
||||
|
||||
|
@ -138,7 +163,7 @@ func (aim *AudioInputIPCManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFram
|
|||
if err != nil {
|
||||
// Count as dropped frame
|
||||
atomic.AddInt64(&aim.metrics.FramesDropped, 1)
|
||||
aim.logger.Debug().Err(err).Msg("Failed to send zero-copy frame via IPC")
|
||||
aim.logger.Debug().Err(err).Msg("failed to send zero-copy frame via IPC")
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -166,12 +191,15 @@ func (aim *AudioInputIPCManager) IsReady() bool {
|
|||
// GetMetrics returns current metrics
|
||||
func (aim *AudioInputIPCManager) GetMetrics() AudioInputMetrics {
|
||||
return AudioInputMetrics{
|
||||
FramesSent: atomic.LoadInt64(&aim.metrics.FramesSent),
|
||||
FramesDropped: atomic.LoadInt64(&aim.metrics.FramesDropped),
|
||||
BytesProcessed: atomic.LoadInt64(&aim.metrics.BytesProcessed),
|
||||
ConnectionDrops: atomic.LoadInt64(&aim.metrics.ConnectionDrops),
|
||||
AverageLatency: aim.metrics.AverageLatency,
|
||||
LastFrameTime: aim.metrics.LastFrameTime,
|
||||
FramesSent: atomic.LoadInt64(&aim.metrics.FramesSent),
|
||||
BaseAudioMetrics: BaseAudioMetrics{
|
||||
FramesProcessed: atomic.LoadInt64(&aim.metrics.FramesProcessed),
|
||||
FramesDropped: atomic.LoadInt64(&aim.metrics.FramesDropped),
|
||||
BytesProcessed: atomic.LoadInt64(&aim.metrics.BytesProcessed),
|
||||
ConnectionDrops: atomic.LoadInt64(&aim.metrics.ConnectionDrops),
|
||||
AverageLatency: aim.metrics.AverageLatency,
|
||||
LastFrameTime: aim.metrics.LastFrameTime,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -14,7 +14,10 @@ import (
|
|||
// This should be called from main() when the subprocess is detected
|
||||
func RunAudioInputServer() error {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-server").Logger()
|
||||
logger.Info().Msg("Starting audio input server subprocess")
|
||||
logger.Debug().Msg("audio input server subprocess starting")
|
||||
|
||||
// Initialize validation cache for optimal performance
|
||||
InitValidationCache()
|
||||
|
||||
// Start adaptive buffer management for optimal performance
|
||||
StartAdaptiveBuffering()
|
||||
|
@ -23,7 +26,7 @@ func RunAudioInputServer() error {
|
|||
// Initialize CGO audio system
|
||||
err := CGOAudioPlaybackInit()
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to initialize CGO audio playback")
|
||||
logger.Error().Err(err).Msg("failed to initialize CGO audio playback")
|
||||
return err
|
||||
}
|
||||
defer CGOAudioPlaybackClose()
|
||||
|
@ -31,18 +34,18 @@ func RunAudioInputServer() error {
|
|||
// Create and start the IPC server
|
||||
server, err := NewAudioInputServer()
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to create audio input server")
|
||||
logger.Error().Err(err).Msg("failed to create audio input server")
|
||||
return err
|
||||
}
|
||||
defer server.Close()
|
||||
|
||||
err = server.Start()
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("Failed to start audio input server")
|
||||
logger.Error().Err(err).Msg("failed to start audio input server")
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info().Msg("Audio input server started, waiting for connections")
|
||||
logger.Debug().Msg("audio input server started, waiting for connections")
|
||||
|
||||
// Set up signal handling for graceful shutdown
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -54,18 +57,18 @@ func RunAudioInputServer() error {
|
|||
// Wait for shutdown signal
|
||||
select {
|
||||
case sig := <-sigChan:
|
||||
logger.Info().Str("signal", sig.String()).Msg("Received shutdown signal")
|
||||
logger.Info().Str("signal", sig.String()).Msg("received shutdown signal")
|
||||
case <-ctx.Done():
|
||||
logger.Info().Msg("Context cancelled")
|
||||
logger.Debug().Msg("context cancelled")
|
||||
}
|
||||
|
||||
// Graceful shutdown
|
||||
logger.Info().Msg("Shutting down audio input server")
|
||||
logger.Debug().Msg("shutting down audio input server")
|
||||
server.Stop()
|
||||
|
||||
// Give some time for cleanup
|
||||
time.Sleep(GetConfig().DefaultSleepDuration)
|
||||
|
||||
logger.Info().Msg("Audio input server subprocess stopped")
|
||||
logger.Debug().Msg("audio input server subprocess stopped")
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,50 +1,38 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// AudioInputSupervisor manages the audio input server subprocess
|
||||
type AudioInputSupervisor struct {
|
||||
cmd *exec.Cmd
|
||||
cancel context.CancelFunc
|
||||
mtx sync.Mutex
|
||||
running bool
|
||||
logger zerolog.Logger
|
||||
client *AudioInputClient
|
||||
processMonitor *ProcessMonitor
|
||||
*BaseSupervisor
|
||||
client *AudioInputClient
|
||||
}
|
||||
|
||||
// NewAudioInputSupervisor creates a new audio input supervisor
|
||||
func NewAudioInputSupervisor() *AudioInputSupervisor {
|
||||
return &AudioInputSupervisor{
|
||||
logger: logging.GetDefaultLogger().With().Str("component", "audio-input-supervisor").Logger(),
|
||||
BaseSupervisor: NewBaseSupervisor("audio-input-supervisor"),
|
||||
client: NewAudioInputClient(),
|
||||
processMonitor: GetProcessMonitor(),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the audio input server subprocess
|
||||
func (ais *AudioInputSupervisor) Start() error {
|
||||
ais.mtx.Lock()
|
||||
defer ais.mtx.Unlock()
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
|
||||
if ais.running {
|
||||
if ais.IsRunning() {
|
||||
return fmt.Errorf("audio input supervisor already running with PID %d", ais.cmd.Process.Pid)
|
||||
}
|
||||
|
||||
// Create context for subprocess management
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ais.cancel = cancel
|
||||
ais.createContext()
|
||||
|
||||
// Get current executable path
|
||||
execPath, err := os.Executable()
|
||||
|
@ -53,7 +41,7 @@ func (ais *AudioInputSupervisor) Start() error {
|
|||
}
|
||||
|
||||
// Create command for audio input server subprocess
|
||||
cmd := exec.CommandContext(ctx, execPath, "--audio-input-server")
|
||||
cmd := exec.CommandContext(ais.ctx, execPath, "--audio-input-server")
|
||||
cmd.Env = append(os.Environ(),
|
||||
"JETKVM_AUDIO_INPUT_IPC=true", // Enable IPC mode
|
||||
)
|
||||
|
@ -64,13 +52,13 @@ func (ais *AudioInputSupervisor) Start() error {
|
|||
}
|
||||
|
||||
ais.cmd = cmd
|
||||
ais.running = true
|
||||
ais.setRunning(true)
|
||||
|
||||
// Start the subprocess
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
ais.running = false
|
||||
cancel()
|
||||
ais.setRunning(false)
|
||||
ais.cancelContext()
|
||||
return fmt.Errorf("failed to start audio input server process: %w", err)
|
||||
}
|
||||
|
||||
|
@ -90,14 +78,14 @@ func (ais *AudioInputSupervisor) Start() error {
|
|||
|
||||
// Stop stops the audio input server subprocess
|
||||
func (ais *AudioInputSupervisor) Stop() {
|
||||
ais.mtx.Lock()
|
||||
defer ais.mtx.Unlock()
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
|
||||
if !ais.running {
|
||||
if !ais.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
ais.running = false
|
||||
ais.logSupervisorStop()
|
||||
|
||||
// Disconnect client first
|
||||
if ais.client != nil {
|
||||
|
@ -105,9 +93,7 @@ func (ais *AudioInputSupervisor) Stop() {
|
|||
}
|
||||
|
||||
// Cancel context to signal subprocess to stop
|
||||
if ais.cancel != nil {
|
||||
ais.cancel()
|
||||
}
|
||||
ais.cancelContext()
|
||||
|
||||
// Try graceful termination first
|
||||
if ais.cmd != nil && ais.cmd.Process != nil {
|
||||
|
@ -138,19 +124,14 @@ func (ais *AudioInputSupervisor) Stop() {
|
|||
}
|
||||
}
|
||||
|
||||
ais.setRunning(false)
|
||||
ais.cmd = nil
|
||||
ais.cancel = nil
|
||||
}
|
||||
|
||||
// IsRunning returns whether the supervisor is running
|
||||
func (ais *AudioInputSupervisor) IsRunning() bool {
|
||||
ais.mtx.Lock()
|
||||
defer ais.mtx.Unlock()
|
||||
return ais.running
|
||||
}
|
||||
|
||||
// IsConnected returns whether the client is connected to the audio input server
|
||||
func (ais *AudioInputSupervisor) IsConnected() bool {
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
if !ais.IsRunning() {
|
||||
return false
|
||||
}
|
||||
|
@ -162,41 +143,11 @@ func (ais *AudioInputSupervisor) GetClient() *AudioInputClient {
|
|||
return ais.client
|
||||
}
|
||||
|
||||
// GetProcessMetrics returns current process metrics if the process is running
|
||||
// GetProcessMetrics returns current process metrics with audio-input-server name
|
||||
func (ais *AudioInputSupervisor) GetProcessMetrics() *ProcessMetrics {
|
||||
ais.mtx.Lock()
|
||||
defer ais.mtx.Unlock()
|
||||
|
||||
if ais.cmd == nil || ais.cmd.Process == nil {
|
||||
// Return default metrics when no process is running
|
||||
return &ProcessMetrics{
|
||||
PID: 0,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Timestamp: time.Now(),
|
||||
ProcessName: "audio-input-server",
|
||||
}
|
||||
}
|
||||
|
||||
pid := ais.cmd.Process.Pid
|
||||
metrics := ais.processMonitor.GetCurrentMetrics()
|
||||
for _, metric := range metrics {
|
||||
if metric.PID == pid {
|
||||
return &metric
|
||||
}
|
||||
}
|
||||
// Return default metrics if process not found in monitoring
|
||||
return &ProcessMetrics{
|
||||
PID: pid,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Timestamp: time.Now(),
|
||||
ProcessName: "audio-input-server",
|
||||
}
|
||||
metrics := ais.BaseSupervisor.GetProcessMetrics()
|
||||
metrics.ProcessName = "audio-input-server"
|
||||
return metrics
|
||||
}
|
||||
|
||||
// monitorSubprocess monitors the subprocess and handles unexpected exits
|
||||
|
@ -211,10 +162,10 @@ func (ais *AudioInputSupervisor) monitorSubprocess() {
|
|||
// Remove process from monitoring
|
||||
ais.processMonitor.RemoveProcess(pid)
|
||||
|
||||
ais.mtx.Lock()
|
||||
defer ais.mtx.Unlock()
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
|
||||
if ais.running {
|
||||
if ais.IsRunning() {
|
||||
// Unexpected exit
|
||||
if err != nil {
|
||||
ais.logger.Error().Err(err).Int("pid", pid).Msg("Audio input server subprocess exited unexpectedly")
|
||||
|
@ -228,7 +179,7 @@ func (ais *AudioInputSupervisor) monitorSubprocess() {
|
|||
}
|
||||
|
||||
// Mark as not running
|
||||
ais.running = false
|
||||
ais.setRunning(false)
|
||||
ais.cmd = nil
|
||||
|
||||
ais.logger.Info().Int("pid", pid).Msg("Audio input server subprocess monitoring stopped")
|
||||
|
|
|
@ -181,12 +181,15 @@ func TestAudioInputManagerMultipleStartStop(t *testing.T) {
|
|||
|
||||
func TestAudioInputMetrics(t *testing.T) {
|
||||
metrics := &AudioInputMetrics{
|
||||
FramesSent: 100,
|
||||
FramesDropped: 5,
|
||||
BytesProcessed: 1024,
|
||||
ConnectionDrops: 2,
|
||||
AverageLatency: time.Millisecond * 10,
|
||||
LastFrameTime: time.Now(),
|
||||
BaseAudioMetrics: BaseAudioMetrics{
|
||||
FramesProcessed: 100,
|
||||
FramesDropped: 5,
|
||||
BytesProcessed: 1024,
|
||||
ConnectionDrops: 2,
|
||||
AverageLatency: time.Millisecond * 10,
|
||||
LastFrameTime: time.Now(),
|
||||
},
|
||||
FramesSent: 100,
|
||||
}
|
||||
|
||||
assert.Equal(t, int64(100), metrics.FramesSent)
|
||||
|
|
|
@ -23,6 +23,13 @@ var (
|
|||
// Output IPC constants are now centralized in config_constants.go
|
||||
// outputMaxFrameSize, outputWriteTimeout, outputMaxDroppedFrames, outputHeaderSize, outputMessagePoolSize
|
||||
|
||||
// OutputIPCConfig represents configuration for audio output
|
||||
type OutputIPCConfig struct {
|
||||
SampleRate int
|
||||
Channels int
|
||||
FrameSize int
|
||||
}
|
||||
|
||||
// OutputMessageType represents the type of IPC message
|
||||
type OutputMessageType uint8
|
||||
|
||||
|
@ -106,7 +113,7 @@ func NewAudioOutputServer() (*AudioOutputServer, error) {
|
|||
|
||||
// Initialize latency monitoring
|
||||
latencyConfig := DefaultLatencyConfig()
|
||||
logger := zerolog.New(os.Stderr).With().Timestamp().Str("component", "audio-server").Logger()
|
||||
logger := zerolog.New(os.Stderr).With().Timestamp().Str("component", AudioOutputServerComponent).Logger()
|
||||
latencyMonitor := NewLatencyMonitor(latencyConfig, logger)
|
||||
|
||||
// Initialize adaptive buffer manager with default config
|
||||
|
@ -160,7 +167,7 @@ func (s *AudioOutputServer) Start() error {
|
|||
|
||||
// acceptConnections accepts incoming connections
|
||||
func (s *AudioOutputServer) acceptConnections() {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-server").Logger()
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioOutputServerComponent).Logger()
|
||||
for s.running {
|
||||
conn, err := s.listener.Accept()
|
||||
if err != nil {
|
||||
|
@ -253,6 +260,14 @@ func (s *AudioOutputServer) Close() error {
|
|||
}
|
||||
|
||||
func (s *AudioOutputServer) SendFrame(frame []byte) error {
|
||||
// Use ultra-fast validation for critical audio path
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioOutputServerComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Frame validation failed")
|
||||
return fmt.Errorf("output frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Additional output-specific size check
|
||||
maxFrameSize := GetConfig().OutputMaxFrameSize
|
||||
if len(frame) > maxFrameSize {
|
||||
return fmt.Errorf("output frame size validation failed: got %d bytes, maximum allowed %d bytes", len(frame), maxFrameSize)
|
||||
|
|
|
@ -94,6 +94,13 @@ func DefaultLatencyConfig() LatencyConfig {
|
|||
|
||||
// NewLatencyMonitor creates a new latency monitoring system
|
||||
func NewLatencyMonitor(config LatencyConfig, logger zerolog.Logger) *LatencyMonitor {
|
||||
// Validate latency configuration
|
||||
if err := ValidateLatencyConfig(config); err != nil {
|
||||
// Log validation error and use default configuration
|
||||
logger.Error().Err(err).Msg("Invalid latency configuration provided, using defaults")
|
||||
config = DefaultLatencyConfig()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
return &LatencyMonitor{
|
||||
|
@ -110,14 +117,14 @@ func NewLatencyMonitor(config LatencyConfig, logger zerolog.Logger) *LatencyMoni
|
|||
func (lm *LatencyMonitor) Start() {
|
||||
lm.wg.Add(1)
|
||||
go lm.monitoringLoop()
|
||||
lm.logger.Info().Msg("Latency monitor started")
|
||||
lm.logger.Debug().Msg("latency monitor started")
|
||||
}
|
||||
|
||||
// Stop stops the latency monitor
|
||||
func (lm *LatencyMonitor) Stop() {
|
||||
lm.cancel()
|
||||
lm.wg.Wait()
|
||||
lm.logger.Info().Msg("Latency monitor stopped")
|
||||
lm.logger.Debug().Msg("latency monitor stopped")
|
||||
}
|
||||
|
||||
// RecordLatency records a new latency measurement
|
||||
|
@ -125,9 +132,6 @@ func (lm *LatencyMonitor) RecordLatency(latency time.Duration, source string) {
|
|||
now := time.Now()
|
||||
latencyNanos := latency.Nanoseconds()
|
||||
|
||||
// Record in granular metrics histogram
|
||||
GetGranularMetricsCollector().RecordProcessingLatency(latency)
|
||||
|
||||
// Update atomic counters
|
||||
atomic.StoreInt64(&lm.currentLatency, latencyNanos)
|
||||
atomic.AddInt64(&lm.latencySamples, 1)
|
||||
|
@ -256,20 +260,20 @@ func (lm *LatencyMonitor) runOptimization() {
|
|||
// Check if current latency exceeds threshold
|
||||
if metrics.Current > lm.config.MaxLatency {
|
||||
needsOptimization = true
|
||||
lm.logger.Warn().Dur("current_latency", metrics.Current).Dur("max_latency", lm.config.MaxLatency).Msg("Latency exceeds maximum threshold")
|
||||
lm.logger.Warn().Dur("current_latency", metrics.Current).Dur("max_latency", lm.config.MaxLatency).Msg("latency exceeds maximum threshold")
|
||||
}
|
||||
|
||||
// Check if average latency is above adaptive threshold
|
||||
adaptiveThreshold := time.Duration(float64(lm.config.TargetLatency.Nanoseconds()) * (1.0 + lm.config.AdaptiveThreshold))
|
||||
if metrics.Average > adaptiveThreshold {
|
||||
needsOptimization = true
|
||||
lm.logger.Info().Dur("average_latency", metrics.Average).Dur("threshold", adaptiveThreshold).Msg("Average latency above adaptive threshold")
|
||||
lm.logger.Debug().Dur("average_latency", metrics.Average).Dur("threshold", adaptiveThreshold).Msg("average latency above adaptive threshold")
|
||||
}
|
||||
|
||||
// Check if jitter is too high
|
||||
if metrics.Jitter > lm.config.JitterThreshold {
|
||||
needsOptimization = true
|
||||
lm.logger.Info().Dur("jitter", metrics.Jitter).Dur("threshold", lm.config.JitterThreshold).Msg("Jitter above threshold")
|
||||
lm.logger.Debug().Dur("jitter", metrics.Jitter).Dur("threshold", lm.config.JitterThreshold).Msg("jitter above threshold")
|
||||
}
|
||||
|
||||
if needsOptimization {
|
||||
|
@ -283,11 +287,11 @@ func (lm *LatencyMonitor) runOptimization() {
|
|||
|
||||
for _, callback := range callbacks {
|
||||
if err := callback(metrics); err != nil {
|
||||
lm.logger.Error().Err(err).Msg("Optimization callback failed")
|
||||
lm.logger.Error().Err(err).Msg("optimization callback failed")
|
||||
}
|
||||
}
|
||||
|
||||
lm.logger.Info().Interface("metrics", metrics).Msg("Latency optimization triggered")
|
||||
lm.logger.Debug().Interface("metrics", metrics).Msg("latency optimization triggered")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,535 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// LatencyProfiler provides comprehensive end-to-end audio latency profiling
|
||||
// with nanosecond precision across the entire WebRTC->IPC->CGO->ALSA pipeline
|
||||
type LatencyProfiler struct {
|
||||
// Atomic counters for thread-safe access (MUST be first for ARM32 alignment)
|
||||
totalMeasurements int64 // Total number of measurements taken
|
||||
webrtcLatencySum int64 // Sum of WebRTC processing latencies (nanoseconds)
|
||||
ipcLatencySum int64 // Sum of IPC communication latencies (nanoseconds)
|
||||
cgoLatencySum int64 // Sum of CGO call latencies (nanoseconds)
|
||||
alsaLatencySum int64 // Sum of ALSA device latencies (nanoseconds)
|
||||
endToEndLatencySum int64 // Sum of complete end-to-end latencies (nanoseconds)
|
||||
validationLatencySum int64 // Sum of validation overhead (nanoseconds)
|
||||
serializationLatencySum int64 // Sum of serialization overhead (nanoseconds)
|
||||
|
||||
// Peak latency tracking
|
||||
maxWebrtcLatency int64 // Maximum WebRTC latency observed (nanoseconds)
|
||||
maxIpcLatency int64 // Maximum IPC latency observed (nanoseconds)
|
||||
maxCgoLatency int64 // Maximum CGO latency observed (nanoseconds)
|
||||
maxAlsaLatency int64 // Maximum ALSA latency observed (nanoseconds)
|
||||
maxEndToEndLatency int64 // Maximum end-to-end latency observed (nanoseconds)
|
||||
|
||||
// Configuration and control
|
||||
config LatencyProfilerConfig
|
||||
logger zerolog.Logger
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
running int32 // Atomic flag for profiler state
|
||||
enabled int32 // Atomic flag for measurement collection
|
||||
|
||||
// Detailed measurement storage
|
||||
measurements []DetailedLatencyMeasurement
|
||||
measurementMutex sync.RWMutex
|
||||
measurementIndex int
|
||||
|
||||
// High-resolution timing
|
||||
timeSource func() int64 // Nanosecond precision time source
|
||||
}
|
||||
|
||||
// LatencyProfilerConfig defines profiler configuration
|
||||
type LatencyProfilerConfig struct {
|
||||
MaxMeasurements int // Maximum measurements to store in memory
|
||||
SamplingRate float64 // Sampling rate (0.0-1.0, 1.0 = profile every frame)
|
||||
ReportingInterval time.Duration // How often to log profiling reports
|
||||
ThresholdWarning time.Duration // Latency threshold for warnings
|
||||
ThresholdCritical time.Duration // Latency threshold for critical alerts
|
||||
EnableDetailedTrace bool // Enable detailed per-component tracing
|
||||
EnableHistogram bool // Enable latency histogram collection
|
||||
}
|
||||
|
||||
// DetailedLatencyMeasurement captures comprehensive latency breakdown
|
||||
type DetailedLatencyMeasurement struct {
|
||||
Timestamp time.Time // When the measurement was taken
|
||||
FrameID uint64 // Unique frame identifier for tracing
|
||||
WebRTCLatency time.Duration // WebRTC processing time
|
||||
IPCLatency time.Duration // IPC communication time
|
||||
CGOLatency time.Duration // CGO call overhead
|
||||
ALSALatency time.Duration // ALSA device processing time
|
||||
ValidationLatency time.Duration // Frame validation overhead
|
||||
SerializationLatency time.Duration // Data serialization overhead
|
||||
EndToEndLatency time.Duration // Complete pipeline latency
|
||||
Source string // Source component (input/output)
|
||||
FrameSize int // Size of the audio frame in bytes
|
||||
CPUUsage float64 // CPU usage at time of measurement
|
||||
MemoryUsage uint64 // Memory usage at time of measurement
|
||||
}
|
||||
|
||||
// LatencyProfileReport contains aggregated profiling results
|
||||
type LatencyProfileReport struct {
|
||||
TotalMeasurements int64 // Total measurements taken
|
||||
TimeRange time.Duration // Time span of measurements
|
||||
|
||||
// Average latencies
|
||||
AvgWebRTCLatency time.Duration
|
||||
AvgIPCLatency time.Duration
|
||||
AvgCGOLatency time.Duration
|
||||
AvgALSALatency time.Duration
|
||||
AvgEndToEndLatency time.Duration
|
||||
AvgValidationLatency time.Duration
|
||||
AvgSerializationLatency time.Duration
|
||||
|
||||
// Peak latencies
|
||||
MaxWebRTCLatency time.Duration
|
||||
MaxIPCLatency time.Duration
|
||||
MaxCGOLatency time.Duration
|
||||
MaxALSALatency time.Duration
|
||||
MaxEndToEndLatency time.Duration
|
||||
|
||||
// Performance analysis
|
||||
BottleneckComponent string // Component with highest average latency
|
||||
LatencyDistribution map[string]int // Histogram of latency ranges
|
||||
Throughput float64 // Frames per second processed
|
||||
}
|
||||
|
||||
// FrameLatencyTracker tracks latency for a single audio frame through the pipeline
|
||||
type FrameLatencyTracker struct {
|
||||
frameID uint64
|
||||
startTime int64 // Nanosecond timestamp
|
||||
webrtcStartTime int64
|
||||
ipcStartTime int64
|
||||
cgoStartTime int64
|
||||
alsaStartTime int64
|
||||
validationStartTime int64
|
||||
serializationStartTime int64
|
||||
frameSize int
|
||||
source string
|
||||
}
|
||||
|
||||
// Global profiler instance
|
||||
var (
|
||||
globalLatencyProfiler unsafe.Pointer // *LatencyProfiler
|
||||
profilerInitialized int32
|
||||
)
|
||||
|
||||
// DefaultLatencyProfilerConfig returns default profiler configuration
|
||||
func DefaultLatencyProfilerConfig() LatencyProfilerConfig {
|
||||
return LatencyProfilerConfig{
|
||||
MaxMeasurements: 10000,
|
||||
SamplingRate: 0.1, // Profile 10% of frames to minimize overhead
|
||||
ReportingInterval: 30 * time.Second,
|
||||
ThresholdWarning: 50 * time.Millisecond,
|
||||
ThresholdCritical: 100 * time.Millisecond,
|
||||
EnableDetailedTrace: false, // Disabled by default for performance
|
||||
EnableHistogram: true,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLatencyProfiler creates a new latency profiler
|
||||
func NewLatencyProfiler(config LatencyProfilerConfig) *LatencyProfiler {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "latency-profiler").Logger()
|
||||
|
||||
// Validate configuration
|
||||
if config.MaxMeasurements <= 0 {
|
||||
config.MaxMeasurements = 10000
|
||||
}
|
||||
if config.SamplingRate < 0.0 || config.SamplingRate > 1.0 {
|
||||
config.SamplingRate = 0.1
|
||||
}
|
||||
if config.ReportingInterval <= 0 {
|
||||
config.ReportingInterval = 30 * time.Second
|
||||
}
|
||||
|
||||
profiler := &LatencyProfiler{
|
||||
config: config,
|
||||
logger: logger,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
measurements: make([]DetailedLatencyMeasurement, config.MaxMeasurements),
|
||||
timeSource: func() int64 { return time.Now().UnixNano() },
|
||||
}
|
||||
|
||||
// Initialize peak latencies to zero
|
||||
atomic.StoreInt64(&profiler.maxWebrtcLatency, 0)
|
||||
atomic.StoreInt64(&profiler.maxIpcLatency, 0)
|
||||
atomic.StoreInt64(&profiler.maxCgoLatency, 0)
|
||||
atomic.StoreInt64(&profiler.maxAlsaLatency, 0)
|
||||
atomic.StoreInt64(&profiler.maxEndToEndLatency, 0)
|
||||
|
||||
return profiler
|
||||
}
|
||||
|
||||
// Start begins latency profiling
|
||||
func (lp *LatencyProfiler) Start() error {
|
||||
if !atomic.CompareAndSwapInt32(&lp.running, 0, 1) {
|
||||
return fmt.Errorf("latency profiler already running")
|
||||
}
|
||||
|
||||
// Enable measurement collection
|
||||
atomic.StoreInt32(&lp.enabled, 1)
|
||||
|
||||
// Start reporting goroutine
|
||||
go lp.reportingLoop()
|
||||
|
||||
lp.logger.Info().Float64("sampling_rate", lp.config.SamplingRate).Msg("latency profiler started")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops latency profiling
|
||||
func (lp *LatencyProfiler) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&lp.running, 1, 0) {
|
||||
return
|
||||
}
|
||||
|
||||
// Disable measurement collection
|
||||
atomic.StoreInt32(&lp.enabled, 0)
|
||||
|
||||
// Cancel context to stop reporting
|
||||
lp.cancel()
|
||||
|
||||
lp.logger.Info().Msg("latency profiler stopped")
|
||||
}
|
||||
|
||||
// IsEnabled returns whether profiling is currently enabled
|
||||
func (lp *LatencyProfiler) IsEnabled() bool {
|
||||
return atomic.LoadInt32(&lp.enabled) == 1
|
||||
}
|
||||
|
||||
// StartFrameTracking begins tracking latency for a new audio frame
|
||||
func (lp *LatencyProfiler) StartFrameTracking(frameID uint64, frameSize int, source string) *FrameLatencyTracker {
|
||||
if !lp.IsEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Apply sampling rate to reduce profiling overhead
|
||||
if lp.config.SamplingRate < 1.0 {
|
||||
// Simple sampling based on frame ID
|
||||
if float64(frameID%100)/100.0 > lp.config.SamplingRate {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
now := lp.timeSource()
|
||||
return &FrameLatencyTracker{
|
||||
frameID: frameID,
|
||||
startTime: now,
|
||||
frameSize: frameSize,
|
||||
source: source,
|
||||
}
|
||||
}
|
||||
|
||||
// TrackWebRTCStart marks the start of WebRTC processing
|
||||
func (tracker *FrameLatencyTracker) TrackWebRTCStart() {
|
||||
if tracker != nil {
|
||||
tracker.webrtcStartTime = time.Now().UnixNano()
|
||||
}
|
||||
}
|
||||
|
||||
// TrackIPCStart marks the start of IPC communication
|
||||
func (tracker *FrameLatencyTracker) TrackIPCStart() {
|
||||
if tracker != nil {
|
||||
tracker.ipcStartTime = time.Now().UnixNano()
|
||||
}
|
||||
}
|
||||
|
||||
// TrackCGOStart marks the start of CGO processing
|
||||
func (tracker *FrameLatencyTracker) TrackCGOStart() {
|
||||
if tracker != nil {
|
||||
tracker.cgoStartTime = time.Now().UnixNano()
|
||||
}
|
||||
}
|
||||
|
||||
// TrackALSAStart marks the start of ALSA device processing
|
||||
func (tracker *FrameLatencyTracker) TrackALSAStart() {
|
||||
if tracker != nil {
|
||||
tracker.alsaStartTime = time.Now().UnixNano()
|
||||
}
|
||||
}
|
||||
|
||||
// TrackValidationStart marks the start of frame validation
|
||||
func (tracker *FrameLatencyTracker) TrackValidationStart() {
|
||||
if tracker != nil {
|
||||
tracker.validationStartTime = time.Now().UnixNano()
|
||||
}
|
||||
}
|
||||
|
||||
// TrackSerializationStart marks the start of data serialization
|
||||
func (tracker *FrameLatencyTracker) TrackSerializationStart() {
|
||||
if tracker != nil {
|
||||
tracker.serializationStartTime = time.Now().UnixNano()
|
||||
}
|
||||
}
|
||||
|
||||
// FinishTracking completes frame tracking and records the measurement
|
||||
func (lp *LatencyProfiler) FinishTracking(tracker *FrameLatencyTracker) {
|
||||
if tracker == nil || !lp.IsEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
endTime := lp.timeSource()
|
||||
|
||||
// Calculate component latencies
|
||||
var webrtcLatency, ipcLatency, cgoLatency, alsaLatency, validationLatency, serializationLatency time.Duration
|
||||
|
||||
if tracker.webrtcStartTime > 0 {
|
||||
webrtcLatency = time.Duration(tracker.ipcStartTime - tracker.webrtcStartTime)
|
||||
}
|
||||
if tracker.ipcStartTime > 0 {
|
||||
ipcLatency = time.Duration(tracker.cgoStartTime - tracker.ipcStartTime)
|
||||
}
|
||||
if tracker.cgoStartTime > 0 {
|
||||
cgoLatency = time.Duration(tracker.alsaStartTime - tracker.cgoStartTime)
|
||||
}
|
||||
if tracker.alsaStartTime > 0 {
|
||||
alsaLatency = time.Duration(endTime - tracker.alsaStartTime)
|
||||
}
|
||||
if tracker.validationStartTime > 0 {
|
||||
validationLatency = time.Duration(tracker.ipcStartTime - tracker.validationStartTime)
|
||||
}
|
||||
if tracker.serializationStartTime > 0 {
|
||||
serializationLatency = time.Duration(tracker.cgoStartTime - tracker.serializationStartTime)
|
||||
}
|
||||
|
||||
endToEndLatency := time.Duration(endTime - tracker.startTime)
|
||||
|
||||
// Update atomic counters
|
||||
atomic.AddInt64(&lp.totalMeasurements, 1)
|
||||
atomic.AddInt64(&lp.webrtcLatencySum, webrtcLatency.Nanoseconds())
|
||||
atomic.AddInt64(&lp.ipcLatencySum, ipcLatency.Nanoseconds())
|
||||
atomic.AddInt64(&lp.cgoLatencySum, cgoLatency.Nanoseconds())
|
||||
atomic.AddInt64(&lp.alsaLatencySum, alsaLatency.Nanoseconds())
|
||||
atomic.AddInt64(&lp.endToEndLatencySum, endToEndLatency.Nanoseconds())
|
||||
atomic.AddInt64(&lp.validationLatencySum, validationLatency.Nanoseconds())
|
||||
atomic.AddInt64(&lp.serializationLatencySum, serializationLatency.Nanoseconds())
|
||||
|
||||
// Update peak latencies
|
||||
lp.updatePeakLatency(&lp.maxWebrtcLatency, webrtcLatency.Nanoseconds())
|
||||
lp.updatePeakLatency(&lp.maxIpcLatency, ipcLatency.Nanoseconds())
|
||||
lp.updatePeakLatency(&lp.maxCgoLatency, cgoLatency.Nanoseconds())
|
||||
lp.updatePeakLatency(&lp.maxAlsaLatency, alsaLatency.Nanoseconds())
|
||||
lp.updatePeakLatency(&lp.maxEndToEndLatency, endToEndLatency.Nanoseconds())
|
||||
|
||||
// Store detailed measurement if enabled
|
||||
if lp.config.EnableDetailedTrace {
|
||||
lp.storeMeasurement(DetailedLatencyMeasurement{
|
||||
Timestamp: time.Now(),
|
||||
FrameID: tracker.frameID,
|
||||
WebRTCLatency: webrtcLatency,
|
||||
IPCLatency: ipcLatency,
|
||||
CGOLatency: cgoLatency,
|
||||
ALSALatency: alsaLatency,
|
||||
ValidationLatency: validationLatency,
|
||||
SerializationLatency: serializationLatency,
|
||||
EndToEndLatency: endToEndLatency,
|
||||
Source: tracker.source,
|
||||
FrameSize: tracker.frameSize,
|
||||
CPUUsage: lp.getCurrentCPUUsage(),
|
||||
MemoryUsage: lp.getCurrentMemoryUsage(),
|
||||
})
|
||||
}
|
||||
|
||||
// Check for threshold violations
|
||||
if endToEndLatency > lp.config.ThresholdCritical {
|
||||
lp.logger.Error().Dur("latency", endToEndLatency).Uint64("frame_id", tracker.frameID).
|
||||
Str("source", tracker.source).Msg("critical latency threshold exceeded")
|
||||
} else if endToEndLatency > lp.config.ThresholdWarning {
|
||||
lp.logger.Warn().Dur("latency", endToEndLatency).Uint64("frame_id", tracker.frameID).
|
||||
Str("source", tracker.source).Msg("warning latency threshold exceeded")
|
||||
}
|
||||
}
|
||||
|
||||
// updatePeakLatency atomically updates peak latency if new value is higher
|
||||
func (lp *LatencyProfiler) updatePeakLatency(peakPtr *int64, newLatency int64) {
|
||||
for {
|
||||
current := atomic.LoadInt64(peakPtr)
|
||||
if newLatency <= current || atomic.CompareAndSwapInt64(peakPtr, current, newLatency) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// storeMeasurement stores a detailed measurement in the circular buffer
|
||||
func (lp *LatencyProfiler) storeMeasurement(measurement DetailedLatencyMeasurement) {
|
||||
lp.measurementMutex.Lock()
|
||||
defer lp.measurementMutex.Unlock()
|
||||
|
||||
lp.measurements[lp.measurementIndex] = measurement
|
||||
lp.measurementIndex = (lp.measurementIndex + 1) % len(lp.measurements)
|
||||
}
|
||||
|
||||
// GetReport generates a comprehensive latency profiling report
|
||||
func (lp *LatencyProfiler) GetReport() LatencyProfileReport {
|
||||
totalMeasurements := atomic.LoadInt64(&lp.totalMeasurements)
|
||||
if totalMeasurements == 0 {
|
||||
return LatencyProfileReport{}
|
||||
}
|
||||
|
||||
// Calculate averages
|
||||
avgWebRTC := time.Duration(atomic.LoadInt64(&lp.webrtcLatencySum) / totalMeasurements)
|
||||
avgIPC := time.Duration(atomic.LoadInt64(&lp.ipcLatencySum) / totalMeasurements)
|
||||
avgCGO := time.Duration(atomic.LoadInt64(&lp.cgoLatencySum) / totalMeasurements)
|
||||
avgALSA := time.Duration(atomic.LoadInt64(&lp.alsaLatencySum) / totalMeasurements)
|
||||
avgEndToEnd := time.Duration(atomic.LoadInt64(&lp.endToEndLatencySum) / totalMeasurements)
|
||||
avgValidation := time.Duration(atomic.LoadInt64(&lp.validationLatencySum) / totalMeasurements)
|
||||
avgSerialization := time.Duration(atomic.LoadInt64(&lp.serializationLatencySum) / totalMeasurements)
|
||||
|
||||
// Get peak latencies
|
||||
maxWebRTC := time.Duration(atomic.LoadInt64(&lp.maxWebrtcLatency))
|
||||
maxIPC := time.Duration(atomic.LoadInt64(&lp.maxIpcLatency))
|
||||
maxCGO := time.Duration(atomic.LoadInt64(&lp.maxCgoLatency))
|
||||
maxALSA := time.Duration(atomic.LoadInt64(&lp.maxAlsaLatency))
|
||||
maxEndToEnd := time.Duration(atomic.LoadInt64(&lp.maxEndToEndLatency))
|
||||
|
||||
// Determine bottleneck component
|
||||
bottleneck := "WebRTC"
|
||||
maxAvg := avgWebRTC
|
||||
if avgIPC > maxAvg {
|
||||
bottleneck = "IPC"
|
||||
maxAvg = avgIPC
|
||||
}
|
||||
if avgCGO > maxAvg {
|
||||
bottleneck = "CGO"
|
||||
maxAvg = avgCGO
|
||||
}
|
||||
if avgALSA > maxAvg {
|
||||
bottleneck = "ALSA"
|
||||
}
|
||||
|
||||
return LatencyProfileReport{
|
||||
TotalMeasurements: totalMeasurements,
|
||||
AvgWebRTCLatency: avgWebRTC,
|
||||
AvgIPCLatency: avgIPC,
|
||||
AvgCGOLatency: avgCGO,
|
||||
AvgALSALatency: avgALSA,
|
||||
AvgEndToEndLatency: avgEndToEnd,
|
||||
AvgValidationLatency: avgValidation,
|
||||
AvgSerializationLatency: avgSerialization,
|
||||
MaxWebRTCLatency: maxWebRTC,
|
||||
MaxIPCLatency: maxIPC,
|
||||
MaxCGOLatency: maxCGO,
|
||||
MaxALSALatency: maxALSA,
|
||||
MaxEndToEndLatency: maxEndToEnd,
|
||||
BottleneckComponent: bottleneck,
|
||||
}
|
||||
}
|
||||
|
||||
// reportingLoop periodically logs profiling reports
|
||||
func (lp *LatencyProfiler) reportingLoop() {
|
||||
ticker := time.NewTicker(lp.config.ReportingInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-lp.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
report := lp.GetReport()
|
||||
if report.TotalMeasurements > 0 {
|
||||
lp.logReport(report)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// logReport logs a comprehensive profiling report
|
||||
func (lp *LatencyProfiler) logReport(report LatencyProfileReport) {
|
||||
lp.logger.Info().
|
||||
Int64("total_measurements", report.TotalMeasurements).
|
||||
Dur("avg_webrtc_latency", report.AvgWebRTCLatency).
|
||||
Dur("avg_ipc_latency", report.AvgIPCLatency).
|
||||
Dur("avg_cgo_latency", report.AvgCGOLatency).
|
||||
Dur("avg_alsa_latency", report.AvgALSALatency).
|
||||
Dur("avg_end_to_end_latency", report.AvgEndToEndLatency).
|
||||
Dur("avg_validation_latency", report.AvgValidationLatency).
|
||||
Dur("avg_serialization_latency", report.AvgSerializationLatency).
|
||||
Dur("max_webrtc_latency", report.MaxWebRTCLatency).
|
||||
Dur("max_ipc_latency", report.MaxIPCLatency).
|
||||
Dur("max_cgo_latency", report.MaxCGOLatency).
|
||||
Dur("max_alsa_latency", report.MaxALSALatency).
|
||||
Dur("max_end_to_end_latency", report.MaxEndToEndLatency).
|
||||
Str("bottleneck_component", report.BottleneckComponent).
|
||||
Msg("latency profiling report")
|
||||
}
|
||||
|
||||
// getCurrentCPUUsage returns current CPU usage percentage
|
||||
func (lp *LatencyProfiler) getCurrentCPUUsage() float64 {
|
||||
// Simplified CPU usage - in production, this would use more sophisticated monitoring
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
return float64(runtime.NumGoroutine()) / 100.0 // Rough approximation
|
||||
}
|
||||
|
||||
// getCurrentMemoryUsage returns current memory usage in bytes
|
||||
func (lp *LatencyProfiler) getCurrentMemoryUsage() uint64 {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
return m.Alloc
|
||||
}
|
||||
|
||||
// GetGlobalLatencyProfiler returns the global latency profiler instance
|
||||
func GetGlobalLatencyProfiler() *LatencyProfiler {
|
||||
ptr := atomic.LoadPointer(&globalLatencyProfiler)
|
||||
if ptr != nil {
|
||||
return (*LatencyProfiler)(ptr)
|
||||
}
|
||||
|
||||
// Initialize on first use
|
||||
if atomic.CompareAndSwapInt32(&profilerInitialized, 0, 1) {
|
||||
config := DefaultLatencyProfilerConfig()
|
||||
profiler := NewLatencyProfiler(config)
|
||||
atomic.StorePointer(&globalLatencyProfiler, unsafe.Pointer(profiler))
|
||||
return profiler
|
||||
}
|
||||
|
||||
// Another goroutine initialized it, try again
|
||||
ptr = atomic.LoadPointer(&globalLatencyProfiler)
|
||||
if ptr != nil {
|
||||
return (*LatencyProfiler)(ptr)
|
||||
}
|
||||
|
||||
// Fallback: create a new profiler
|
||||
config := DefaultLatencyProfilerConfig()
|
||||
return NewLatencyProfiler(config)
|
||||
}
|
||||
|
||||
// EnableLatencyProfiling enables the global latency profiler
|
||||
func EnableLatencyProfiling() error {
|
||||
profiler := GetGlobalLatencyProfiler()
|
||||
return profiler.Start()
|
||||
}
|
||||
|
||||
// DisableLatencyProfiling disables the global latency profiler
|
||||
func DisableLatencyProfiling() {
|
||||
ptr := atomic.LoadPointer(&globalLatencyProfiler)
|
||||
if ptr != nil {
|
||||
profiler := (*LatencyProfiler)(ptr)
|
||||
profiler.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// ProfileFrameLatency is a convenience function to profile a single frame's latency
|
||||
func ProfileFrameLatency(frameID uint64, frameSize int, source string, fn func(*FrameLatencyTracker)) {
|
||||
profiler := GetGlobalLatencyProfiler()
|
||||
if !profiler.IsEnabled() {
|
||||
fn(nil)
|
||||
return
|
||||
}
|
||||
|
||||
tracker := profiler.StartFrameTracking(frameID, frameSize, source)
|
||||
defer profiler.FinishTracking(tracker)
|
||||
fn(tracker)
|
||||
}
|
|
@ -0,0 +1,323 @@
package audio

import (
	"time"

	"github.com/rs/zerolog"
)

// AudioLoggerStandards provides standardized logging patterns for audio components
type AudioLoggerStandards struct {
	logger    zerolog.Logger
	component string
}

// NewAudioLogger creates a new standardized logger for an audio component
func NewAudioLogger(logger zerolog.Logger, component string) *AudioLoggerStandards {
	return &AudioLoggerStandards{
		logger:    logger.With().Str("component", component).Logger(),
		component: component,
	}
}

// Component Lifecycle Logging

// LogComponentStarting logs component initialization start
func (als *AudioLoggerStandards) LogComponentStarting() {
	als.logger.Debug().Msg("starting component")
}

// LogComponentStarted logs successful component start
func (als *AudioLoggerStandards) LogComponentStarted() {
	als.logger.Debug().Msg("component started successfully")
}

// LogComponentStopping logs component shutdown start
func (als *AudioLoggerStandards) LogComponentStopping() {
	als.logger.Debug().Msg("stopping component")
}

// LogComponentStopped logs successful component stop
func (als *AudioLoggerStandards) LogComponentStopped() {
	als.logger.Debug().Msg("component stopped")
}

// LogComponentReady logs component ready state
func (als *AudioLoggerStandards) LogComponentReady() {
	als.logger.Info().Msg("component ready")
}
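
// Illustrative usage sketch (editor's addition, not part of this diff): a
// component builds its standardized logger once and emits lifecycle events
// through it. The component name "audio-demo" is made up for the example, and
// the base logger is derived the same way other files in this diff do.
func exampleLifecycleLogging() {
	base := logging.GetDefaultLogger().With().Logger()
	al := NewAudioLogger(base, "audio-demo")
	al.LogComponentStarting()
	// ... component initialization ...
	al.LogComponentStarted()
	al.LogComponentReady()
	al.LogComponentStopping()
	al.LogComponentStopped()
}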
|
||||
|
||||
// Error Logging with Context
|
||||
|
||||
// LogError logs a general error with context
|
||||
func (als *AudioLoggerStandards) LogError(err error, msg string) {
|
||||
als.logger.Error().Err(err).Msg(msg)
|
||||
}
|
||||
|
||||
// LogErrorWithContext logs an error with additional context fields
|
||||
func (als *AudioLoggerStandards) LogErrorWithContext(err error, msg string, fields map[string]interface{}) {
|
||||
event := als.logger.Error().Err(err)
|
||||
for key, value := range fields {
|
||||
event = event.Interface(key, value)
|
||||
}
|
||||
event.Msg(msg)
|
||||
}
|
||||
|
||||
// LogValidationError logs validation failures with specific context
|
||||
func (als *AudioLoggerStandards) LogValidationError(err error, validationType string, value interface{}) {
|
||||
als.logger.Error().Err(err).
|
||||
Str("validation_type", validationType).
|
||||
Interface("invalid_value", value).
|
||||
Msg("validation failed")
|
||||
}
|
||||
|
||||
// LogConnectionError logs connection-related errors
|
||||
func (als *AudioLoggerStandards) LogConnectionError(err error, endpoint string, retryCount int) {
|
||||
als.logger.Error().Err(err).
|
||||
Str("endpoint", endpoint).
|
||||
Int("retry_count", retryCount).
|
||||
Msg("connection failed")
|
||||
}
|
||||
|
||||
// LogProcessError logs process-related errors with PID context
|
||||
func (als *AudioLoggerStandards) LogProcessError(err error, pid int, msg string) {
|
||||
als.logger.Error().Err(err).
|
||||
Int("pid", pid).
|
||||
Msg(msg)
|
||||
}
|
||||
|
||||
// Performance and Metrics Logging
|
||||
|
||||
// LogPerformanceMetrics logs standardized performance metrics
|
||||
func (als *AudioLoggerStandards) LogPerformanceMetrics(metrics map[string]interface{}) {
|
||||
event := als.logger.Info()
|
||||
for key, value := range metrics {
|
||||
event = event.Interface(key, value)
|
||||
}
|
||||
event.Msg("performance metrics")
|
||||
}
|
||||
|
||||
// LogLatencyMetrics logs latency-specific metrics
|
||||
func (als *AudioLoggerStandards) LogLatencyMetrics(current, average, max time.Duration, jitter time.Duration) {
|
||||
als.logger.Info().
|
||||
Dur("current_latency", current).
|
||||
Dur("average_latency", average).
|
||||
Dur("max_latency", max).
|
||||
Dur("jitter", jitter).
|
||||
Msg("latency metrics")
|
||||
}
|
||||
|
||||
// LogFrameMetrics logs frame processing metrics
|
||||
func (als *AudioLoggerStandards) LogFrameMetrics(processed, dropped int64, rate float64) {
|
||||
als.logger.Info().
|
||||
Int64("frames_processed", processed).
|
||||
Int64("frames_dropped", dropped).
|
||||
Float64("processing_rate", rate).
|
||||
Msg("frame processing metrics")
|
||||
}
|
||||
|
||||
// LogBufferMetrics logs buffer utilization metrics
|
||||
func (als *AudioLoggerStandards) LogBufferMetrics(size, used, peak int, utilizationPercent float64) {
|
||||
als.logger.Info().
|
||||
Int("buffer_size", size).
|
||||
Int("buffer_used", used).
|
||||
Int("buffer_peak", peak).
|
||||
Float64("utilization_percent", utilizationPercent).
|
||||
Msg("buffer metrics")
|
||||
}
|
||||
|
||||
// Warning Logging
|
||||
|
||||
// LogWarning logs a general warning
|
||||
func (als *AudioLoggerStandards) LogWarning(msg string) {
|
||||
als.logger.Warn().Msg(msg)
|
||||
}
|
||||
|
||||
// LogWarningWithError logs a warning with error context
|
||||
func (als *AudioLoggerStandards) LogWarningWithError(err error, msg string) {
|
||||
als.logger.Warn().Err(err).Msg(msg)
|
||||
}
|
||||
|
||||
// LogThresholdWarning logs warnings when thresholds are exceeded
|
||||
func (als *AudioLoggerStandards) LogThresholdWarning(metric string, current, threshold interface{}, msg string) {
|
||||
als.logger.Warn().
|
||||
Str("metric", metric).
|
||||
Interface("current_value", current).
|
||||
Interface("threshold", threshold).
|
||||
Msg(msg)
|
||||
}
|
||||
|
||||
// LogRetryWarning logs retry attempts with context
|
||||
func (als *AudioLoggerStandards) LogRetryWarning(operation string, attempt, maxAttempts int, delay time.Duration) {
|
||||
als.logger.Warn().
|
||||
Str("operation", operation).
|
||||
Int("attempt", attempt).
|
||||
Int("max_attempts", maxAttempts).
|
||||
Dur("retry_delay", delay).
|
||||
Msg("retrying operation")
|
||||
}
|
||||
|
||||
// LogRecoveryWarning logs recovery from error conditions
|
||||
func (als *AudioLoggerStandards) LogRecoveryWarning(condition string, duration time.Duration) {
|
||||
als.logger.Warn().
|
||||
Str("condition", condition).
|
||||
Dur("recovery_time", duration).
|
||||
Msg("recovered from error condition")
|
||||
}
|
||||
|
||||
// Debug and Trace Logging
|
||||
|
||||
// LogDebug logs debug information
|
||||
func (als *AudioLoggerStandards) LogDebug(msg string) {
|
||||
als.logger.Debug().Msg(msg)
|
||||
}
|
||||
|
||||
// LogDebugWithFields logs debug information with structured fields
|
||||
func (als *AudioLoggerStandards) LogDebugWithFields(msg string, fields map[string]interface{}) {
|
||||
event := als.logger.Debug()
|
||||
for key, value := range fields {
|
||||
event = event.Interface(key, value)
|
||||
}
|
||||
event.Msg(msg)
|
||||
}
|
||||
|
||||
// LogOperationTrace logs operation tracing for debugging
|
||||
func (als *AudioLoggerStandards) LogOperationTrace(operation string, duration time.Duration, success bool) {
|
||||
als.logger.Debug().
|
||||
Str("operation", operation).
|
||||
Dur("duration", duration).
|
||||
Bool("success", success).
|
||||
Msg("operation trace")
|
||||
}
|
||||
|
||||
// LogDataFlow logs data flow for debugging
|
||||
func (als *AudioLoggerStandards) LogDataFlow(source, destination string, bytes int, frameCount int) {
|
||||
als.logger.Debug().
|
||||
Str("source", source).
|
||||
Str("destination", destination).
|
||||
Int("bytes", bytes).
|
||||
Int("frame_count", frameCount).
|
||||
Msg("data flow")
|
||||
}
|
||||
|
||||
// Configuration and State Logging
|
||||
|
||||
// LogConfigurationChange logs configuration updates
|
||||
func (als *AudioLoggerStandards) LogConfigurationChange(configType string, oldValue, newValue interface{}) {
|
||||
als.logger.Info().
|
||||
Str("config_type", configType).
|
||||
Interface("old_value", oldValue).
|
||||
Interface("new_value", newValue).
|
||||
Msg("configuration changed")
|
||||
}
|
||||
|
||||
// LogStateTransition logs component state changes
|
||||
func (als *AudioLoggerStandards) LogStateTransition(fromState, toState string, reason string) {
|
||||
als.logger.Info().
|
||||
Str("from_state", fromState).
|
||||
Str("to_state", toState).
|
||||
Str("reason", reason).
|
||||
Msg("state transition")
|
||||
}
|
||||
|
||||
// LogResourceAllocation logs resource allocation/deallocation
|
||||
func (als *AudioLoggerStandards) LogResourceAllocation(resourceType string, allocated bool, amount interface{}) {
|
||||
level := als.logger.Debug()
|
||||
if allocated {
|
||||
level.Str("action", "allocated")
|
||||
} else {
|
||||
level.Str("action", "deallocated")
|
||||
}
|
||||
level.Str("resource_type", resourceType).
|
||||
Interface("amount", amount).
|
||||
Msg("resource allocation")
|
||||
}
|
||||
|
||||
// Network and IPC Logging
|
||||
|
||||
// LogConnectionEvent logs connection lifecycle events
|
||||
func (als *AudioLoggerStandards) LogConnectionEvent(event, endpoint string, connectionID string) {
|
||||
als.logger.Info().
|
||||
Str("event", event).
|
||||
Str("endpoint", endpoint).
|
||||
Str("connection_id", connectionID).
|
||||
Msg("connection event")
|
||||
}
|
||||
|
||||
// LogIPCEvent logs IPC communication events
|
||||
func (als *AudioLoggerStandards) LogIPCEvent(event, socketPath string, bytes int) {
|
||||
als.logger.Debug().
|
||||
Str("event", event).
|
||||
Str("socket_path", socketPath).
|
||||
Int("bytes", bytes).
|
||||
Msg("IPC event")
|
||||
}
|
||||
|
||||
// LogNetworkStats logs network statistics
|
||||
func (als *AudioLoggerStandards) LogNetworkStats(sent, received int64, latency time.Duration, packetLoss float64) {
|
||||
als.logger.Info().
|
||||
Int64("bytes_sent", sent).
|
||||
Int64("bytes_received", received).
|
||||
Dur("network_latency", latency).
|
||||
Float64("packet_loss_percent", packetLoss).
|
||||
Msg("network statistics")
|
||||
}
|
||||
|
||||
// Process and System Logging
|
||||
|
||||
// LogProcessEvent logs process lifecycle events
|
||||
func (als *AudioLoggerStandards) LogProcessEvent(event string, pid int, exitCode *int) {
|
||||
event_log := als.logger.Info().
|
||||
Str("event", event).
|
||||
Int("pid", pid)
|
||||
if exitCode != nil {
|
||||
event_log = event_log.Int("exit_code", *exitCode)
|
||||
}
|
||||
event_log.Msg("process event")
|
||||
}
|
||||
|
||||
// LogSystemResource logs system resource usage
|
||||
func (als *AudioLoggerStandards) LogSystemResource(cpuPercent, memoryMB float64, goroutines int) {
|
||||
als.logger.Info().
|
||||
Float64("cpu_percent", cpuPercent).
|
||||
Float64("memory_mb", memoryMB).
|
||||
Int("goroutines", goroutines).
|
||||
Msg("system resources")
|
||||
}
|
||||
|
||||
// LogPriorityChange logs thread priority changes
|
||||
func (als *AudioLoggerStandards) LogPriorityChange(tid, oldPriority, newPriority int, policy string) {
|
||||
als.logger.Debug().
|
||||
Int("tid", tid).
|
||||
Int("old_priority", oldPriority).
|
||||
Int("new_priority", newPriority).
|
||||
Str("policy", policy).
|
||||
Msg("thread priority changed")
|
||||
}
|
||||
|
||||
// Utility Functions
|
||||
|
||||
// GetLogger returns the underlying zerolog.Logger for advanced usage
|
||||
func (als *AudioLoggerStandards) GetLogger() zerolog.Logger {
|
||||
return als.logger
|
||||
}
|
||||
|
||||
// WithFields returns a new logger with additional persistent fields
|
||||
func (als *AudioLoggerStandards) WithFields(fields map[string]interface{}) *AudioLoggerStandards {
|
||||
event := als.logger.With()
|
||||
for key, value := range fields {
|
||||
event = event.Interface(key, value)
|
||||
}
|
||||
return &AudioLoggerStandards{
|
||||
logger: event.Logger(),
|
||||
component: als.component,
|
||||
}
|
||||
}
|
||||
|
||||
// WithSubComponent creates a logger for a sub-component
|
||||
func (als *AudioLoggerStandards) WithSubComponent(subComponent string) *AudioLoggerStandards {
|
||||
return &AudioLoggerStandards{
|
||||
logger: als.logger.With().Str("sub_component", subComponent).Logger(),
|
||||
component: als.component + "." + subComponent,
|
||||
}
|
||||
}
|
|
@ -156,7 +156,10 @@ func HandleMemoryMetrics(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-cache")

	if err := json.NewEncoder(w).Encode(metrics); err != nil {
	encoder := json.NewEncoder(w)
	encoder.SetIndent("", " ")

	if err := encoder.Encode(metrics); err != nil {
		logger.Error().Err(err).Msg("failed to encode memory metrics")
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return

@ -185,7 +188,7 @@ func LogMemoryMetrics() {
// StartMemoryMetricsLogging starts periodic memory metrics logging
func StartMemoryMetricsLogging(interval time.Duration) {
	logger := getMemoryMetricsLogger()
	logger.Info().Dur("interval", interval).Msg("starting memory metrics logging")
	logger.Debug().Dur("interval", interval).Msg("memory metrics logging started")

	go func() {
		ticker := time.NewTicker(interval)

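
// Illustrative wiring sketch (editor's addition, not part of this diff): the
// handler above satisfies http.HandlerFunc, so it can be mounted on any mux.
// The "/audio/memory" route is a made-up example, not the project's real path.
func exampleRegisterMemoryMetrics() *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("/audio/memory", HandleMemoryMetrics)
	return mux
}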
@ -1,6 +1,7 @@
package audio

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"

@ -100,10 +101,10 @@ var (
		},
	)

	audioAverageLatencySeconds = promauto.NewGauge(
	audioAverageLatencyMilliseconds = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_average_latency_seconds",
			Help: "Average audio latency in seconds",
			Name: "jetkvm_audio_average_latency_milliseconds",
			Help: "Average audio latency in milliseconds",
		},
	)

@ -143,10 +144,10 @@ var (
		},
	)

	microphoneAverageLatencySeconds = promauto.NewGauge(
	microphoneAverageLatencyMilliseconds = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_microphone_average_latency_seconds",
			Help: "Average microphone latency in seconds",
			Name: "jetkvm_microphone_average_latency_milliseconds",
			Help: "Average microphone latency in milliseconds",
		},
	)

@ -286,6 +287,141 @@ var (
|
|||
},
|
||||
)
|
||||
|
||||
// Device health metrics
|
||||
deviceHealthStatus = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_device_health_status",
|
||||
Help: "Current device health status (0=Healthy, 1=Degraded, 2=Failing, 3=Critical)",
|
||||
},
|
||||
[]string{"device_type"}, // device_type: capture, playback
|
||||
)
|
||||
|
||||
deviceHealthScore = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_device_health_score",
|
||||
Help: "Device health score (0.0-1.0, higher is better)",
|
||||
},
|
||||
[]string{"device_type"}, // device_type: capture, playback
|
||||
)
|
||||
|
||||
deviceConsecutiveErrors = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_device_consecutive_errors",
|
||||
Help: "Number of consecutive errors for device",
|
||||
},
|
||||
[]string{"device_type"}, // device_type: capture, playback
|
||||
)
|
||||
|
||||
deviceTotalErrors = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "jetkvm_audio_device_total_errors",
|
||||
Help: "Total number of errors for device",
|
||||
},
|
||||
[]string{"device_type"}, // device_type: capture, playback
|
||||
)
|
||||
|
||||
deviceLatencySpikes = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "jetkvm_audio_device_latency_spikes_total",
|
||||
Help: "Total number of latency spikes for device",
|
||||
},
|
||||
[]string{"device_type"}, // device_type: capture, playback
|
||||
)
|
||||
|
||||
// Memory metrics
|
||||
memoryHeapAllocBytes = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_memory_heap_alloc_bytes",
|
||||
Help: "Current heap allocation in bytes",
|
||||
},
|
||||
)
|
||||
|
||||
memoryHeapSysBytes = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_memory_heap_sys_bytes",
|
||||
Help: "Total heap system memory in bytes",
|
||||
},
|
||||
)
|
||||
|
||||
memoryHeapObjects = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_memory_heap_objects",
|
||||
Help: "Number of heap objects",
|
||||
},
|
||||
)
|
||||
|
||||
memoryGCCount = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "jetkvm_audio_memory_gc_total",
|
||||
Help: "Total number of garbage collections",
|
||||
},
|
||||
)
|
||||
|
||||
memoryGCCPUFraction = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_memory_gc_cpu_fraction",
|
||||
Help: "Fraction of CPU time spent in garbage collection",
|
||||
},
|
||||
)
|
||||
|
||||
// Buffer pool efficiency metrics
|
||||
bufferPoolHitRate = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_buffer_pool_hit_rate_percent",
|
||||
Help: "Buffer pool hit rate percentage",
|
||||
},
|
||||
[]string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
|
||||
)
|
||||
|
||||
bufferPoolMissRate = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_buffer_pool_miss_rate_percent",
|
||||
Help: "Buffer pool miss rate percentage",
|
||||
},
|
||||
[]string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
|
||||
)
|
||||
|
||||
bufferPoolUtilization = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_buffer_pool_utilization_percent",
|
||||
Help: "Buffer pool utilization percentage",
|
||||
},
|
||||
[]string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
|
||||
)
|
||||
|
||||
bufferPoolThroughput = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_buffer_pool_throughput_ops_per_sec",
|
||||
Help: "Buffer pool throughput in operations per second",
|
||||
},
|
||||
[]string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
|
||||
)
|
||||
|
||||
bufferPoolGetLatency = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_buffer_pool_get_latency_seconds",
|
||||
Help: "Average buffer pool get operation latency in seconds",
|
||||
},
|
||||
[]string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
|
||||
)
|
||||
|
||||
bufferPoolPutLatency = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_buffer_pool_put_latency_seconds",
|
||||
Help: "Average buffer pool put operation latency in seconds",
|
||||
},
|
||||
[]string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
|
||||
)
|
||||
|
||||
// Latency percentile metrics
|
||||
latencyPercentile = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "jetkvm_audio_latency_percentile_milliseconds",
|
||||
Help: "Audio latency percentiles in milliseconds",
|
||||
},
|
||||
[]string{"source", "percentile"}, // source: input, output, processing; percentile: p50, p95, p99, min, max, avg
|
||||
)
|
||||
|
||||
// Metrics update tracking
|
||||
metricsUpdateMutex sync.RWMutex
|
||||
lastMetricsUpdate int64
|
||||
|
@ -299,6 +435,15 @@ var (
|
|||
micFramesDroppedValue int64
|
||||
micBytesProcessedValue int64
|
||||
micConnectionDropsValue int64
|
||||
|
||||
// Atomic counters for device health metrics
|
||||
deviceCaptureErrorsValue int64
|
||||
devicePlaybackErrorsValue int64
|
||||
deviceCaptureSpikesValue int64
|
||||
devicePlaybackSpikesValue int64
|
||||
|
||||
// Atomic counter for memory GC
|
||||
memoryGCCountValue uint32
|
||||
)
|
||||
|
||||
// UnifiedAudioMetrics provides a common structure for both input and output audio streams
|
||||
|
@ -361,7 +506,7 @@ func UpdateAudioMetrics(metrics UnifiedAudioMetrics) {
	}

	// Update gauges
	audioAverageLatencySeconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e9)
	audioAverageLatencyMilliseconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e6)
	if !metrics.LastFrameTime.IsZero() {
		audioLastFrameTimestamp.Set(float64(metrics.LastFrameTime.Unix()))
	}

@ -392,7 +537,7 @@ func UpdateMicrophoneMetrics(metrics UnifiedAudioMetrics) {
	}

	// Update gauges
	microphoneAverageLatencySeconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e9)
	microphoneAverageLatencyMilliseconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e6)
	if !metrics.LastFrameTime.IsZero() {
		microphoneLastFrameTimestamp.Set(float64(metrics.LastFrameTime.Unix()))
	}

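
// Unit-conversion sketch (editor's addition, not part of this diff): the
// renamed gauges report milliseconds, so a time.Duration is exported by
// dividing its nanosecond count by 1e6 rather than by 1e9 as the old
// seconds gauges did.
func exampleDurationToMilliseconds(d time.Duration) float64 {
	return float64(d.Nanoseconds()) / 1e6 // e.g. 2500µs -> 2.5
}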
@ -479,6 +624,95 @@ func UpdateAdaptiveBufferMetrics(inputBufferSize, outputBufferSize int, cpuPerce
|
|||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateSocketBufferMetrics updates socket buffer metrics
|
||||
func UpdateSocketBufferMetrics(component, bufferType string, size, utilization float64, overflowOccurred bool) {
|
||||
metricsUpdateMutex.Lock()
|
||||
defer metricsUpdateMutex.Unlock()
|
||||
|
||||
socketBufferSizeGauge.WithLabelValues(component, bufferType).Set(size)
|
||||
socketBufferUtilizationGauge.WithLabelValues(component, bufferType).Set(utilization)
|
||||
|
||||
if overflowOccurred {
|
||||
socketBufferOverflowCounter.WithLabelValues(component, bufferType).Inc()
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateDeviceHealthMetrics updates device health metrics
|
||||
func UpdateDeviceHealthMetrics(deviceType string, status int, healthScore float64, consecutiveErrors, totalErrors, latencySpikes int64) {
|
||||
metricsUpdateMutex.Lock()
|
||||
defer metricsUpdateMutex.Unlock()
|
||||
|
||||
deviceHealthStatus.WithLabelValues(deviceType).Set(float64(status))
|
||||
deviceHealthScore.WithLabelValues(deviceType).Set(healthScore)
|
||||
deviceConsecutiveErrors.WithLabelValues(deviceType).Set(float64(consecutiveErrors))
|
||||
|
||||
// Update error counters with delta calculation
|
||||
var prevErrors, prevSpikes int64
|
||||
if deviceType == "capture" {
|
||||
prevErrors = atomic.SwapInt64(&deviceCaptureErrorsValue, totalErrors)
|
||||
prevSpikes = atomic.SwapInt64(&deviceCaptureSpikesValue, latencySpikes)
|
||||
} else {
|
||||
prevErrors = atomic.SwapInt64(&devicePlaybackErrorsValue, totalErrors)
|
||||
prevSpikes = atomic.SwapInt64(&devicePlaybackSpikesValue, latencySpikes)
|
||||
}
|
||||
|
||||
if prevErrors > 0 && totalErrors > prevErrors {
|
||||
deviceTotalErrors.WithLabelValues(deviceType).Add(float64(totalErrors - prevErrors))
|
||||
}
|
||||
if prevSpikes > 0 && latencySpikes > prevSpikes {
|
||||
deviceLatencySpikes.WithLabelValues(deviceType).Add(float64(latencySpikes - prevSpikes))
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateMemoryMetrics updates memory metrics
|
||||
func UpdateMemoryMetrics() {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
memoryHeapAllocBytes.Set(float64(m.HeapAlloc))
|
||||
memoryHeapSysBytes.Set(float64(m.HeapSys))
|
||||
memoryHeapObjects.Set(float64(m.HeapObjects))
|
||||
memoryGCCPUFraction.Set(m.GCCPUFraction)
|
||||
|
||||
// Update GC count with delta calculation
|
||||
currentGCCount := uint32(m.NumGC)
|
||||
prevGCCount := atomic.SwapUint32(&memoryGCCountValue, currentGCCount)
|
||||
if prevGCCount > 0 && currentGCCount > prevGCCount {
|
||||
memoryGCCount.Add(float64(currentGCCount - prevGCCount))
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateBufferPoolMetrics updates buffer pool efficiency metrics
|
||||
func UpdateBufferPoolMetrics(poolName string, hitRate, missRate, utilization, throughput, getLatency, putLatency float64) {
|
||||
metricsUpdateMutex.Lock()
|
||||
defer metricsUpdateMutex.Unlock()
|
||||
|
||||
bufferPoolHitRate.WithLabelValues(poolName).Set(hitRate * 100)
|
||||
bufferPoolMissRate.WithLabelValues(poolName).Set(missRate * 100)
|
||||
bufferPoolUtilization.WithLabelValues(poolName).Set(utilization * 100)
|
||||
bufferPoolThroughput.WithLabelValues(poolName).Set(throughput)
|
||||
bufferPoolGetLatency.WithLabelValues(poolName).Set(getLatency)
|
||||
bufferPoolPutLatency.WithLabelValues(poolName).Set(putLatency)
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateLatencyMetrics updates latency percentile metrics
|
||||
func UpdateLatencyMetrics(source, percentile string, latencyMilliseconds float64) {
|
||||
metricsUpdateMutex.Lock()
|
||||
defer metricsUpdateMutex.Unlock()
|
||||
|
||||
latencyPercentile.WithLabelValues(source, percentile).Set(latencyMilliseconds)
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// GetLastMetricsUpdate returns the timestamp of the last metrics update
|
||||
func GetLastMetricsUpdate() time.Time {
|
||||
timestamp := atomic.LoadInt64(&lastMetricsUpdate)
|
||||
|
@ -487,31 +721,18 @@ func GetLastMetricsUpdate() time.Time {

// StartMetricsUpdater starts a goroutine that periodically updates Prometheus metrics
func StartMetricsUpdater() {
	// Start the centralized metrics collector
	registry := GetMetricsRegistry()
	registry.StartMetricsCollector()

	// Start a separate goroutine for periodic updates
	go func() {
		ticker := time.NewTicker(GetConfig().StatsUpdateInterval) // Update every 5 seconds
		ticker := time.NewTicker(5 * time.Second) // Update every 5 seconds
		defer ticker.Stop()

		for range ticker.C {
			// Update audio output metrics
			audioMetrics := GetAudioMetrics()
			UpdateAudioMetrics(convertAudioMetricsToUnified(audioMetrics))

			// Update microphone input metrics
			micMetrics := GetAudioInputMetrics()
			UpdateMicrophoneMetrics(convertAudioInputMetricsToUnified(micMetrics))

			// Update microphone subprocess process metrics
			if inputSupervisor := GetAudioInputIPCSupervisor(); inputSupervisor != nil {
				if processMetrics := inputSupervisor.GetProcessMetrics(); processMetrics != nil {
					UpdateMicrophoneProcessMetrics(*processMetrics, inputSupervisor.IsRunning())
				}
			}

			// Update audio configuration metrics
			audioConfig := GetAudioConfig()
			UpdateAudioConfigMetrics(audioConfig)
			micConfig := GetMicrophoneConfig()
			UpdateMicrophoneConfigMetrics(micConfig)
			// Update memory metrics (not part of centralized registry)
			UpdateMemoryMetrics()
		}
	}()
}

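
// Illustrative staleness check (editor's addition, not part of this diff):
// consumers can compare GetLastMetricsUpdate against the current time to
// detect a stalled updater; the 10-second threshold is an arbitrary example.
func exampleMetricsAreFresh() bool {
	return time.Since(GetLastMetricsUpdate()) < 10*time.Second
}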
@ -0,0 +1,151 @@
//go:build cgo

package audio

import (
	"sync"
	"sync/atomic"
	"time"
)

// MetricsRegistry provides a centralized source of truth for all audio metrics
// This eliminates duplication between session-specific and global managers
type MetricsRegistry struct {
	mu                sync.RWMutex
	audioMetrics      AudioMetrics
	audioInputMetrics AudioInputMetrics
	audioConfig       AudioConfig
	microphoneConfig  AudioConfig
	lastUpdate        int64 // Unix timestamp
}

var (
	globalMetricsRegistry *MetricsRegistry
	registryOnce          sync.Once
)

// GetMetricsRegistry returns the global metrics registry instance
func GetMetricsRegistry() *MetricsRegistry {
	registryOnce.Do(func() {
		globalMetricsRegistry = &MetricsRegistry{
			lastUpdate: time.Now().Unix(),
		}
	})
	return globalMetricsRegistry
}
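
// Illustrative read-side sketch (editor's addition, not part of this diff):
// callers take a consistent snapshot of output metrics through the registry
// instead of reaching into per-session managers directly.
func exampleSnapshotOutputMetrics() AudioMetrics {
	return GetMetricsRegistry().GetAudioMetrics()
}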
|
||||
|
||||
// UpdateAudioMetrics updates the centralized audio output metrics
|
||||
func (mr *MetricsRegistry) UpdateAudioMetrics(metrics AudioMetrics) {
|
||||
mr.mu.Lock()
|
||||
mr.audioMetrics = metrics
|
||||
mr.lastUpdate = time.Now().Unix()
|
||||
mr.mu.Unlock()
|
||||
|
||||
// Update Prometheus metrics directly to avoid circular dependency
|
||||
UpdateAudioMetrics(convertAudioMetricsToUnified(metrics))
|
||||
}
|
||||
|
||||
// UpdateAudioInputMetrics updates the centralized audio input metrics
|
||||
func (mr *MetricsRegistry) UpdateAudioInputMetrics(metrics AudioInputMetrics) {
|
||||
mr.mu.Lock()
|
||||
mr.audioInputMetrics = metrics
|
||||
mr.lastUpdate = time.Now().Unix()
|
||||
mr.mu.Unlock()
|
||||
|
||||
// Update Prometheus metrics directly to avoid circular dependency
|
||||
UpdateMicrophoneMetrics(convertAudioInputMetricsToUnified(metrics))
|
||||
}
|
||||
|
||||
// UpdateAudioConfig updates the centralized audio configuration
|
||||
func (mr *MetricsRegistry) UpdateAudioConfig(config AudioConfig) {
|
||||
mr.mu.Lock()
|
||||
mr.audioConfig = config
|
||||
mr.lastUpdate = time.Now().Unix()
|
||||
mr.mu.Unlock()
|
||||
|
||||
// Update Prometheus metrics directly
|
||||
UpdateAudioConfigMetrics(config)
|
||||
}
|
||||
|
||||
// UpdateMicrophoneConfig updates the centralized microphone configuration
|
||||
func (mr *MetricsRegistry) UpdateMicrophoneConfig(config AudioConfig) {
|
||||
mr.mu.Lock()
|
||||
mr.microphoneConfig = config
|
||||
mr.lastUpdate = time.Now().Unix()
|
||||
mr.mu.Unlock()
|
||||
|
||||
// Update Prometheus metrics directly
|
||||
UpdateMicrophoneConfigMetrics(config)
|
||||
}
|
||||
|
||||
// GetAudioMetrics returns the current audio output metrics
|
||||
func (mr *MetricsRegistry) GetAudioMetrics() AudioMetrics {
|
||||
mr.mu.RLock()
|
||||
defer mr.mu.RUnlock()
|
||||
return mr.audioMetrics
|
||||
}
|
||||
|
||||
// GetAudioInputMetrics returns the current audio input metrics
|
||||
func (mr *MetricsRegistry) GetAudioInputMetrics() AudioInputMetrics {
|
||||
mr.mu.RLock()
|
||||
defer mr.mu.RUnlock()
|
||||
return mr.audioInputMetrics
|
||||
}
|
||||
|
||||
// GetAudioConfig returns the current audio configuration
|
||||
func (mr *MetricsRegistry) GetAudioConfig() AudioConfig {
|
||||
mr.mu.RLock()
|
||||
defer mr.mu.RUnlock()
|
||||
return mr.audioConfig
|
||||
}
|
||||
|
||||
// GetMicrophoneConfig returns the current microphone configuration
|
||||
func (mr *MetricsRegistry) GetMicrophoneConfig() AudioConfig {
|
||||
mr.mu.RLock()
|
||||
defer mr.mu.RUnlock()
|
||||
return mr.microphoneConfig
|
||||
}
|
||||
|
||||
// GetLastUpdate returns the timestamp of the last metrics update
|
||||
func (mr *MetricsRegistry) GetLastUpdate() time.Time {
|
||||
timestamp := atomic.LoadInt64(&mr.lastUpdate)
|
||||
return time.Unix(timestamp, 0)
|
||||
}
|
||||
|
||||
// StartMetricsCollector starts a background goroutine to collect metrics
|
||||
func (mr *MetricsRegistry) StartMetricsCollector() {
|
||||
go func() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
// Collect from session-specific manager if available
|
||||
if sessionProvider := GetSessionProvider(); sessionProvider != nil && sessionProvider.IsSessionActive() {
|
||||
if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
|
||||
metrics := inputManager.GetMetrics()
|
||||
mr.UpdateAudioInputMetrics(metrics)
|
||||
}
|
||||
} else {
|
||||
// Fallback to global manager if no session is active
|
||||
globalManager := getAudioInputManager()
|
||||
metrics := globalManager.GetMetrics()
|
||||
mr.UpdateAudioInputMetrics(metrics)
|
||||
}
|
||||
|
||||
// Collect audio output metrics directly from global metrics variable to avoid circular dependency
|
||||
audioMetrics := AudioMetrics{
|
||||
FramesReceived: atomic.LoadInt64(&metrics.FramesReceived),
|
||||
FramesDropped: atomic.LoadInt64(&metrics.FramesDropped),
|
||||
BytesProcessed: atomic.LoadInt64(&metrics.BytesProcessed),
|
||||
ConnectionDrops: atomic.LoadInt64(&metrics.ConnectionDrops),
|
||||
LastFrameTime: metrics.LastFrameTime,
|
||||
AverageLatency: metrics.AverageLatency,
|
||||
}
|
||||
mr.UpdateAudioMetrics(audioMetrics)
|
||||
|
||||
// Collect configuration directly from global variables to avoid circular dependency
|
||||
mr.UpdateAudioConfig(currentConfig)
|
||||
mr.UpdateMicrophoneConfig(currentMicrophoneConfig)
|
||||
}
|
||||
}()
|
||||
}
|
|
@ -0,0 +1,211 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
)
|
||||
|
||||
// AudioOutputIPCManager manages audio output using IPC when enabled
|
||||
type AudioOutputIPCManager struct {
|
||||
*BaseAudioManager
|
||||
server *AudioOutputServer
|
||||
}
|
||||
|
||||
// NewAudioOutputIPCManager creates a new IPC-based audio output manager
|
||||
func NewAudioOutputIPCManager() *AudioOutputIPCManager {
|
||||
return &AudioOutputIPCManager{
|
||||
BaseAudioManager: NewBaseAudioManager(logging.GetDefaultLogger().With().Str("component", AudioOutputIPCComponent).Logger()),
|
||||
}
|
||||
}
|
||||
|
||||
// Start initializes and starts the audio output IPC manager
|
||||
func (aom *AudioOutputIPCManager) Start() error {
|
||||
aom.logComponentStart(AudioOutputIPCComponent)
|
||||
|
||||
// Create and start the IPC server
|
||||
server, err := NewAudioOutputServer()
|
||||
if err != nil {
|
||||
aom.logComponentError(AudioOutputIPCComponent, err, "failed to create IPC server")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := server.Start(); err != nil {
|
||||
aom.logComponentError(AudioOutputIPCComponent, err, "failed to start IPC server")
|
||||
return err
|
||||
}
|
||||
|
||||
aom.server = server
|
||||
aom.setRunning(true)
|
||||
aom.logComponentStarted(AudioOutputIPCComponent)
|
||||
|
||||
// Send initial configuration
|
||||
config := OutputIPCConfig{
|
||||
SampleRate: GetConfig().SampleRate,
|
||||
Channels: GetConfig().Channels,
|
||||
FrameSize: int(GetConfig().AudioQualityMediumFrameSize.Milliseconds()),
|
||||
}
|
||||
|
||||
if err := aom.SendConfig(config); err != nil {
|
||||
aom.logger.Warn().Err(err).Msg("Failed to send initial configuration")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down the audio output IPC manager
|
||||
func (aom *AudioOutputIPCManager) Stop() {
|
||||
aom.logComponentStop(AudioOutputIPCComponent)
|
||||
|
||||
if aom.server != nil {
|
||||
aom.server.Stop()
|
||||
aom.server = nil
|
||||
}
|
||||
|
||||
aom.setRunning(false)
|
||||
aom.resetMetrics()
|
||||
aom.logComponentStopped(AudioOutputIPCComponent)
|
||||
}
|
||||
|
||||
// resetMetrics resets all metrics to zero
|
||||
func (aom *AudioOutputIPCManager) resetMetrics() {
|
||||
aom.BaseAudioManager.resetMetrics()
|
||||
}
|
||||
|
||||
// WriteOpusFrame sends an Opus frame to the output server
|
||||
func (aom *AudioOutputIPCManager) WriteOpusFrame(frame *ZeroCopyAudioFrame) error {
|
||||
if !aom.IsRunning() {
|
||||
return fmt.Errorf("audio output IPC manager not running")
|
||||
}
|
||||
|
||||
if aom.server == nil {
|
||||
return fmt.Errorf("audio output server not initialized")
|
||||
}
|
||||
|
||||
// Validate frame before processing
|
||||
if err := ValidateZeroCopyFrame(frame); err != nil {
|
||||
aom.logComponentError(AudioOutputIPCComponent, err, "Frame validation failed")
|
||||
return fmt.Errorf("output frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Send frame to IPC server
|
||||
if err := aom.server.SendFrame(frame.Data()); err != nil {
|
||||
aom.recordFrameDropped()
|
||||
return err
|
||||
}
|
||||
|
||||
// Update metrics
|
||||
processingTime := time.Since(start)
|
||||
aom.recordFrameProcessed(frame.Length())
|
||||
aom.updateLatency(processingTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteOpusFrameZeroCopy writes an Opus audio frame using zero-copy optimization
|
||||
func (aom *AudioOutputIPCManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFrame) error {
|
||||
if !aom.IsRunning() {
|
||||
return fmt.Errorf("audio output IPC manager not running")
|
||||
}
|
||||
|
||||
if aom.server == nil {
|
||||
return fmt.Errorf("audio output server not initialized")
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Extract frame data
|
||||
frameData := frame.Data()
|
||||
|
||||
// Send frame to IPC server (zero-copy not available, use regular send)
|
||||
if err := aom.server.SendFrame(frameData); err != nil {
|
||||
aom.recordFrameDropped()
|
||||
return err
|
||||
}
|
||||
|
||||
// Update metrics
|
||||
processingTime := time.Since(start)
|
||||
aom.recordFrameProcessed(len(frameData))
|
||||
aom.updateLatency(processingTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsReady returns true if the IPC manager is ready to process frames
|
||||
func (aom *AudioOutputIPCManager) IsReady() bool {
|
||||
return aom.IsRunning() && aom.server != nil
|
||||
}
|
||||
|
||||
// GetMetrics returns current audio output metrics
|
||||
func (aom *AudioOutputIPCManager) GetMetrics() AudioOutputMetrics {
|
||||
baseMetrics := aom.getBaseMetrics()
|
||||
return AudioOutputMetrics{
|
||||
FramesReceived: atomic.LoadInt64(&baseMetrics.FramesProcessed), // For output, processed = received
|
||||
BaseAudioMetrics: baseMetrics,
|
||||
}
|
||||
}
|
||||
|
||||
// GetDetailedMetrics returns detailed metrics including server statistics
|
||||
func (aom *AudioOutputIPCManager) GetDetailedMetrics() (AudioOutputMetrics, map[string]interface{}) {
|
||||
metrics := aom.GetMetrics()
|
||||
detailed := make(map[string]interface{})
|
||||
|
||||
if aom.server != nil {
|
||||
total, dropped, bufferSize := aom.server.GetServerStats()
|
||||
detailed["server_total_frames"] = total
|
||||
detailed["server_dropped_frames"] = dropped
|
||||
detailed["server_buffer_size"] = bufferSize
|
||||
detailed["server_frame_rate"] = aom.calculateFrameRate()
|
||||
}
|
||||
|
||||
return metrics, detailed
|
||||
}
|
||||
|
||||
// calculateFrameRate calculates the current frame processing rate
|
||||
func (aom *AudioOutputIPCManager) calculateFrameRate() float64 {
|
||||
baseMetrics := aom.getBaseMetrics()
|
||||
framesProcessed := atomic.LoadInt64(&baseMetrics.FramesProcessed)
|
||||
if framesProcessed == 0 {
|
||||
return 0.0
|
||||
}
|
||||
|
||||
// Calculate rate based on last frame time
|
||||
baseMetrics = aom.getBaseMetrics()
|
||||
if baseMetrics.LastFrameTime.IsZero() {
|
||||
return 0.0
|
||||
}
|
||||
|
||||
elapsed := time.Since(baseMetrics.LastFrameTime)
|
||||
if elapsed.Seconds() == 0 {
|
||||
return 0.0
|
||||
}
|
||||
|
||||
return float64(framesProcessed) / elapsed.Seconds()
|
||||
}
|
||||
|
||||
// SendConfig sends configuration to the IPC server
|
||||
func (aom *AudioOutputIPCManager) SendConfig(config OutputIPCConfig) error {
|
||||
if aom.server == nil {
|
||||
return fmt.Errorf("audio output server not initialized")
|
||||
}
|
||||
|
||||
// Validate configuration parameters
|
||||
if err := ValidateOutputIPCConfig(config.SampleRate, config.Channels, config.FrameSize); err != nil {
|
||||
aom.logger.Error().Err(err).Msg("Configuration validation failed")
|
||||
return fmt.Errorf("output configuration validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Note: AudioOutputServer doesn't have SendConfig method yet
|
||||
// This is a placeholder for future implementation
|
||||
aom.logger.Info().Interface("config", config).Msg("configuration received")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetServer returns the underlying IPC server (for testing)
|
||||
func (aom *AudioOutputIPCManager) GetServer() *AudioOutputServer {
|
||||
return aom.server
|
||||
}
|
|
@ -2,60 +2,56 @@ package audio
|
|||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// AudioOutputManager manages audio output stream using IPC mode
|
||||
type AudioOutputManager struct {
|
||||
metrics AudioOutputMetrics
|
||||
|
||||
streamer *AudioOutputStreamer
|
||||
logger zerolog.Logger
|
||||
running int32
|
||||
*BaseAudioManager
|
||||
streamer *AudioOutputStreamer
|
||||
framesReceived int64 // Output-specific metric
|
||||
}
|
||||
|
||||
// AudioOutputMetrics tracks output-specific metrics
|
||||
// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
|
||||
type AudioOutputMetrics struct {
|
||||
FramesReceived int64
|
||||
FramesDropped int64
|
||||
BytesProcessed int64
|
||||
ConnectionDrops int64
|
||||
LastFrameTime time.Time
|
||||
AverageLatency time.Duration
|
||||
// Atomic int64 field first for proper ARM32 alignment
|
||||
FramesReceived int64 `json:"frames_received"` // Total frames received (output-specific)
|
||||
|
||||
// Embedded struct with atomic fields properly aligned
|
||||
BaseAudioMetrics
|
||||
}
|
||||
|
||||
// NewAudioOutputManager creates a new audio output manager
|
||||
func NewAudioOutputManager() *AudioOutputManager {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioOutputManagerComponent).Logger()
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
// Log error but continue with nil streamer - will be handled gracefully
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioOutputManagerComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Failed to create audio output streamer")
|
||||
}
|
||||
|
||||
return &AudioOutputManager{
|
||||
streamer: streamer,
|
||||
logger: logging.GetDefaultLogger().With().Str("component", AudioOutputManagerComponent).Logger(),
|
||||
BaseAudioManager: NewBaseAudioManager(logger),
|
||||
streamer: streamer,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the audio output manager
|
||||
func (aom *AudioOutputManager) Start() error {
|
||||
if !atomic.CompareAndSwapInt32(&aom.running, 0, 1) {
|
||||
if !aom.setRunning(true) {
|
||||
return nil // Already running
|
||||
}
|
||||
|
||||
aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("starting component")
|
||||
aom.logComponentStart(AudioOutputManagerComponent)
|
||||
|
||||
if aom.streamer == nil {
|
||||
// Try to recreate streamer if it was nil
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
atomic.StoreInt32(&aom.running, 0)
|
||||
aom.logger.Error().Err(err).Str("component", AudioOutputManagerComponent).Msg("failed to create audio output streamer")
|
||||
aom.setRunning(false)
|
||||
aom.logComponentError(AudioOutputManagerComponent, err, "failed to create audio output streamer")
|
||||
return err
|
||||
}
|
||||
aom.streamer = streamer
|
||||
|
@ -63,44 +59,39 @@ func (aom *AudioOutputManager) Start() error {
|
|||
|
||||
err := aom.streamer.Start()
|
||||
if err != nil {
|
||||
atomic.StoreInt32(&aom.running, 0)
|
||||
aom.setRunning(false)
|
||||
// Reset metrics on failed start
|
||||
aom.resetMetrics()
|
||||
aom.logger.Error().Err(err).Str("component", AudioOutputManagerComponent).Msg("failed to start component")
|
||||
aom.logComponentError(AudioOutputManagerComponent, err, "failed to start component")
|
||||
return err
|
||||
}
|
||||
|
||||
aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("component started successfully")
|
||||
aom.logComponentStarted(AudioOutputManagerComponent)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the audio output manager
|
||||
func (aom *AudioOutputManager) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&aom.running, 1, 0) {
|
||||
if !aom.setRunning(false) {
|
||||
return // Already stopped
|
||||
}
|
||||
|
||||
aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("stopping component")
|
||||
aom.logComponentStop(AudioOutputManagerComponent)
|
||||
|
||||
if aom.streamer != nil {
|
||||
aom.streamer.Stop()
|
||||
}
|
||||
|
||||
aom.logger.Info().Str("component", AudioOutputManagerComponent).Msg("component stopped")
|
||||
aom.logComponentStopped(AudioOutputManagerComponent)
|
||||
}
|
||||
|
||||
// resetMetrics resets all metrics to zero
|
||||
func (aom *AudioOutputManager) resetMetrics() {
|
||||
atomic.StoreInt64(&aom.metrics.FramesReceived, 0)
|
||||
atomic.StoreInt64(&aom.metrics.FramesDropped, 0)
|
||||
atomic.StoreInt64(&aom.metrics.BytesProcessed, 0)
|
||||
atomic.StoreInt64(&aom.metrics.ConnectionDrops, 0)
|
||||
aom.BaseAudioManager.resetMetrics()
|
||||
atomic.StoreInt64(&aom.framesReceived, 0)
|
||||
}
|
||||
|
||||
// IsRunning returns whether the audio output manager is running
|
||||
func (aom *AudioOutputManager) IsRunning() bool {
|
||||
return atomic.LoadInt32(&aom.running) == 1
|
||||
}
|
||||
// Note: IsRunning() is inherited from BaseAudioManager
|
||||
|
||||
// IsReady returns whether the audio output manager is ready to receive frames
|
||||
func (aom *AudioOutputManager) IsReady() bool {
|
||||
|
@ -115,12 +106,8 @@ func (aom *AudioOutputManager) IsReady() bool {
|
|||
// GetMetrics returns current metrics
|
||||
func (aom *AudioOutputManager) GetMetrics() AudioOutputMetrics {
|
||||
return AudioOutputMetrics{
|
||||
FramesReceived: atomic.LoadInt64(&aom.metrics.FramesReceived),
|
||||
FramesDropped: atomic.LoadInt64(&aom.metrics.FramesDropped),
|
||||
BytesProcessed: atomic.LoadInt64(&aom.metrics.BytesProcessed),
|
||||
ConnectionDrops: atomic.LoadInt64(&aom.metrics.ConnectionDrops),
|
||||
AverageLatency: aom.metrics.AverageLatency,
|
||||
LastFrameTime: aom.metrics.LastFrameTime,
|
||||
FramesReceived: atomic.LoadInt64(&aom.framesReceived),
|
||||
BaseAudioMetrics: aom.getBaseMetrics(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -131,6 +118,7 @@ func (aom *AudioOutputManager) GetComprehensiveMetrics() map[string]interface{}
|
|||
comprehensiveMetrics := map[string]interface{}{
|
||||
"manager": map[string]interface{}{
|
||||
"frames_received": baseMetrics.FramesReceived,
|
||||
"frames_processed": baseMetrics.FramesProcessed,
|
||||
"frames_dropped": baseMetrics.FramesDropped,
|
||||
"bytes_processed": baseMetrics.BytesProcessed,
|
||||
"connection_drops": baseMetrics.ConnectionDrops,
|
||||
|
|
|
@ -14,7 +14,10 @@ import (
|
|||
// This should be called from main() when the subprocess is detected
|
||||
func RunAudioOutputServer() error {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-output-server").Logger()
|
||||
logger.Info().Msg("Starting audio output server subprocess")
|
||||
logger.Debug().Msg("audio output server subprocess starting")
|
||||
|
||||
// Initialize validation cache for optimal performance
|
||||
InitValidationCache()
|
||||
|
||||
// Create audio server
|
||||
server, err := NewAudioOutputServer()
|
||||
|
@ -42,7 +45,7 @@ func RunAudioOutputServer() error {
|
|||
return err
|
||||
}
|
||||
|
||||
logger.Info().Msg("Audio output server started, waiting for connections")
|
||||
logger.Debug().Msg("audio output server started, waiting for connections")
|
||||
|
||||
// Set up signal handling for graceful shutdown
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -54,18 +57,18 @@ func RunAudioOutputServer() error {
|
|||
// Wait for shutdown signal
|
||||
select {
|
||||
case sig := <-sigChan:
|
||||
logger.Info().Str("signal", sig.String()).Msg("Received shutdown signal")
|
||||
logger.Info().Str("signal", sig.String()).Msg("received shutdown signal")
|
||||
case <-ctx.Done():
|
||||
logger.Info().Msg("Context cancelled")
|
||||
logger.Debug().Msg("context cancelled")
|
||||
}
|
||||
|
||||
// Graceful shutdown
|
||||
logger.Info().Msg("Shutting down audio output server")
|
||||
logger.Debug().Msg("shutting down audio output server")
|
||||
StopNonBlockingAudioStreaming()
|
||||
|
||||
// Give some time for cleanup
|
||||
time.Sleep(GetConfig().DefaultSleepDuration)
|
||||
|
||||
logger.Info().Msg("Audio output server subprocess stopped")
|
||||
logger.Debug().Msg("audio output server subprocess stopped")
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -391,6 +391,14 @@ func StartAudioOutputStreaming(send func([]byte)) error {
|
|||
frame := GetAudioFrameBuffer()
|
||||
frame = frame[:n] // Resize to actual frame size
|
||||
copy(frame, buffer[:n])
|
||||
|
||||
// Validate frame before sending
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
getOutputStreamingLogger().Warn().Err(err).Msg("Frame validation failed, dropping frame")
|
||||
PutAudioFrameBuffer(frame)
|
||||
continue
|
||||
}
|
||||
|
||||
send(frame)
|
||||
// Return buffer to pool after sending
|
||||
PutAudioFrameBuffer(frame)
|
||||
|
|
|
@ -0,0 +1,393 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestPerformanceCriticalPaths tests the most frequently executed code paths
|
||||
// to ensure they remain efficient and don't interfere with KVM functionality
|
||||
func TestPerformanceCriticalPaths(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping performance tests in short mode")
|
||||
}
|
||||
|
||||
// Initialize validation cache for performance testing
|
||||
InitValidationCache()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"AudioFrameProcessingLatency", testAudioFrameProcessingLatency},
|
||||
{"MetricsUpdateOverhead", testMetricsUpdateOverhead},
|
||||
{"ConfigurationAccessSpeed", testConfigurationAccessSpeed},
|
||||
{"ValidationFunctionSpeed", testValidationFunctionSpeed},
|
||||
{"MemoryAllocationPatterns", testMemoryAllocationPatterns},
|
||||
{"ConcurrentAccessPerformance", testConcurrentAccessPerformance},
|
||||
{"BufferPoolEfficiency", testBufferPoolEfficiency},
|
||||
{"AtomicOperationOverhead", testAtomicOperationOverhead},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAudioFrameProcessingLatency tests the latency of audio frame processing
|
||||
// This is the most critical path that must not interfere with KVM
|
||||
func testAudioFrameProcessingLatency(t *testing.T) {
|
||||
const (
|
||||
frameCount = 1000
|
||||
maxLatencyPerFrame = 100 * time.Microsecond // Very strict requirement
|
||||
)
|
||||
|
||||
// Create test frame data
|
||||
frameData := make([]byte, 1920) // Typical frame size
|
||||
for i := range frameData {
|
||||
frameData[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Measure frame processing latency
|
||||
start := time.Now()
|
||||
for i := 0; i < frameCount; i++ {
|
||||
// Simulate the critical path: validation + metrics update
|
||||
err := ValidateAudioFrame(frameData)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Record frame received (atomic operation)
|
||||
RecordFrameReceived(len(frameData))
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
|
||||
avgLatencyPerFrame := elapsed / frameCount
|
||||
t.Logf("Average frame processing latency: %v", avgLatencyPerFrame)
|
||||
|
||||
// Ensure frame processing is fast enough to not interfere with KVM
|
||||
assert.Less(t, avgLatencyPerFrame, maxLatencyPerFrame,
|
||||
"Frame processing latency %v exceeds maximum %v - may interfere with KVM",
|
||||
avgLatencyPerFrame, maxLatencyPerFrame)
|
||||
|
||||
// Ensure total processing time is reasonable
|
||||
maxTotalTime := 50 * time.Millisecond
|
||||
assert.Less(t, elapsed, maxTotalTime,
|
||||
"Total processing time %v exceeds maximum %v", elapsed, maxTotalTime)
|
||||
}
|
||||
|
||||
// testMetricsUpdateOverhead tests the overhead of metrics updates
|
||||
func testMetricsUpdateOverhead(t *testing.T) {
|
||||
const iterations = 10000
|
||||
|
||||
// Test RecordFrameReceived performance
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
RecordFrameReceived(1024)
|
||||
}
|
||||
recordLatency := time.Since(start) / iterations
|
||||
|
||||
// Test GetAudioMetrics performance
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetAudioMetrics()
|
||||
}
|
||||
getLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("RecordFrameReceived latency: %v", recordLatency)
|
||||
t.Logf("GetAudioMetrics latency: %v", getLatency)
|
||||
|
||||
// Metrics operations should be optimized for JetKVM's ARM Cortex-A7 @ 1GHz
|
||||
// With 256MB RAM, we need to be conservative with performance expectations
|
||||
assert.Less(t, recordLatency, 50*time.Microsecond, "RecordFrameReceived too slow")
|
||||
assert.Less(t, getLatency, 20*time.Microsecond, "GetAudioMetrics too slow")
|
||||
}
|
||||
|
||||
// testConfigurationAccessSpeed tests configuration access performance
|
||||
func testConfigurationAccessSpeed(t *testing.T) {
|
||||
const iterations = 10000
|
||||
|
||||
// Test GetAudioConfig performance
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
configLatency := time.Since(start) / iterations
|
||||
|
||||
// Test GetConfig performance
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetConfig()
|
||||
}
|
||||
constantsLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("GetAudioConfig latency: %v", configLatency)
|
||||
t.Logf("GetConfig latency: %v", constantsLatency)
|
||||
|
||||
// Configuration access should be very fast
|
||||
assert.Less(t, configLatency, 100*time.Nanosecond, "GetAudioConfig too slow")
|
||||
assert.Less(t, constantsLatency, 100*time.Nanosecond, "GetConfig too slow")
|
||||
}
|
||||
|
||||
// testValidationFunctionSpeed tests validation function performance
|
||||
func testValidationFunctionSpeed(t *testing.T) {
|
||||
const iterations = 10000
|
||||
frameData := make([]byte, 1920)
|
||||
|
||||
// Test ValidateAudioFrame (most critical)
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
err := ValidateAudioFrame(frameData)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
fastValidationLatency := time.Since(start) / iterations
|
||||
|
||||
// Test ValidateAudioQuality
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
err := ValidateAudioQuality(AudioQualityMedium)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
qualityValidationLatency := time.Since(start) / iterations
|
||||
|
||||
// Test ValidateBufferSize
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
err := ValidateBufferSize(1024)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
bufferValidationLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("ValidateAudioFrame latency: %v", fastValidationLatency)
|
||||
t.Logf("ValidateAudioQuality latency: %v", qualityValidationLatency)
|
||||
t.Logf("ValidateBufferSize latency: %v", bufferValidationLatency)
|
||||
|
||||
// Validation functions optimized for ARM Cortex-A7 single core @ 1GHz
|
||||
// Conservative thresholds to ensure KVM functionality isn't impacted
|
||||
assert.Less(t, fastValidationLatency, 100*time.Microsecond, "ValidateAudioFrame too slow")
|
||||
assert.Less(t, qualityValidationLatency, 50*time.Microsecond, "ValidateAudioQuality too slow")
|
||||
assert.Less(t, bufferValidationLatency, 50*time.Microsecond, "ValidateBufferSize too slow")
|
||||
}
|
||||
|
||||
// testMemoryAllocationPatterns tests memory allocation efficiency
|
||||
func testMemoryAllocationPatterns(t *testing.T) {
|
||||
// Test that frequent operations don't cause excessive allocations
|
||||
var m1, m2 runtime.MemStats
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m1)
|
||||
|
||||
// Perform operations that should minimize allocations
|
||||
for i := 0; i < 1000; i++ {
|
||||
_ = GetAudioConfig()
|
||||
_ = GetAudioMetrics()
|
||||
RecordFrameReceived(1024)
|
||||
_ = ValidateAudioQuality(AudioQualityMedium)
|
||||
}
|
||||
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m2)
|
||||
|
||||
allocations := m2.Mallocs - m1.Mallocs
|
||||
t.Logf("Memory allocations for 1000 operations: %d", allocations)
|
||||
|
||||
// Should have minimal allocations for these hot path operations
|
||||
assert.Less(t, allocations, uint64(100), "Too many memory allocations in hot path")
|
||||
}
|
||||
|
||||
// testConcurrentAccessPerformance tests performance under concurrent access
|
||||
func testConcurrentAccessPerformance(t *testing.T) {
|
||||
const (
|
||||
numGoroutines = 10
|
||||
operationsPerGoroutine = 1000
|
||||
)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
start := time.Now()
|
||||
|
||||
// Launch concurrent goroutines performing audio operations
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
frameData := make([]byte, 1920)
|
||||
|
||||
for j := 0; j < operationsPerGoroutine; j++ {
|
||||
// Simulate concurrent audio processing
|
||||
_ = ValidateAudioFrame(frameData)
|
||||
RecordFrameReceived(len(frameData))
|
||||
_ = GetAudioMetrics()
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
elapsed := time.Since(start)
|
||||
|
||||
totalOperations := numGoroutines * operationsPerGoroutine * 4 // 4 operations per iteration
|
||||
avgLatency := elapsed / time.Duration(totalOperations)
|
||||
|
||||
t.Logf("Concurrent access: %d operations in %v (avg: %v per operation)",
|
||||
totalOperations, elapsed, avgLatency)
|
||||
|
||||
// Concurrent access should not significantly degrade performance
|
||||
assert.Less(t, avgLatency, 1*time.Microsecond, "Concurrent access too slow")
|
||||
}
|
||||
|
||||
// testBufferPoolEfficiency tests buffer pool performance
func testBufferPoolEfficiency(t *testing.T) {
    // Test buffer acquisition and release performance
    const iterations = 1000

    start := time.Now()
    for i := 0; i < iterations; i++ {
        // Simulate buffer pool usage (if available)
        buffer := make([]byte, 1920) // Fallback to allocation
        _ = buffer
        // In real implementation, this would be pool.Get() and pool.Put()
    }
    elapsed := time.Since(start)

    avgLatency := elapsed / iterations
    t.Logf("Buffer allocation latency: %v per buffer", avgLatency)

    // Buffer operations should be fast
    assert.Less(t, avgLatency, 1*time.Microsecond, "Buffer allocation too slow")
}
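
// The loop above falls back to plain allocation; the pool.Get()/pool.Put() path it
// alludes to can be approximated with a sync.Pool. A sketch only - the package's
// real buffer pool may expose a different API:
var frameBufferPool = sync.Pool{
    New: func() interface{} {
        buf := make([]byte, 1920) // frame size used throughout these tests
        return &buf
    },
}

func getPooledFrameBuffer() *[]byte { return frameBufferPool.Get().(*[]byte) }

func putPooledFrameBuffer(buf *[]byte) { frameBufferPool.Put(buf) }
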
// testAtomicOperationOverhead tests atomic operation performance
func testAtomicOperationOverhead(t *testing.T) {
    const iterations = 10000
    var counter int64

    // Test atomic increment performance
    start := time.Now()
    for i := 0; i < iterations; i++ {
        atomic.AddInt64(&counter, 1)
    }
    atomicLatency := time.Since(start) / iterations

    // Test atomic load performance
    start = time.Now()
    for i := 0; i < iterations; i++ {
        _ = atomic.LoadInt64(&counter)
    }
    loadLatency := time.Since(start) / iterations

    t.Logf("Atomic add latency: %v", atomicLatency)
    t.Logf("Atomic load latency: %v", loadLatency)

    // Atomic operations on ARM Cortex-A7 - realistic expectations
    assert.Less(t, atomicLatency, 1*time.Microsecond, "Atomic add too slow")
    assert.Less(t, loadLatency, 500*time.Nanosecond, "Atomic load too slow")
}

// TestRegressionDetection tests for performance regressions
func TestRegressionDetection(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping regression test in short mode")
    }

    // Baseline performance expectations
    baselines := map[string]time.Duration{
        "frame_processing": 100 * time.Microsecond,
        "metrics_update":   500 * time.Nanosecond,
        "config_access":    100 * time.Nanosecond,
        "validation":       200 * time.Nanosecond,
    }

    // Test frame processing
    frameData := make([]byte, 1920)
    start := time.Now()
    for i := 0; i < 100; i++ {
        _ = ValidateAudioFrame(frameData)
        RecordFrameReceived(len(frameData))
    }
    frameProcessingTime := time.Since(start) / 100

    // Test metrics update
    start = time.Now()
    for i := 0; i < 1000; i++ {
        RecordFrameReceived(1024)
    }
    metricsUpdateTime := time.Since(start) / 1000

    // Test config access
    start = time.Now()
    for i := 0; i < 1000; i++ {
        _ = GetAudioConfig()
    }
    configAccessTime := time.Since(start) / 1000

    // Test validation
    start = time.Now()
    for i := 0; i < 1000; i++ {
        _ = ValidateAudioQuality(AudioQualityMedium)
    }
    validationTime := time.Since(start) / 1000

    // Performance regression thresholds for JetKVM hardware:
    // - ARM Cortex-A7 @ 1GHz single core
    // - 256MB DDR3L RAM
    // - Must not interfere with primary KVM functionality
    assert.Less(t, frameProcessingTime, baselines["frame_processing"],
        "Frame processing regression: %v > %v", frameProcessingTime, baselines["frame_processing"])
    assert.Less(t, metricsUpdateTime, 100*time.Microsecond,
        "Metrics update regression: %v > 100μs", metricsUpdateTime)
    assert.Less(t, configAccessTime, 10*time.Microsecond,
        "Config access regression: %v > 10μs", configAccessTime)
    assert.Less(t, validationTime, 10*time.Microsecond,
        "Validation regression: %v > 10μs", validationTime)

    t.Logf("Performance results:")
    t.Logf(" Frame processing: %v (baseline: %v)", frameProcessingTime, baselines["frame_processing"])
    t.Logf(" Metrics update: %v (baseline: %v)", metricsUpdateTime, baselines["metrics_update"])
    t.Logf(" Config access: %v (baseline: %v)", configAccessTime, baselines["config_access"])
    t.Logf(" Validation: %v (baseline: %v)", validationTime, baselines["validation"])
}
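
// The assertions above use looser ceilings (100µs and 10µs) than the values stored
// in the baselines map, which is only consulted for logging. A table-driven helper
// that asserts directly against the map keeps the two from drifting apart; a
// sketch, assuming the measured durations are gathered as above:
func checkAgainstBaselines(t *testing.T, baselines, measured map[string]time.Duration) {
    for name, baseline := range baselines {
        got, ok := measured[name]
        if !ok {
            t.Errorf("no measurement recorded for %q", name)
            continue
        }
        assert.Less(t, got, baseline, "%s regression: %v > %v", name, got, baseline)
    }
}
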
// TestMemoryLeakDetection tests for memory leaks in critical paths
func TestMemoryLeakDetection(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping memory leak test in short mode")
    }

    var m1, m2 runtime.MemStats

    // Baseline measurement
    runtime.GC()
    runtime.ReadMemStats(&m1)

    // Perform many operations that should not leak memory
    for cycle := 0; cycle < 10; cycle++ {
        for i := 0; i < 1000; i++ {
            frameData := make([]byte, 1920)
            _ = ValidateAudioFrame(frameData)
            RecordFrameReceived(len(frameData))
            _ = GetAudioMetrics()
            _ = GetAudioConfig()
        }
        // Force garbage collection between cycles
        runtime.GC()
    }

    // Final measurement
    runtime.GC()
    runtime.ReadMemStats(&m2)

    memoryGrowth := int64(m2.Alloc) - int64(m1.Alloc)
    t.Logf("Memory growth after 10,000 operations: %d bytes", memoryGrowth)

    // Memory growth should be minimal (less than 1MB)
    assert.Less(t, memoryGrowth, int64(1024*1024),
        "Excessive memory growth detected: %d bytes", memoryGrowth)
}
|
|
@ -69,13 +69,13 @@ func (ps *PriorityScheduler) SetThreadPriority(priority int, policy int) error {
|
|||
// If we can't set real-time priority, try nice value instead
|
||||
schedNormal, _, _ := getSchedulingPolicies()
|
||||
if policy != schedNormal {
|
||||
ps.logger.Warn().Int("errno", int(errno)).Msg("Failed to set real-time priority, falling back to nice")
|
||||
ps.logger.Warn().Int("errno", int(errno)).Msg("failed to set real-time priority, falling back to nice")
|
||||
return ps.setNicePriority(priority)
|
||||
}
|
||||
return errno
|
||||
}
|
||||
|
||||
ps.logger.Debug().Int("tid", tid).Int("priority", priority).Int("policy", policy).Msg("Thread priority set")
|
||||
ps.logger.Debug().Int("tid", tid).Int("priority", priority).Int("policy", policy).Msg("thread priority set")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -93,11 +93,11 @@ func (ps *PriorityScheduler) setNicePriority(rtPriority int) error {
|
|||
|
||||
err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, niceValue)
|
||||
if err != nil {
|
||||
ps.logger.Warn().Err(err).Int("nice", niceValue).Msg("Failed to set nice priority")
|
||||
ps.logger.Warn().Err(err).Int("nice", niceValue).Msg("failed to set nice priority")
|
||||
return err
|
||||
}
|
||||
|
||||
ps.logger.Debug().Int("nice", niceValue).Msg("Nice priority set as fallback")
|
||||
ps.logger.Debug().Int("nice", niceValue).Msg("nice priority set as fallback")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -132,13 +132,13 @@ func (ps *PriorityScheduler) ResetPriority() error {
|
|||
// Disable disables priority scheduling (useful for testing or fallback)
|
||||
func (ps *PriorityScheduler) Disable() {
|
||||
ps.enabled = false
|
||||
ps.logger.Info().Msg("Priority scheduling disabled")
|
||||
ps.logger.Debug().Msg("priority scheduling disabled")
|
||||
}
|
||||
|
||||
// Enable enables priority scheduling
|
||||
func (ps *PriorityScheduler) Enable() {
|
||||
ps.enabled = true
|
||||
ps.logger.Info().Msg("Priority scheduling enabled")
|
||||
ps.logger.Debug().Msg("priority scheduling enabled")
|
||||
}
|
||||
|
||||
// Global priority scheduler instance
|
||||
|
|
|
@ -95,7 +95,7 @@ func (pm *ProcessMonitor) Start() {
|
|||
|
||||
pm.running = true
|
||||
go pm.monitorLoop()
|
||||
pm.logger.Info().Msg("Process monitor started")
|
||||
pm.logger.Debug().Msg("process monitor started")
|
||||
}
|
||||
|
||||
// Stop stops monitoring processes
|
||||
|
@ -109,7 +109,7 @@ func (pm *ProcessMonitor) Stop() {
|
|||
|
||||
pm.running = false
|
||||
close(pm.stopChan)
|
||||
pm.logger.Info().Msg("Process monitor stopped")
|
||||
pm.logger.Debug().Msg("process monitor stopped")
|
||||
}
|
||||
|
||||
// AddProcess adds a process to monitor
|
||||
|
|
|
@ -0,0 +1,362 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestRegressionScenarios tests critical edge cases and error conditions
|
||||
// that could cause system instability in production
|
||||
func TestRegressionScenarios(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "IPCConnectionFailure",
|
||||
testFunc: testIPCConnectionFailureRecovery,
|
||||
description: "Test IPC connection failure and recovery scenarios",
|
||||
},
|
||||
{
|
||||
name: "BufferOverflow",
|
||||
testFunc: testBufferOverflowHandling,
|
||||
description: "Test buffer overflow protection and recovery",
|
||||
},
|
||||
{
|
||||
name: "SupervisorRapidRestart",
|
||||
testFunc: testSupervisorRapidRestartScenario,
|
||||
description: "Test supervisor behavior under rapid restart conditions",
|
||||
},
|
||||
{
|
||||
name: "ConcurrentStartStop",
|
||||
testFunc: testConcurrentStartStopOperations,
|
||||
description: "Test concurrent start/stop operations for race conditions",
|
||||
},
|
||||
{
|
||||
name: "MemoryLeakPrevention",
|
||||
testFunc: testMemoryLeakPrevention,
|
||||
description: "Test memory leak prevention in long-running scenarios",
|
||||
},
|
||||
{
|
||||
name: "ConfigValidationEdgeCases",
|
||||
testFunc: testConfigValidationEdgeCases,
|
||||
description: "Test configuration validation with edge case values",
|
||||
},
|
||||
{
|
||||
name: "AtomicOperationConsistency",
|
||||
testFunc: testAtomicOperationConsistency,
|
||||
description: "Test atomic operations consistency under high concurrency",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Logf("Running regression test: %s - %s", tt.name, tt.description)
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testIPCConnectionFailureRecovery tests IPC connection failure scenarios
|
||||
func testIPCConnectionFailureRecovery(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test start with no IPC server available (should handle gracefully)
|
||||
err := manager.Start()
|
||||
// Should not panic or crash, may return error depending on implementation
|
||||
if err != nil {
|
||||
t.Logf("Expected error when no IPC server available: %v", err)
|
||||
}
|
||||
|
||||
// Test that manager can recover after IPC becomes available
|
||||
if manager.IsRunning() {
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
// Verify clean state after failure
|
||||
assert.False(t, manager.IsRunning())
|
||||
assert.False(t, manager.IsReady())
|
||||
}
|
||||
|
||||
// testBufferOverflowHandling tests buffer overflow protection
|
||||
func testBufferOverflowHandling(t *testing.T) {
|
||||
// Test with extremely large buffer sizes
|
||||
extremelyLargeSize := 1024 * 1024 * 100 // 100MB
|
||||
err := ValidateBufferSize(extremelyLargeSize)
|
||||
assert.Error(t, err, "Should reject extremely large buffer sizes")
|
||||
|
||||
// Test with negative buffer sizes
|
||||
err = ValidateBufferSize(-1)
|
||||
assert.Error(t, err, "Should reject negative buffer sizes")
|
||||
|
||||
// Test with zero buffer size
|
||||
err = ValidateBufferSize(0)
|
||||
assert.Error(t, err, "Should reject zero buffer size")
|
||||
|
||||
// Test with maximum valid buffer size
|
||||
maxValidSize := GetConfig().SocketMaxBuffer
|
||||
err = ValidateBufferSize(int(maxValidSize))
|
||||
assert.NoError(t, err, "Should accept maximum valid buffer size")
|
||||
}
|
||||
|
||||
// testSupervisorRapidRestartScenario tests supervisor under rapid restart conditions
|
||||
func testSupervisorRapidRestartScenario(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping rapid restart test in short mode")
|
||||
}
|
||||
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Perform rapid start/stop cycles to test for race conditions
|
||||
for i := 0; i < 10; i++ {
|
||||
err := supervisor.Start()
|
||||
if err != nil {
|
||||
t.Logf("Start attempt %d failed (expected in test environment): %v", i, err)
|
||||
}
|
||||
|
||||
// Very short delay to stress test
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
supervisor.Stop()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Verify supervisor is in clean state after rapid cycling
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
// testConcurrentStartStopOperations tests concurrent operations for race conditions
|
||||
func testConcurrentStartStopOperations(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 10
|
||||
|
||||
// Launch multiple goroutines trying to start/stop concurrently
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(2)
|
||||
|
||||
// Start goroutine
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Concurrent start %d: %v", id, err)
|
||||
}
|
||||
}(i)
|
||||
|
||||
// Stop goroutine
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
time.Sleep(5 * time.Millisecond) // Small delay
|
||||
manager.Stop()
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Ensure final state is consistent
|
||||
manager.Stop() // Final cleanup
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
// testMemoryLeakPrevention tests for memory leaks in long-running scenarios
|
||||
func testMemoryLeakPrevention(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping memory leak test in short mode")
|
||||
}
|
||||
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Simulate long-running operation with periodic restarts
|
||||
for cycle := 0; cycle < 5; cycle++ {
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Start cycle %d failed (expected): %v", cycle, err)
|
||||
}
|
||||
|
||||
// Simulate some activity
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Get metrics to ensure they're not accumulating indefinitely
|
||||
metrics := manager.GetMetrics()
|
||||
assert.NotNil(t, metrics, "Metrics should be available")
|
||||
|
||||
manager.Stop()
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Final verification
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
// testConfigValidationEdgeCases tests configuration validation with edge cases
|
||||
func testConfigValidationEdgeCases(t *testing.T) {
|
||||
// Test sample rate edge cases
|
||||
testCases := []struct {
|
||||
sampleRate int
|
||||
channels int
|
||||
frameSize int
|
||||
shouldPass bool
|
||||
description string
|
||||
}{
|
||||
{0, 2, 960, false, "zero sample rate"},
|
||||
{-1, 2, 960, false, "negative sample rate"},
|
||||
{1, 2, 960, false, "extremely low sample rate"},
|
||||
{999999, 2, 960, false, "extremely high sample rate"},
|
||||
{48000, 0, 960, false, "zero channels"},
|
||||
{48000, -1, 960, false, "negative channels"},
|
||||
{48000, 100, 960, false, "too many channels"},
|
||||
{48000, 2, 0, false, "zero frame size"},
|
||||
{48000, 2, -1, false, "negative frame size"},
|
||||
{48000, 2, 999999, true, "extremely large frame size"},
|
||||
{48000, 2, 960, true, "valid configuration"},
|
||||
{44100, 1, 441, true, "valid mono configuration"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateInputIPCConfig(tc.sampleRate, tc.channels, tc.frameSize)
|
||||
if tc.shouldPass {
|
||||
assert.NoError(t, err, "Should accept valid config: %s", tc.description)
|
||||
} else {
|
||||
assert.Error(t, err, "Should reject invalid config: %s", tc.description)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAtomicOperationConsistency tests atomic operations under high concurrency
|
||||
func testAtomicOperationConsistency(t *testing.T) {
|
||||
var counter int64
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 100
|
||||
const incrementsPerGoroutine = 1000
|
||||
|
||||
// Launch multiple goroutines performing atomic operations
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for j := 0; j < incrementsPerGoroutine; j++ {
|
||||
atomic.AddInt64(&counter, 1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify final count is correct
|
||||
expected := int64(numGoroutines * incrementsPerGoroutine)
|
||||
actual := atomic.LoadInt64(&counter)
|
||||
assert.Equal(t, expected, actual, "Atomic operations should be consistent")
|
||||
}
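
// On Go 1.19+ the typed sync/atomic wrappers express the same invariant with less
// room for accidental non-atomic access to the counter. A sketch mirroring the
// goroutine and increment counts used above:
func testAtomicTypedCounterSketch(t *testing.T) {
    var counter atomic.Int64
    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for j := 0; j < 1000; j++ {
                counter.Add(1)
            }
        }()
    }
    wg.Wait()
    assert.Equal(t, int64(100*1000), counter.Load(), "typed atomic counter should be consistent")
}
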
// TestErrorRecoveryScenarios tests various error recovery scenarios
|
||||
func TestErrorRecoveryScenarios(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"NetworkConnectionLoss", testNetworkConnectionLossRecovery},
|
||||
{"ProcessCrashRecovery", testProcessCrashRecovery},
|
||||
{"ResourceExhaustionRecovery", testResourceExhaustionRecovery},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testNetworkConnectionLossRecovery tests recovery from network connection loss
|
||||
func testNetworkConnectionLossRecovery(t *testing.T) {
|
||||
// Create a temporary socket that we can close to simulate connection loss
|
||||
tempDir := t.TempDir()
|
||||
socketPath := fmt.Sprintf("%s/test_recovery.sock", tempDir)
|
||||
|
||||
// Create and immediately close a socket to test connection failure
|
||||
listener, err := net.Listen("unix", socketPath)
|
||||
if err != nil {
|
||||
t.Skipf("Cannot create test socket: %v", err)
|
||||
}
|
||||
listener.Close() // Close immediately to simulate connection loss
|
||||
|
||||
// Remove socket file to ensure connection will fail
|
||||
os.Remove(socketPath)
|
||||
|
||||
// Test that components handle connection loss gracefully
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// This should handle the connection failure gracefully
|
||||
err = manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Expected connection failure handled: %v", err)
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
// testProcessCrashRecovery tests recovery from process crashes
|
||||
func testProcessCrashRecovery(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping process crash test in short mode")
|
||||
}
|
||||
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Start supervisor (will likely fail in test environment, but should handle gracefully)
|
||||
err := supervisor.Start()
|
||||
if err != nil {
|
||||
t.Logf("Supervisor start failed as expected in test environment: %v", err)
|
||||
}
|
||||
|
||||
// Verify supervisor can be stopped cleanly even after start failure
|
||||
supervisor.Stop()
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
// testResourceExhaustionRecovery tests recovery from resource exhaustion
|
||||
func testResourceExhaustionRecovery(t *testing.T) {
|
||||
// Test with resource constraints
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Simulate resource exhaustion by rapid start/stop cycles
|
||||
for i := 0; i < 20; i++ {
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Resource exhaustion cycle %d: %v", i, err)
|
||||
}
|
||||
manager.Stop()
|
||||
// No delay to stress test resource management
|
||||
}
|
||||
|
||||
// Verify system can still function after resource stress
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Final start after resource stress: %v", err)
|
||||
}
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
|
@ -140,17 +140,17 @@ func (r *AudioRelay) relayLoop() {
|
|||
for {
|
||||
select {
|
||||
case <-r.ctx.Done():
|
||||
r.logger.Debug().Msg("Audio relay loop stopping")
|
||||
r.logger.Debug().Msg("audio relay loop stopping")
|
||||
return
|
||||
default:
|
||||
frame, err := r.client.ReceiveFrame()
|
||||
if err != nil {
|
||||
consecutiveErrors++
|
||||
r.logger.Error().Err(err).Int("consecutive_errors", consecutiveErrors).Msg("Error reading frame from audio output server")
|
||||
r.logger.Error().Err(err).Int("consecutive_errors", consecutiveErrors).Msg("error reading frame from audio output server")
|
||||
r.incrementDropped()
|
||||
|
||||
if consecutiveErrors >= maxConsecutiveErrors {
|
||||
r.logger.Error().Msgf("Too many consecutive read errors (%d/%d), stopping audio relay", consecutiveErrors, maxConsecutiveErrors)
|
||||
r.logger.Error().Int("consecutive_errors", consecutiveErrors).Int("max_errors", maxConsecutiveErrors).Msg("too many consecutive read errors, stopping audio relay")
|
||||
return
|
||||
}
|
||||
time.Sleep(GetConfig().ShortSleepDuration)
|
||||
|
@ -159,7 +159,7 @@ func (r *AudioRelay) relayLoop() {
|
|||
|
||||
consecutiveErrors = 0
|
||||
if err := r.forwardToWebRTC(frame); err != nil {
|
||||
r.logger.Warn().Err(err).Msg("Failed to forward frame to WebRTC")
|
||||
r.logger.Warn().Err(err).Msg("failed to forward frame to webrtc")
|
||||
r.incrementDropped()
|
||||
} else {
|
||||
r.incrementRelayed()
|
||||
|
@ -170,6 +170,13 @@ func (r *AudioRelay) relayLoop() {
|
|||
|
||||
// forwardToWebRTC forwards a frame to the WebRTC audio track
|
||||
func (r *AudioRelay) forwardToWebRTC(frame []byte) error {
|
||||
// Use ultra-fast validation for critical audio path
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
r.incrementDropped()
|
||||
r.logger.Debug().Err(err).Msg("invalid frame data in relay")
|
||||
return err
|
||||
}
|
||||
|
||||
r.mutex.RLock()
|
||||
defer r.mutex.RUnlock()
|
||||
|
||||
|
|
|
@ -4,17 +4,12 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Restart configuration is now retrieved from centralized config
|
||||
|
@ -36,30 +31,17 @@ func getMaxRestartDelay() time.Duration {
|
|||
|
||||
// AudioOutputSupervisor manages the audio output server subprocess lifecycle
|
||||
type AudioOutputSupervisor struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
logger *zerolog.Logger
|
||||
mutex sync.RWMutex
|
||||
running int32
|
||||
|
||||
// Process management
|
||||
cmd *exec.Cmd
|
||||
processPID int
|
||||
*BaseSupervisor
|
||||
|
||||
// Restart management
|
||||
restartAttempts []time.Time
|
||||
lastExitCode int
|
||||
lastExitTime time.Time
|
||||
|
||||
// Channels for coordination
|
||||
processDone chan struct{}
|
||||
// Channel management
|
||||
stopChan chan struct{}
|
||||
processDone chan struct{}
|
||||
stopChanClosed bool // Track if stopChan is closed
|
||||
processDoneClosed bool // Track if processDone is closed
|
||||
|
||||
// Process monitoring
|
||||
processMonitor *ProcessMonitor
|
||||
|
||||
// Callbacks
|
||||
onProcessStart func(pid int)
|
||||
onProcessExit func(pid int, exitCode int, crashed bool)
|
||||
|
@ -68,16 +50,11 @@ type AudioOutputSupervisor struct {
|
|||
|
||||
// NewAudioOutputSupervisor creates a new audio output server supervisor
func NewAudioOutputSupervisor() *AudioOutputSupervisor {
    ctx, cancel := context.WithCancel(context.Background())
    logger := logging.GetDefaultLogger().With().Str("component", AudioOutputSupervisorComponent).Logger()

    return &AudioOutputSupervisor{
        ctx:             ctx,
        cancel:          cancel,
        logger:          &logger,
        processDone:     make(chan struct{}),
        stopChan:        make(chan struct{}),
        processMonitor:  GetProcessMonitor(),
        BaseSupervisor:  NewBaseSupervisor("audio-output-supervisor"),
        restartAttempts: make([]time.Time, 0),
        stopChan:        make(chan struct{}),
        processDone:     make(chan struct{}),
    }
}

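
// The constructor above now delegates shared state to the embedded BaseSupervisor
// (NewBaseSupervisor, logSupervisorStart/logSupervisorStop, createContext/
// cancelContext and GetProcessMetrics are all called on it elsewhere in this diff).
// Its definition is not part of this changeset; the following is only a rough
// sketch of the shape those call sites imply, not the repository's implementation:
type BaseSupervisor struct {
    ctx            context.Context
    cancel         context.CancelFunc
    logger         *zerolog.Logger
    mutex          sync.RWMutex
    running        int32
    processPID     int
    processMonitor *ProcessMonitor
}

func NewBaseSupervisor(component string) *BaseSupervisor {
    logger := logging.GetDefaultLogger().With().Str("component", component).Logger()
    return &BaseSupervisor{
        logger:         &logger,
        processMonitor: GetProcessMonitor(),
    }
}

func (b *BaseSupervisor) createContext() {
    b.ctx, b.cancel = context.WithCancel(context.Background())
}

func (b *BaseSupervisor) cancelContext() {
    if b.cancel != nil {
        b.cancel()
    }
}

func (b *BaseSupervisor) logSupervisorStart() { b.logger.Info().Msg("starting component") }

func (b *BaseSupervisor) logSupervisorStop() { b.logger.Info().Msg("stopping component") }
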
|
@ -101,7 +78,8 @@ func (s *AudioOutputSupervisor) Start() error {
|
|||
return fmt.Errorf("audio output supervisor is already running")
|
||||
}
|
||||
|
||||
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("starting component")
|
||||
s.logSupervisorStart()
|
||||
s.createContext()
|
||||
|
||||
// Recreate channels in case they were closed by a previous Stop() call
|
||||
s.mutex.Lock()
|
||||
|
@ -109,12 +87,8 @@ func (s *AudioOutputSupervisor) Start() error {
|
|||
s.stopChan = make(chan struct{})
|
||||
s.stopChanClosed = false // Reset channel closed flag
|
||||
s.processDoneClosed = false // Reset channel closed flag
|
||||
// Recreate context as well since it might have been cancelled
|
||||
s.ctx, s.cancel = context.WithCancel(context.Background())
|
||||
// Reset restart tracking on start
|
||||
s.restartAttempts = s.restartAttempts[:0]
|
||||
s.lastExitCode = 0
|
||||
s.lastExitTime = time.Time{}
|
||||
s.mutex.Unlock()
|
||||
|
||||
// Start the supervision loop
|
||||
|
@ -130,7 +104,7 @@ func (s *AudioOutputSupervisor) Stop() {
|
|||
return // Already stopped
|
||||
}
|
||||
|
||||
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("stopping component")
|
||||
s.logSupervisorStop()
|
||||
|
||||
// Signal stop and wait for cleanup
|
||||
s.mutex.Lock()
|
||||
|
@ -139,7 +113,7 @@ func (s *AudioOutputSupervisor) Stop() {
|
|||
s.stopChanClosed = true
|
||||
}
|
||||
s.mutex.Unlock()
|
||||
s.cancel()
|
||||
s.cancelContext()
|
||||
|
||||
// Wait for process to exit
|
||||
select {
|
||||
|
@ -153,61 +127,11 @@ func (s *AudioOutputSupervisor) Stop() {
|
|||
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component stopped")
|
||||
}
|
||||
|
||||
// IsRunning returns true if the supervisor is running
|
||||
func (s *AudioOutputSupervisor) IsRunning() bool {
|
||||
return atomic.LoadInt32(&s.running) == 1
|
||||
}
|
||||
|
||||
// GetProcessPID returns the current process PID (0 if not running)
|
||||
func (s *AudioOutputSupervisor) GetProcessPID() int {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
return s.processPID
|
||||
}
|
||||
|
||||
// GetLastExitInfo returns information about the last process exit
|
||||
func (s *AudioOutputSupervisor) GetLastExitInfo() (exitCode int, exitTime time.Time) {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
return s.lastExitCode, s.lastExitTime
|
||||
}
|
||||
|
||||
// GetProcessMetrics returns current process metrics if the process is running
|
||||
// GetProcessMetrics returns current process metrics with audio-output-server name
|
||||
func (s *AudioOutputSupervisor) GetProcessMetrics() *ProcessMetrics {
|
||||
s.mutex.RLock()
|
||||
pid := s.processPID
|
||||
s.mutex.RUnlock()
|
||||
|
||||
if pid == 0 {
|
||||
// Return default metrics when no process is running
|
||||
return &ProcessMetrics{
|
||||
PID: 0,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Timestamp: time.Now(),
|
||||
ProcessName: "audio-output-server",
|
||||
}
|
||||
}
|
||||
|
||||
metrics := s.processMonitor.GetCurrentMetrics()
|
||||
for _, metric := range metrics {
|
||||
if metric.PID == pid {
|
||||
return &metric
|
||||
}
|
||||
}
|
||||
|
||||
// Return default metrics if process not found in monitor
|
||||
return &ProcessMetrics{
|
||||
PID: pid,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Timestamp: time.Now(),
|
||||
ProcessName: "audio-output-server",
|
||||
}
|
||||
metrics := s.BaseSupervisor.GetProcessMetrics()
|
||||
metrics.ProcessName = "audio-output-server"
|
||||
return metrics
|
||||
}
|
||||
|
||||
// supervisionLoop is the main supervision loop
|
||||
|
@ -364,10 +288,10 @@ func (s *AudioOutputSupervisor) waitForProcessExit() {
|
|||
s.processMonitor.RemoveProcess(pid)
|
||||
|
||||
if crashed {
|
||||
s.logger.Error().Int("pid", pid).Int("exit_code", exitCode).Msg("audio server process crashed")
|
||||
s.logger.Error().Int("pid", pid).Int("exit_code", exitCode).Msg("audio output server process crashed")
|
||||
s.recordRestartAttempt()
|
||||
} else {
|
||||
s.logger.Info().Int("pid", pid).Msg("audio server process exited gracefully")
|
||||
s.logger.Info().Int("pid", pid).Msg("audio output server process exited gracefully")
|
||||
}
|
||||
|
||||
if s.onProcessExit != nil {
|
||||
|
@ -386,11 +310,11 @@ func (s *AudioOutputSupervisor) terminateProcess() {
|
|||
return
|
||||
}
|
||||
|
||||
s.logger.Info().Int("pid", pid).Msg("terminating audio server process")
|
||||
s.logger.Info().Int("pid", pid).Msg("terminating audio output server process")
|
||||
|
||||
// Send SIGTERM first
|
||||
if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
s.logger.Warn().Err(err).Int("pid", pid).Msg("failed to send SIGTERM")
|
||||
s.logger.Warn().Err(err).Int("pid", pid).Msg("failed to send SIGTERM to audio output server process")
|
||||
}
|
||||
|
||||
// Wait for graceful shutdown
|
||||
|
|
|
@ -1,7 +1,11 @@
|
|||
//go:build cgo || arm
|
||||
// +build cgo arm
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -10,6 +14,8 @@ var (
|
|||
ErrInvalidAudioQuality = errors.New("invalid audio quality level")
|
||||
ErrInvalidFrameSize = errors.New("invalid frame size")
|
||||
ErrInvalidFrameData = errors.New("invalid frame data")
|
||||
ErrFrameDataEmpty = errors.New("invalid frame data: frame data is empty")
|
||||
ErrFrameDataTooLarge = errors.New("invalid frame data: exceeds maximum")
|
||||
ErrInvalidBufferSize = errors.New("invalid buffer size")
|
||||
ErrInvalidPriority = errors.New("invalid priority value")
|
||||
ErrInvalidLatency = errors.New("invalid latency value")
|
||||
|
@ -18,30 +24,18 @@ var (
|
|||
ErrInvalidMetricsInterval = errors.New("invalid metrics interval")
|
||||
ErrInvalidSampleRate = errors.New("invalid sample rate")
|
||||
ErrInvalidChannels = errors.New("invalid channels")
|
||||
ErrInvalidBitrate = errors.New("invalid bitrate")
|
||||
ErrInvalidFrameDuration = errors.New("invalid frame duration")
|
||||
ErrInvalidOffset = errors.New("invalid offset")
|
||||
ErrInvalidLength = errors.New("invalid length")
|
||||
)
|
||||
|
||||
// ValidateAudioQuality validates audio quality enum values
|
||||
// ValidateAudioQuality validates audio quality enum values with enhanced checks
|
||||
func ValidateAudioQuality(quality AudioQuality) error {
|
||||
switch quality {
|
||||
case AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra:
|
||||
return nil
|
||||
default:
|
||||
return ErrInvalidAudioQuality
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateFrameData validates audio frame data
|
||||
func ValidateFrameData(data []byte) error {
|
||||
if len(data) == 0 {
|
||||
return ErrInvalidFrameData
|
||||
}
|
||||
// Use a reasonable default if config is not available
|
||||
maxFrameSize := 4096
|
||||
if config := GetConfig(); config != nil {
|
||||
maxFrameSize = config.MaxAudioFrameSize
|
||||
}
|
||||
if len(data) > maxFrameSize {
|
||||
return ErrInvalidFrameSize
|
||||
// Validate enum range
|
||||
if quality < AudioQualityLow || quality > AudioQualityUltra {
|
||||
return fmt.Errorf("%w: quality value %d outside valid range [%d, %d]",
|
||||
ErrInvalidAudioQuality, int(quality), int(AudioQualityLow), int(AudioQualityUltra))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -55,73 +49,63 @@ func ValidateZeroCopyFrame(frame *ZeroCopyAudioFrame) error {
|
|||
if len(data) == 0 {
|
||||
return ErrInvalidFrameData
|
||||
}
|
||||
// Use a reasonable default if config is not available
|
||||
maxFrameSize := 4096
|
||||
if config := GetConfig(); config != nil {
|
||||
maxFrameSize = config.MaxAudioFrameSize
|
||||
}
|
||||
// Use config value
|
||||
maxFrameSize := GetConfig().MaxAudioFrameSize
|
||||
if len(data) > maxFrameSize {
|
||||
return ErrInvalidFrameSize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateBufferSize validates buffer size parameters
// ValidateBufferSize validates buffer size parameters with enhanced boundary checks
func ValidateBufferSize(size int) error {
    if size <= 0 {
        return ErrInvalidBufferSize
        return fmt.Errorf("%w: buffer size %d must be positive", ErrInvalidBufferSize, size)
    }
    // Use a reasonable default if config is not available
    maxBuffer := 262144 // 256KB default
    if config := GetConfig(); config != nil {
        maxBuffer = config.SocketMaxBuffer
    }
    if size > maxBuffer {
        return ErrInvalidBufferSize
    config := GetConfig()
    // Use SocketMaxBuffer as the upper limit for general buffer validation
    // This allows for socket buffers while still preventing extremely large allocations
    if size > config.SocketMaxBuffer {
        return fmt.Errorf("%w: buffer size %d exceeds maximum %d",
            ErrInvalidBufferSize, size, config.SocketMaxBuffer)
    }
    return nil
}

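
// Because the rewritten validator wraps its sentinel with %w, callers can still
// match on ErrInvalidBufferSize via errors.Is while the message carries the
// offending value. A usage sketch (the 4096-byte fallback is an assumption, not a
// project constant):
func sanitizeBufferSize(requested int) int {
    if err := ValidateBufferSize(requested); err != nil {
        if errors.Is(err, ErrInvalidBufferSize) {
            return 4096
        }
    }
    return requested
}
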
// ValidateThreadPriority validates thread priority values
|
||||
// ValidateThreadPriority validates thread priority values with system limits
|
||||
func ValidateThreadPriority(priority int) error {
|
||||
// Use reasonable defaults if config is not available
|
||||
minPriority := -20
|
||||
maxPriority := 99
|
||||
if config := GetConfig(); config != nil {
|
||||
minPriority = config.MinNiceValue
|
||||
maxPriority = config.RTAudioHighPriority
|
||||
}
|
||||
const minPriority, maxPriority = -20, 19
|
||||
if priority < minPriority || priority > maxPriority {
|
||||
return ErrInvalidPriority
|
||||
return fmt.Errorf("%w: priority %d outside valid range [%d, %d]",
|
||||
ErrInvalidPriority, priority, minPriority, maxPriority)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateLatency validates latency values
|
||||
// ValidateLatency validates latency duration values with reasonable bounds
|
||||
func ValidateLatency(latency time.Duration) error {
|
||||
if latency < 0 {
|
||||
return ErrInvalidLatency
|
||||
return fmt.Errorf("%w: latency %v cannot be negative", ErrInvalidLatency, latency)
|
||||
}
|
||||
// Use a reasonable default if config is not available
|
||||
maxLatency := 500 * time.Millisecond
|
||||
if config := GetConfig(); config != nil {
|
||||
maxLatency = config.MaxLatency
|
||||
config := GetConfig()
|
||||
minLatency := time.Millisecond // Minimum reasonable latency
|
||||
if latency > 0 && latency < minLatency {
|
||||
return fmt.Errorf("%w: latency %v below minimum %v",
|
||||
ErrInvalidLatency, latency, minLatency)
|
||||
}
|
||||
if latency > maxLatency {
|
||||
return ErrInvalidLatency
|
||||
if latency > config.MaxLatency {
|
||||
return fmt.Errorf("%w: latency %v exceeds maximum %v",
|
||||
ErrInvalidLatency, latency, config.MaxLatency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateMetricsInterval validates metrics update interval
|
||||
func ValidateMetricsInterval(interval time.Duration) error {
|
||||
// Use reasonable defaults if config is not available
|
||||
minInterval := 100 * time.Millisecond
|
||||
maxInterval := 10 * time.Second
|
||||
if config := GetConfig(); config != nil {
|
||||
minInterval = config.MinMetricsUpdateInterval
|
||||
maxInterval = config.MaxMetricsUpdateInterval
|
||||
}
|
||||
// Use config values
|
||||
config := GetConfig()
|
||||
minInterval := config.MinMetricsUpdateInterval
|
||||
maxInterval := config.MaxMetricsUpdateInterval
|
||||
if interval < minInterval {
|
||||
return ErrInvalidMetricsInterval
|
||||
}
|
||||
|
@ -143,10 +127,7 @@ func ValidateAdaptiveBufferConfig(minSize, maxSize, defaultSize int) error {
|
|||
return ErrInvalidBufferSize
|
||||
}
|
||||
// Validate against global limits
|
||||
maxBuffer := 262144 // 256KB default
|
||||
if config := GetConfig(); config != nil {
|
||||
maxBuffer = config.SocketMaxBuffer
|
||||
}
|
||||
maxBuffer := GetConfig().SocketMaxBuffer
|
||||
if maxSize > maxBuffer {
|
||||
return ErrInvalidBufferSize
|
||||
}
|
||||
|
@ -155,15 +136,11 @@ func ValidateAdaptiveBufferConfig(minSize, maxSize, defaultSize int) error {
|
|||
|
||||
// ValidateInputIPCConfig validates input IPC configuration
|
||||
func ValidateInputIPCConfig(sampleRate, channels, frameSize int) error {
|
||||
// Use reasonable defaults if config is not available
|
||||
minSampleRate := 8000
|
||||
maxSampleRate := 48000
|
||||
maxChannels := 8
|
||||
if config := GetConfig(); config != nil {
|
||||
minSampleRate = config.MinSampleRate
|
||||
maxSampleRate = config.MaxSampleRate
|
||||
maxChannels = config.MaxChannels
|
||||
}
|
||||
// Use config values
|
||||
config := GetConfig()
|
||||
minSampleRate := config.MinSampleRate
|
||||
maxSampleRate := config.MaxSampleRate
|
||||
maxChannels := config.MaxChannels
|
||||
if sampleRate < minSampleRate || sampleRate > maxSampleRate {
|
||||
return ErrInvalidSampleRate
|
||||
}
|
||||
|
@ -175,3 +152,196 @@ func ValidateInputIPCConfig(sampleRate, channels, frameSize int) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateOutputIPCConfig validates output IPC configuration
|
||||
func ValidateOutputIPCConfig(sampleRate, channels, frameSize int) error {
|
||||
// Use config values
|
||||
config := GetConfig()
|
||||
minSampleRate := config.MinSampleRate
|
||||
maxSampleRate := config.MaxSampleRate
|
||||
maxChannels := config.MaxChannels
|
||||
if sampleRate < minSampleRate || sampleRate > maxSampleRate {
|
||||
return ErrInvalidSampleRate
|
||||
}
|
||||
if channels < 1 || channels > maxChannels {
|
||||
return ErrInvalidChannels
|
||||
}
|
||||
if frameSize <= 0 {
|
||||
return ErrInvalidFrameSize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateLatencyConfig validates latency monitor configuration
|
||||
func ValidateLatencyConfig(config LatencyConfig) error {
|
||||
if err := ValidateLatency(config.TargetLatency); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ValidateLatency(config.MaxLatency); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.TargetLatency >= config.MaxLatency {
|
||||
return ErrInvalidLatency
|
||||
}
|
||||
if err := ValidateMetricsInterval(config.OptimizationInterval); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.HistorySize <= 0 {
|
||||
return ErrInvalidBufferSize
|
||||
}
|
||||
if config.JitterThreshold < 0 {
|
||||
return ErrInvalidLatency
|
||||
}
|
||||
if config.AdaptiveThreshold < 0 || config.AdaptiveThreshold > 1.0 {
|
||||
return ErrInvalidConfiguration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateSampleRate validates audio sample rate values
|
||||
func ValidateSampleRate(sampleRate int) error {
|
||||
if sampleRate <= 0 {
|
||||
return fmt.Errorf("%w: sample rate %d must be positive", ErrInvalidSampleRate, sampleRate)
|
||||
}
|
||||
config := GetConfig()
|
||||
validRates := config.ValidSampleRates
|
||||
for _, rate := range validRates {
|
||||
if sampleRate == rate {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("%w: sample rate %d not in supported rates %v",
|
||||
ErrInvalidSampleRate, sampleRate, validRates)
|
||||
}
|
||||
|
||||
// ValidateChannelCount validates audio channel count
|
||||
func ValidateChannelCount(channels int) error {
|
||||
if channels <= 0 {
|
||||
return fmt.Errorf("%w: channel count %d must be positive", ErrInvalidChannels, channels)
|
||||
}
|
||||
config := GetConfig()
|
||||
if channels > config.MaxChannels {
|
||||
return fmt.Errorf("%w: channel count %d exceeds maximum %d",
|
||||
ErrInvalidChannels, channels, config.MaxChannels)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateBitrate validates audio bitrate values (expects kbps)
|
||||
func ValidateBitrate(bitrate int) error {
|
||||
if bitrate <= 0 {
|
||||
return fmt.Errorf("%w: bitrate %d must be positive", ErrInvalidBitrate, bitrate)
|
||||
}
|
||||
config := GetConfig()
|
||||
// Convert kbps to bps for comparison with config limits
|
||||
bitrateInBps := bitrate * 1000
|
||||
if bitrateInBps < config.MinOpusBitrate {
|
||||
return fmt.Errorf("%w: bitrate %d kbps (%d bps) below minimum %d bps",
|
||||
ErrInvalidBitrate, bitrate, bitrateInBps, config.MinOpusBitrate)
|
||||
}
|
||||
if bitrateInBps > config.MaxOpusBitrate {
|
||||
return fmt.Errorf("%w: bitrate %d kbps (%d bps) exceeds maximum %d bps",
|
||||
ErrInvalidBitrate, bitrate, bitrateInBps, config.MaxOpusBitrate)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateFrameDuration validates frame duration values
|
||||
func ValidateFrameDuration(duration time.Duration) error {
|
||||
if duration <= 0 {
|
||||
return fmt.Errorf("%w: frame duration %v must be positive", ErrInvalidFrameDuration, duration)
|
||||
}
|
||||
config := GetConfig()
|
||||
if duration < config.MinFrameDuration {
|
||||
return fmt.Errorf("%w: frame duration %v below minimum %v",
|
||||
ErrInvalidFrameDuration, duration, config.MinFrameDuration)
|
||||
}
|
||||
if duration > config.MaxFrameDuration {
|
||||
return fmt.Errorf("%w: frame duration %v exceeds maximum %v",
|
||||
ErrInvalidFrameDuration, duration, config.MaxFrameDuration)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateAudioConfigComplete performs comprehensive audio configuration validation
func ValidateAudioConfigComplete(config AudioConfig) error {
    if err := ValidateAudioQuality(config.Quality); err != nil {
        return fmt.Errorf("quality validation failed: %w", err)
    }
    if err := ValidateBitrate(config.Bitrate); err != nil {
        return fmt.Errorf("bitrate validation failed: %w", err)
    }
    if err := ValidateSampleRate(config.SampleRate); err != nil {
        return fmt.Errorf("sample rate validation failed: %w", err)
    }
    if err := ValidateChannelCount(config.Channels); err != nil {
        return fmt.Errorf("channel count validation failed: %w", err)
    }
    if err := ValidateFrameDuration(config.FrameSize); err != nil {
        return fmt.Errorf("frame duration validation failed: %w", err)
    }
    return nil
}

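
// Usage sketch for the aggregate validator above. The field values are plausible
// Opus settings (48 kHz stereo, 20 ms frames, 128 kbps) chosen for illustration,
// not values taken from this repository's configuration:
func exampleValidateCompleteConfig() error {
    cfg := AudioConfig{
        Quality:    AudioQualityHigh,
        Bitrate:    128, // kbps, per ValidateBitrate's convention
        SampleRate: 48000,
        Channels:   2,
        FrameSize:  20 * time.Millisecond,
    }
    if err := ValidateAudioConfigComplete(cfg); err != nil {
        return fmt.Errorf("audio config rejected: %w", err)
    }
    return nil
}
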
// ValidateAudioConfigConstants validates audio configuration constants
|
||||
func ValidateAudioConfigConstants(config *AudioConfigConstants) error {
|
||||
// Validate that audio quality constants are within valid ranges
|
||||
for _, quality := range []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra} {
|
||||
if err := ValidateAudioQuality(quality); err != nil {
|
||||
return fmt.Errorf("invalid audio quality constant %v: %w", quality, err)
|
||||
}
|
||||
}
|
||||
// Validate configuration values if config is provided
|
||||
if config != nil {
|
||||
if config.MaxFrameSize <= 0 {
|
||||
return fmt.Errorf("invalid MaxFrameSize: %d", config.MaxFrameSize)
|
||||
}
|
||||
if config.SampleRate <= 0 {
|
||||
return fmt.Errorf("invalid SampleRate: %d", config.SampleRate)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cached max frame size to avoid function call overhead in hot paths
var cachedMaxFrameSize int

// Note: Validation cache is initialized on first use to avoid init function

// InitValidationCache initializes cached validation values with actual config
func InitValidationCache() {
    cachedMaxFrameSize = GetConfig().MaxAudioFrameSize
}

// ValidateAudioFrame provides optimized validation for audio frame data
// This is the primary validation function used in all audio processing paths
//
// Performance optimizations:
// - Uses cached config value to eliminate function call overhead
// - Single branch condition for optimal CPU pipeline efficiency
// - Inlined length checks for minimal overhead
//
//go:inline
func ValidateAudioFrame(data []byte) error {
    // Initialize cache on first use if not already done
    if cachedMaxFrameSize == 0 {
        InitValidationCache()
    }
    // Optimized validation with pre-allocated error messages for minimal overhead
    dataLen := len(data)
    if dataLen == 0 {
        return ErrFrameDataEmpty
    }
    if dataLen > cachedMaxFrameSize {
        return ErrFrameDataTooLarge
    }
    return nil
}

// WrapWithMetadata wraps error with metadata for enhanced validation context
func WrapWithMetadata(err error, component, operation string, metadata map[string]interface{}) error {
    if err == nil {
        return nil
    }
    return fmt.Errorf("%s.%s: %w (metadata: %+v)", component, operation, err, metadata)
}

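
// Usage sketch combining the hot-path validator with WrapWithMetadata for the
// slow-path error report; the component and operation names are illustrative:
func checkIncomingFrame(frame []byte) error {
    if err := ValidateAudioFrame(frame); err != nil {
        return WrapWithMetadata(err, "audio-input", "frame-receive", map[string]interface{}{
            "frame_bytes": len(frame),
            "max_bytes":   cachedMaxFrameSize,
        })
    }
    return nil
}
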
|
|
|
@ -1,290 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Enhanced validation errors with more specific context
|
||||
var (
|
||||
ErrInvalidFrameLength = errors.New("invalid frame length")
|
||||
ErrFrameDataCorrupted = errors.New("frame data appears corrupted")
|
||||
ErrBufferAlignment = errors.New("buffer alignment invalid")
|
||||
ErrInvalidSampleFormat = errors.New("invalid sample format")
|
||||
ErrInvalidTimestamp = errors.New("invalid timestamp")
|
||||
ErrConfigurationMismatch = errors.New("configuration mismatch")
|
||||
ErrResourceExhaustion = errors.New("resource exhaustion detected")
|
||||
ErrInvalidPointer = errors.New("invalid pointer")
|
||||
ErrBufferOverflow = errors.New("buffer overflow detected")
|
||||
ErrInvalidState = errors.New("invalid state")
|
||||
)
|
||||
|
||||
// ValidationLevel defines the level of validation to perform
|
||||
type ValidationLevel int
|
||||
|
||||
const (
|
||||
ValidationMinimal ValidationLevel = iota // Only critical safety checks
|
||||
ValidationStandard // Standard validation for production
|
||||
ValidationStrict // Comprehensive validation for debugging
|
||||
)
|
||||
|
||||
// ValidationConfig controls validation behavior
|
||||
type ValidationConfig struct {
|
||||
Level ValidationLevel
|
||||
EnableRangeChecks bool
|
||||
EnableAlignmentCheck bool
|
||||
EnableDataIntegrity bool
|
||||
MaxValidationTime time.Duration
|
||||
}
|
||||
|
||||
// GetValidationConfig returns the current validation configuration
|
||||
func GetValidationConfig() ValidationConfig {
|
||||
return ValidationConfig{
|
||||
Level: ValidationStandard,
|
||||
EnableRangeChecks: true,
|
||||
EnableAlignmentCheck: true,
|
||||
EnableDataIntegrity: false, // Disabled by default for performance
|
||||
MaxValidationTime: 5 * time.Second, // Default validation timeout
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateAudioFrameFast performs minimal validation for performance-critical paths
|
||||
func ValidateAudioFrameFast(data []byte) error {
|
||||
if len(data) == 0 {
|
||||
return ErrInvalidFrameData
|
||||
}
|
||||
|
||||
// Quick bounds check using config constants
|
||||
maxSize := GetConfig().MaxAudioFrameSize
|
||||
if len(data) > maxSize {
|
||||
return fmt.Errorf("%w: frame size %d exceeds maximum %d", ErrInvalidFrameSize, len(data), maxSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateAudioFrameComprehensive performs thorough validation
|
||||
func ValidateAudioFrameComprehensive(data []byte, expectedSampleRate int, expectedChannels int) error {
|
||||
validationConfig := GetValidationConfig()
|
||||
start := time.Now()
|
||||
|
||||
// Timeout protection for validation
|
||||
defer func() {
|
||||
if time.Since(start) > validationConfig.MaxValidationTime {
|
||||
// Log validation timeout but don't fail
|
||||
getValidationLogger().Warn().Dur("duration", time.Since(start)).Msg("validation timeout exceeded")
|
||||
}
|
||||
}()
|
||||
|
||||
// Basic validation first
|
||||
if err := ValidateAudioFrameFast(data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Range validation
|
||||
if validationConfig.EnableRangeChecks {
|
||||
config := GetConfig()
|
||||
minFrameSize := 64 // Minimum reasonable frame size
|
||||
if len(data) < minFrameSize {
|
||||
return fmt.Errorf("%w: frame size %d below minimum %d", ErrInvalidFrameSize, len(data), minFrameSize)
|
||||
}
|
||||
|
||||
// Validate frame length matches expected sample format
|
||||
expectedFrameSize := (expectedSampleRate * expectedChannels * 2) / 1000 * int(config.AudioQualityMediumFrameSize/time.Millisecond)
|
||||
tolerance := 512 // Frame size tolerance in bytes
|
||||
if abs(len(data)-expectedFrameSize) > tolerance {
|
||||
return fmt.Errorf("%w: frame size %d doesn't match expected %d (±%d)", ErrInvalidFrameLength, len(data), expectedFrameSize, tolerance)
|
||||
}
|
||||
}
|
||||
|
||||
// Alignment validation for ARM32 compatibility
|
||||
if validationConfig.EnableAlignmentCheck {
|
||||
if uintptr(unsafe.Pointer(&data[0]))%4 != 0 {
|
||||
return fmt.Errorf("%w: buffer not 4-byte aligned for ARM32", ErrBufferAlignment)
|
||||
}
|
||||
}
|
||||
|
||||
// Data integrity checks (expensive, only for debugging)
|
||||
if validationConfig.EnableDataIntegrity && validationConfig.Level == ValidationStrict {
|
||||
if err := validateAudioDataIntegrity(data, expectedChannels); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateZeroCopyFrameEnhanced performs enhanced zero-copy frame validation
|
||||
func ValidateZeroCopyFrameEnhanced(frame *ZeroCopyAudioFrame) error {
|
||||
if frame == nil {
|
||||
return fmt.Errorf("%w: frame is nil", ErrInvalidPointer)
|
||||
}
|
||||
|
||||
// Check reference count validity
|
||||
frame.mutex.RLock()
|
||||
refCount := frame.refCount
|
||||
length := frame.length
|
||||
capacity := frame.capacity
|
||||
frame.mutex.RUnlock()
|
||||
|
||||
if refCount <= 0 {
|
||||
return fmt.Errorf("%w: invalid reference count %d", ErrInvalidState, refCount)
|
||||
}
|
||||
|
||||
if length < 0 || capacity < 0 {
|
||||
return fmt.Errorf("%w: negative length (%d) or capacity (%d)", ErrInvalidState, length, capacity)
|
||||
}
|
||||
|
||||
if length > capacity {
|
||||
return fmt.Errorf("%w: length %d exceeds capacity %d", ErrBufferOverflow, length, capacity)
|
||||
}
|
||||
|
||||
// Validate the underlying data
|
||||
data := frame.Data()
|
||||
return ValidateAudioFrameFast(data)
|
||||
}
|
||||
|
||||
// ValidateBufferBounds performs bounds checking with overflow protection
|
||||
func ValidateBufferBounds(buffer []byte, offset, length int) error {
|
||||
if buffer == nil {
|
||||
return fmt.Errorf("%w: buffer is nil", ErrInvalidPointer)
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
return fmt.Errorf("%w: negative offset %d", ErrInvalidState, offset)
|
||||
}
|
||||
|
||||
if length < 0 {
|
||||
return fmt.Errorf("%w: negative length %d", ErrInvalidState, length)
|
||||
}
|
||||
|
||||
// Check for integer overflow
|
||||
if offset > len(buffer) {
|
||||
return fmt.Errorf("%w: offset %d exceeds buffer length %d", ErrBufferOverflow, offset, len(buffer))
|
||||
}
|
||||
|
||||
// Safe addition check for overflow
|
||||
if offset+length < offset || offset+length > len(buffer) {
|
||||
return fmt.Errorf("%w: range [%d:%d] exceeds buffer length %d", ErrBufferOverflow, offset, offset+length, len(buffer))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateAudioConfiguration performs comprehensive configuration validation
|
||||
func ValidateAudioConfiguration(config AudioConfig) error {
|
||||
if err := ValidateAudioQuality(config.Quality); err != nil {
|
||||
return fmt.Errorf("quality validation failed: %w", err)
|
||||
}
|
||||
|
||||
configConstants := GetConfig()
|
||||
|
||||
// Validate bitrate ranges
|
||||
minBitrate := 6000 // Minimum Opus bitrate
|
||||
maxBitrate := 510000 // Maximum Opus bitrate
|
||||
if config.Bitrate < minBitrate || config.Bitrate > maxBitrate {
|
||||
return fmt.Errorf("%w: bitrate %d outside valid range [%d, %d]", ErrInvalidConfiguration, config.Bitrate, minBitrate, maxBitrate)
|
||||
}
|
||||
|
||||
// Validate sample rate
|
||||
validSampleRates := []int{8000, 12000, 16000, 24000, 48000}
|
||||
validSampleRate := false
|
||||
for _, rate := range validSampleRates {
|
||||
if config.SampleRate == rate {
|
||||
validSampleRate = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !validSampleRate {
|
||||
return fmt.Errorf("%w: sample rate %d not in supported rates %v", ErrInvalidSampleRate, config.SampleRate, validSampleRates)
|
||||
}
|
||||
|
||||
// Validate channels
|
||||
if config.Channels < 1 || config.Channels > configConstants.MaxChannels {
|
||||
return fmt.Errorf("%w: channels %d outside valid range [1, %d]", ErrInvalidChannels, config.Channels, configConstants.MaxChannels)
|
||||
}
|
||||
|
||||
// Validate frame size
|
||||
minFrameSize := 10 * time.Millisecond // Minimum frame duration
|
||||
maxFrameSize := 100 * time.Millisecond // Maximum frame duration
|
||||
if config.FrameSize < minFrameSize || config.FrameSize > maxFrameSize {
|
||||
return fmt.Errorf("%w: frame size %v outside valid range [%v, %v]", ErrInvalidConfiguration, config.FrameSize, minFrameSize, maxFrameSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateResourceLimits checks if system resources are within acceptable limits
|
||||
func ValidateResourceLimits() error {
|
||||
config := GetConfig()
|
||||
|
||||
// Check buffer pool sizes
|
||||
framePoolStats := GetAudioBufferPoolStats()
|
||||
if framePoolStats.FramePoolSize > int64(config.MaxPoolSize*2) {
|
||||
return fmt.Errorf("%w: frame pool size %d exceeds safe limit %d", ErrResourceExhaustion, framePoolStats.FramePoolSize, config.MaxPoolSize*2)
|
||||
}
|
||||
|
||||
// Check zero-copy pool allocation count
|
||||
zeroCopyStats := GetGlobalZeroCopyPoolStats()
|
||||
if zeroCopyStats.AllocationCount > int64(config.MaxPoolSize*3) {
|
||||
return fmt.Errorf("%w: zero-copy allocations %d exceed safe limit %d", ErrResourceExhaustion, zeroCopyStats.AllocationCount, config.MaxPoolSize*3)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateAudioDataIntegrity performs expensive data integrity checks
|
||||
func validateAudioDataIntegrity(data []byte, channels int) error {
|
||||
if len(data)%2 != 0 {
|
||||
return fmt.Errorf("%w: odd number of bytes for 16-bit samples", ErrInvalidSampleFormat)
|
||||
}
|
||||
|
||||
if len(data)%(channels*2) != 0 {
|
||||
return fmt.Errorf("%w: data length %d not aligned to channel count %d", ErrInvalidSampleFormat, len(data), channels)
|
||||
}
|
||||
|
||||
// Check for obvious corruption patterns (all zeros, all max values)
|
||||
sampleCount := len(data) / 2
|
||||
zeroCount := 0
|
||||
maxCount := 0
|
||||
|
||||
for i := 0; i < len(data); i += 2 {
|
||||
sample := int16(data[i]) | int16(data[i+1])<<8
|
||||
switch sample {
|
||||
case 0:
|
||||
zeroCount++
|
||||
case 32767, -32768:
|
||||
maxCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Flag suspicious patterns
|
||||
if zeroCount > sampleCount*9/10 {
|
||||
return fmt.Errorf("%w: %d%% zero samples suggests silence or corruption", ErrFrameDataCorrupted, (zeroCount*100)/sampleCount)
|
||||
}
|
||||
|
||||
if maxCount > sampleCount/10 {
|
||||
return fmt.Errorf("%w: %d%% max-value samples suggests clipping or corruption", ErrFrameDataCorrupted, (maxCount*100)/sampleCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper function for absolute value
|
||||
func abs(x int) int {
|
||||
if x < 0 {
|
||||
return -x
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// getValidationLogger returns a logger for validation operations
|
||||
func getValidationLogger() *zerolog.Logger {
|
||||
// Return a basic logger for validation
|
||||
logger := zerolog.New(nil).With().Timestamp().Logger()
|
||||
return &logger
|
||||
}
@ -0,0 +1,541 @@
//go:build cgo
// +build cgo

package audio

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// TestValidationFunctions provides comprehensive testing of all validation functions
// to ensure they catch breaking changes and regressions effectively
func TestValidationFunctions(t *testing.T) {
    // Initialize validation cache for testing
    InitValidationCache()

    tests := []struct {
        name     string
        testFunc func(t *testing.T)
    }{
        {"AudioQualityValidation", testAudioQualityValidation},
        {"FrameDataValidation", testFrameDataValidation},
        {"BufferSizeValidation", testBufferSizeValidation},
        {"ThreadPriorityValidation", testThreadPriorityValidation},
        {"LatencyValidation", testLatencyValidation},
        {"MetricsIntervalValidation", testMetricsIntervalValidation},
        {"SampleRateValidation", testSampleRateValidation},
        {"ChannelCountValidation", testChannelCountValidation},
        {"BitrateValidation", testBitrateValidation},
        {"FrameDurationValidation", testFrameDurationValidation},
        {"IPCConfigValidation", testIPCConfigValidation},
        {"AdaptiveBufferConfigValidation", testAdaptiveBufferConfigValidation},
        {"AudioConfigCompleteValidation", testAudioConfigCompleteValidation},
        {"ZeroCopyFrameValidation", testZeroCopyFrameValidation},
        {"AudioFrameFastValidation", testAudioFrameFastValidation},
        {"ErrorWrappingValidation", testErrorWrappingValidation},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            tt.testFunc(t)
        })
    }
}

// testAudioQualityValidation tests audio quality validation with boundary conditions
func testAudioQualityValidation(t *testing.T) {
    // Test valid quality levels
    validQualities := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
    for _, quality := range validQualities {
        err := ValidateAudioQuality(quality)
        assert.NoError(t, err, "Valid quality %d should pass validation", quality)
    }

    // Test invalid quality levels
    invalidQualities := []AudioQuality{-1, 4, 100, -100}
    for _, quality := range invalidQualities {
        err := ValidateAudioQuality(quality)
        assert.Error(t, err, "Invalid quality %d should fail validation", quality)
        assert.Contains(t, err.Error(), "invalid audio quality level", "Error should mention audio quality")
    }
}

// testFrameDataValidation tests frame data validation with various edge cases using modern validation
func testFrameDataValidation(t *testing.T) {
    config := GetConfig()

    // Test empty data
    err := ValidateAudioFrame([]byte{})
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "frame data is empty")

    // Test data above maximum size
    largeData := make([]byte, config.MaxAudioFrameSize+1)
    err = ValidateAudioFrame(largeData)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid data
    validData := make([]byte, 1000) // Within bounds
    if len(validData) <= config.MaxAudioFrameSize {
        err = ValidateAudioFrame(validData)
        assert.NoError(t, err)
    }
}

// testBufferSizeValidation tests buffer size validation
func testBufferSizeValidation(t *testing.T) {
    config := GetConfig()

    // Test negative and zero sizes
    invalidSizes := []int{-1, -100, 0}
    for _, size := range invalidSizes {
        err := ValidateBufferSize(size)
        assert.Error(t, err, "Buffer size %d should be invalid", size)
        assert.Contains(t, err.Error(), "must be positive")
    }

    // Test size exceeding maximum
    err := ValidateBufferSize(config.SocketMaxBuffer + 1)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid sizes
    validSizes := []int{1, 1024, 4096, config.SocketMaxBuffer}
    for _, size := range validSizes {
        err := ValidateBufferSize(size)
        assert.NoError(t, err, "Buffer size %d should be valid", size)
    }
}

// testThreadPriorityValidation tests thread priority validation
func testThreadPriorityValidation(t *testing.T) {
    // Test valid priorities
    validPriorities := []int{-20, -10, 0, 10, 19}
    for _, priority := range validPriorities {
        err := ValidateThreadPriority(priority)
        assert.NoError(t, err, "Priority %d should be valid", priority)
    }

    // Test invalid priorities
    invalidPriorities := []int{-21, -100, 20, 100}
    for _, priority := range invalidPriorities {
        err := ValidateThreadPriority(priority)
        assert.Error(t, err, "Priority %d should be invalid", priority)
        assert.Contains(t, err.Error(), "outside valid range")
    }
}

// testLatencyValidation tests latency validation
func testLatencyValidation(t *testing.T) {
    config := GetConfig()

    // Test negative latency
    err := ValidateLatency(-1 * time.Millisecond)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "cannot be negative")

    // Test zero latency (should be valid)
    err = ValidateLatency(0)
    assert.NoError(t, err)

    // Test very small positive latency
    err = ValidateLatency(500 * time.Microsecond)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "below minimum")

    // Test latency exceeding maximum
    err = ValidateLatency(config.MaxLatency + time.Second)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid latencies
    validLatencies := []time.Duration{
        1 * time.Millisecond,
        10 * time.Millisecond,
        100 * time.Millisecond,
        config.MaxLatency,
    }
    for _, latency := range validLatencies {
        err := ValidateLatency(latency)
        assert.NoError(t, err, "Latency %v should be valid", latency)
    }
}

// testMetricsIntervalValidation tests metrics interval validation
func testMetricsIntervalValidation(t *testing.T) {
    config := GetConfig()

    // Test interval below minimum
    err := ValidateMetricsInterval(config.MinMetricsUpdateInterval - time.Millisecond)
    assert.Error(t, err)

    // Test interval above maximum
    err = ValidateMetricsInterval(config.MaxMetricsUpdateInterval + time.Second)
    assert.Error(t, err)

    // Test valid intervals
    validIntervals := []time.Duration{
        config.MinMetricsUpdateInterval,
        config.MaxMetricsUpdateInterval,
        (config.MinMetricsUpdateInterval + config.MaxMetricsUpdateInterval) / 2,
    }
    for _, interval := range validIntervals {
        err := ValidateMetricsInterval(interval)
        assert.NoError(t, err, "Interval %v should be valid", interval)
    }
}

// testSampleRateValidation tests sample rate validation
func testSampleRateValidation(t *testing.T) {
    config := GetConfig()

    // Test negative and zero sample rates
    invalidRates := []int{-1, -48000, 0}
    for _, rate := range invalidRates {
        err := ValidateSampleRate(rate)
        assert.Error(t, err, "Sample rate %d should be invalid", rate)
        assert.Contains(t, err.Error(), "must be positive")
    }

    // Test unsupported sample rates
    unsupportedRates := []int{1000, 12345, 96001}
    for _, rate := range unsupportedRates {
        err := ValidateSampleRate(rate)
        assert.Error(t, err, "Sample rate %d should be unsupported", rate)
        assert.Contains(t, err.Error(), "not in supported rates")
    }

    // Test valid sample rates
    for _, rate := range config.ValidSampleRates {
        err := ValidateSampleRate(rate)
        assert.NoError(t, err, "Sample rate %d should be valid", rate)
    }
}

// testChannelCountValidation tests channel count validation
func testChannelCountValidation(t *testing.T) {
    config := GetConfig()

    // Test invalid channel counts
    invalidCounts := []int{-1, -10, 0}
    for _, count := range invalidCounts {
        err := ValidateChannelCount(count)
        assert.Error(t, err, "Channel count %d should be invalid", count)
        assert.Contains(t, err.Error(), "must be positive")
    }

    // Test channel count exceeding maximum
    err := ValidateChannelCount(config.MaxChannels + 1)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid channel counts
    validCounts := []int{1, 2, config.MaxChannels}
    for _, count := range validCounts {
        err := ValidateChannelCount(count)
        assert.NoError(t, err, "Channel count %d should be valid", count)
    }
}

// testBitrateValidation tests bitrate validation
func testBitrateValidation(t *testing.T) {
    // Test invalid bitrates
    invalidBitrates := []int{-1, -1000, 0}
    for _, bitrate := range invalidBitrates {
        err := ValidateBitrate(bitrate)
        assert.Error(t, err, "Bitrate %d should be invalid", bitrate)
        assert.Contains(t, err.Error(), "must be positive")
    }

    // Test bitrate below minimum (in kbps)
    err := ValidateBitrate(5) // 5 kbps = 5000 bps < 6000 bps minimum
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "below minimum")

    // Test bitrate above maximum (in kbps)
    err = ValidateBitrate(511) // 511 kbps = 511000 bps > 510000 bps maximum
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid bitrates (in kbps)
    validBitrates := []int{
        6,   // 6 kbps = 6000 bps (minimum)
        64,  // Medium quality preset
        128, // High quality preset
        192, // Ultra quality preset
        510, // 510 kbps = 510000 bps (maximum)
    }
    for _, bitrate := range validBitrates {
        err := ValidateBitrate(bitrate)
        assert.NoError(t, err, "Bitrate %d kbps should be valid", bitrate)
    }
}

// testFrameDurationValidation tests frame duration validation
func testFrameDurationValidation(t *testing.T) {
    config := GetConfig()

    // Test invalid durations
    invalidDurations := []time.Duration{-1 * time.Millisecond, -1 * time.Second, 0}
    for _, duration := range invalidDurations {
        err := ValidateFrameDuration(duration)
        assert.Error(t, err, "Duration %v should be invalid", duration)
        assert.Contains(t, err.Error(), "must be positive")
    }

    // Test duration below minimum
    err := ValidateFrameDuration(config.MinFrameDuration - time.Microsecond)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "below minimum")

    // Test duration above maximum
    err = ValidateFrameDuration(config.MaxFrameDuration + time.Second)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid durations
    validDurations := []time.Duration{
        config.MinFrameDuration,
        config.MaxFrameDuration,
        20 * time.Millisecond, // Common frame duration
    }
    for _, duration := range validDurations {
        err := ValidateFrameDuration(duration)
        assert.NoError(t, err, "Duration %v should be valid", duration)
    }
}

// testIPCConfigValidation tests IPC configuration validation
func testIPCConfigValidation(t *testing.T) {
    config := GetConfig()

    // Test invalid configurations for input IPC
    invalidConfigs := []struct {
        sampleRate, channels, frameSize int
        description                     string
    }{
        {0, 2, 960, "zero sample rate"},
        {48000, 0, 960, "zero channels"},
        {48000, 2, 0, "zero frame size"},
        {config.MinSampleRate - 1, 2, 960, "sample rate below minimum"},
        {config.MaxSampleRate + 1, 2, 960, "sample rate above maximum"},
        {48000, config.MaxChannels + 1, 960, "too many channels"},
        {48000, -1, 960, "negative channels"},
        {48000, 2, -1, "negative frame size"},
    }

    for _, tc := range invalidConfigs {
        // Test input IPC validation
        err := ValidateInputIPCConfig(tc.sampleRate, tc.channels, tc.frameSize)
        assert.Error(t, err, "Input IPC config should be invalid: %s", tc.description)

        // Test output IPC validation
        err = ValidateOutputIPCConfig(tc.sampleRate, tc.channels, tc.frameSize)
        assert.Error(t, err, "Output IPC config should be invalid: %s", tc.description)
    }

    // Test valid configuration
    err := ValidateInputIPCConfig(48000, 2, 960)
    assert.NoError(t, err)
    err = ValidateOutputIPCConfig(48000, 2, 960)
    assert.NoError(t, err)
}

// testAdaptiveBufferConfigValidation tests adaptive buffer configuration validation
func testAdaptiveBufferConfigValidation(t *testing.T) {
    config := GetConfig()

    // Test invalid configurations
    invalidConfigs := []struct {
        minSize, maxSize, defaultSize int
        description                   string
    }{
        {0, 1024, 512, "zero min size"},
        {-1, 1024, 512, "negative min size"},
        {512, 0, 256, "zero max size"},
        {512, -1, 256, "negative max size"},
        {512, 1024, 0, "zero default size"},
        {512, 1024, -1, "negative default size"},
        {1024, 512, 768, "min >= max"},
        {512, 1024, 256, "default < min"},
        {512, 1024, 2048, "default > max"},
        {512, config.SocketMaxBuffer + 1, 1024, "max exceeds global limit"},
    }

    for _, tc := range invalidConfigs {
        err := ValidateAdaptiveBufferConfig(tc.minSize, tc.maxSize, tc.defaultSize)
        assert.Error(t, err, "Config should be invalid: %s", tc.description)
    }

    // Test valid configuration
    err := ValidateAdaptiveBufferConfig(512, 4096, 1024)
    assert.NoError(t, err)
}

// testAudioConfigCompleteValidation tests complete audio configuration validation
func testAudioConfigCompleteValidation(t *testing.T) {
    // Test valid configuration using actual preset values
    validConfig := AudioConfig{
        Quality:    AudioQualityMedium,
        Bitrate:    64, // kbps - matches medium quality preset
        SampleRate: 48000,
        Channels:   2,
        FrameSize:  20 * time.Millisecond,
    }
    err := ValidateAudioConfigComplete(validConfig)
    assert.NoError(t, err)

    // Test invalid quality
    invalidQualityConfig := validConfig
    invalidQualityConfig.Quality = AudioQuality(99)
    err = ValidateAudioConfigComplete(invalidQualityConfig)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "quality validation failed")

    // Test invalid bitrate
    invalidBitrateConfig := validConfig
    invalidBitrateConfig.Bitrate = -1
    err = ValidateAudioConfigComplete(invalidBitrateConfig)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "bitrate validation failed")

    // Test invalid sample rate
    invalidSampleRateConfig := validConfig
    invalidSampleRateConfig.SampleRate = 12345
    err = ValidateAudioConfigComplete(invalidSampleRateConfig)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "sample rate validation failed")

    // Test invalid channels
    invalidChannelsConfig := validConfig
    invalidChannelsConfig.Channels = 0
    err = ValidateAudioConfigComplete(invalidChannelsConfig)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "channel count validation failed")

    // Test invalid frame duration
    invalidFrameDurationConfig := validConfig
    invalidFrameDurationConfig.FrameSize = -1 * time.Millisecond
    err = ValidateAudioConfigComplete(invalidFrameDurationConfig)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "frame duration validation failed")
}

// testZeroCopyFrameValidation tests zero-copy frame validation
func testZeroCopyFrameValidation(t *testing.T) {
    // Test nil frame
    err := ValidateZeroCopyFrame(nil)
    assert.Error(t, err)

    // Note: We can't easily test ZeroCopyAudioFrame without creating actual instances
    // This would require more complex setup, but the validation logic is tested
}

// testAudioFrameFastValidation tests fast audio frame validation
func testAudioFrameFastValidation(t *testing.T) {
    config := GetConfig()

    // Test empty data
    err := ValidateAudioFrame([]byte{})
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "frame data is empty")

    // Test data exceeding maximum size
    largeData := make([]byte, config.MaxAudioFrameSize+1)
    err = ValidateAudioFrame(largeData)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "exceeds maximum")

    // Test valid data
    validData := make([]byte, 1000)
    err = ValidateAudioFrame(validData)
    assert.NoError(t, err)
}

// testErrorWrappingValidation tests error wrapping functionality
func testErrorWrappingValidation(t *testing.T) {
    // Test wrapping nil error
    wrapped := WrapWithMetadata(nil, "component", "operation", map[string]interface{}{"key": "value"})
    assert.Nil(t, wrapped)

    // Test wrapping actual error
    originalErr := assert.AnError
    metadata := map[string]interface{}{
        "frame_size": 1024,
        "quality":    "high",
    }
    wrapped = WrapWithMetadata(originalErr, "audio", "decode", metadata)
    require.NotNil(t, wrapped)
    assert.Contains(t, wrapped.Error(), "audio.decode")
    assert.Contains(t, wrapped.Error(), "assert.AnError")
    assert.Contains(t, wrapped.Error(), "metadata")
    assert.Contains(t, wrapped.Error(), "frame_size")
    assert.Contains(t, wrapped.Error(), "quality")
}

// TestValidationIntegration tests validation functions working together
func TestValidationIntegration(t *testing.T) {
    // Test that validation functions work correctly with actual audio configurations
    presets := GetAudioQualityPresets()
    require.NotEmpty(t, presets)

    for quality, config := range presets {
        t.Run(fmt.Sprintf("Quality_%d", quality), func(t *testing.T) {
            // Validate the preset configuration
            err := ValidateAudioConfigComplete(config)
            assert.NoError(t, err, "Preset configuration for quality %d should be valid", quality)

            // Validate individual components
            err = ValidateAudioQuality(config.Quality)
            assert.NoError(t, err, "Quality should be valid")

            err = ValidateBitrate(config.Bitrate)
            assert.NoError(t, err, "Bitrate should be valid")

            err = ValidateSampleRate(config.SampleRate)
            assert.NoError(t, err, "Sample rate should be valid")

            err = ValidateChannelCount(config.Channels)
            assert.NoError(t, err, "Channel count should be valid")

            err = ValidateFrameDuration(config.FrameSize)
            assert.NoError(t, err, "Frame duration should be valid")
        })
    }
}

// TestValidationPerformance ensures validation functions are efficient
func TestValidationPerformance(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping performance test in short mode")
    }

    // Initialize validation cache for performance testing
    InitValidationCache()

    // Test that validation functions complete quickly
    start := time.Now()
    iterations := 10000

    for i := 0; i < iterations; i++ {
        _ = ValidateAudioQuality(AudioQualityMedium)
        _ = ValidateBufferSize(1024)
        _ = ValidateChannelCount(2)
        _ = ValidateSampleRate(48000)
        _ = ValidateBitrate(96) // 96 kbps
    }

    elapsed := time.Since(start)
    perIteration := elapsed / time.Duration(iterations)

    // Performance expectations for JetKVM (ARM Cortex-A7 @ 1GHz, 256MB RAM)
    // Audio processing must not interfere with primary KVM functionality
    assert.Less(t, perIteration, 200*time.Microsecond, "Validation should not impact KVM performance")
    t.Logf("Validation performance: %v per iteration", perIteration)
}
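A Go benchmark gives a per-call figure that is easier to track across commits than the wall-clock loop in TestValidationPerformance. This is an illustrative sketch that reuses the same helpers as the tests above; the 3840-byte frame size is an assumption chosen to stay under MaxAudioFrameSize, and the benchmark itself is not part of the change.

// Illustrative benchmark sketch; not part of the diff.
func BenchmarkValidateAudioFrame(b *testing.B) {
    InitValidationCache()
    frame := make([]byte, 3840) // ~20ms of 48kHz stereo 16-bit PCM (assumed within limits)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        if err := ValidateAudioFrame(frame); err != nil {
            b.Fatal(err)
        }
    }
}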
@ -192,6 +192,7 @@ func (p *ZeroCopyFramePool) Get() *ZeroCopyAudioFrame {
        frame.data = frame.data[:0]
        frame.mutex.Unlock()

        wasHit = true // Pool hit
        atomic.AddInt64(&p.hitCount, 1)
        return frame
    }
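For context on the hit counter incremented above, a generic sketch of turning hit/miss counters into a hit rate; the miss counter and this helper are illustrative assumptions, not part of the pool's API.

// Illustrative helper: derive a pool hit rate from two atomic counters.
func poolHitRate(hits, misses int64) float64 {
    total := hits + misses
    if total == 0 {
        return 0
    }
    return float64(hits) / float64(total)
}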
3 main.go
@ -32,6 +32,9 @@ func runAudioServer() {
}

func startAudioSubprocess() error {
    // Initialize validation cache for optimal performance
    audio.InitValidationCache()

    // Start adaptive buffer management for optimal performance
    audio.StartAdaptiveBuffering()
@ -0,0 +1,38 @@
import { cx } from "@/cva.config";

interface AudioConfig {
  Quality: number;
  Bitrate: number;
  SampleRate: number;
  Channels: number;
  FrameSize: string;
}

interface AudioConfigDisplayProps {
  config: AudioConfig;
  variant?: 'default' | 'success' | 'info';
  className?: string;
}

const variantStyles = {
  default: "bg-slate-50 text-slate-600 dark:bg-slate-700 dark:text-slate-400",
  success: "bg-green-50 text-green-600 dark:bg-green-900/20 dark:text-green-400",
  info: "bg-blue-50 text-blue-600 dark:bg-blue-900/20 dark:text-blue-400"
};

export function AudioConfigDisplay({ config, variant = 'default', className }: AudioConfigDisplayProps) {
  return (
    <div className={cx(
      "rounded-md p-2 text-xs",
      variantStyles[variant],
      className
    )}>
      <div className="grid grid-cols-2 gap-1">
        <span>Sample Rate: {config.SampleRate}Hz</span>
        <span>Channels: {config.Channels}</span>
        <span>Bitrate: {config.Bitrate}kbps</span>
        <span>Frame: {config.FrameSize}</span>
      </div>
    </div>
  );
}
@ -470,7 +470,7 @@ export default function AudioMetricsDashboard() {
            <div className="mb-2 flex items-center gap-2">
              <MdMic className="h-4 w-4 text-green-600 dark:text-green-400" />
              <span className="font-medium text-slate-900 dark:text-slate-100">
                Microphone Input Config
                Audio Input Config
              </span>
            </div>
            <div className="space-y-2 text-sm">

@ -503,6 +503,8 @@ export default function AudioMetricsDashboard() {
        )}
      </div>

      {/* Subprocess Resource Usage - Histogram View */}
      <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
        {/* Audio Output Subprocess */}
@ -0,0 +1,33 @@
import { cx } from "@/cva.config";

interface AudioMetrics {
  frames_dropped: number;
  // Add other metrics properties as needed
}

interface AudioStatusIndicatorProps {
  metrics?: AudioMetrics;
  label: string;
  className?: string;
}

export function AudioStatusIndicator({ metrics, label, className }: AudioStatusIndicatorProps) {
  const hasIssues = metrics && metrics.frames_dropped > 0;

  return (
    <div className={cx(
      "text-center p-2 bg-slate-50 dark:bg-slate-800 rounded",
      className
    )}>
      <div className={cx(
        "font-medium",
        hasIssues
          ? "text-red-600 dark:text-red-400"
          : "text-green-600 dark:text-green-400"
      )}>
        {hasIssues ? "Issues" : "Good"}
      </div>
      <div className="text-slate-500 dark:text-slate-400">{label}</div>
    </div>
  );
}
@ -1,9 +1,11 @@
import { useEffect, useState } from "react";
import { MdVolumeOff, MdVolumeUp, MdGraphicEq, MdMic, MdMicOff, MdRefresh } from "react-icons/md";
import { LuActivity, LuSettings, LuSignal } from "react-icons/lu";
import { LuActivity, LuSignal } from "react-icons/lu";

import { Button } from "@components/Button";
import { AudioLevelMeter } from "@components/AudioLevelMeter";
import { AudioConfigDisplay } from "@components/AudioConfigDisplay";
import { AudioStatusIndicator } from "@components/AudioStatusIndicator";
import { cx } from "@/cva.config";
import { useUiStore } from "@/hooks/stores";
import { useAudioDevices } from "@/hooks/useAudioDevices";

@ -11,7 +13,6 @@ import { useAudioLevel } from "@/hooks/useAudioLevel";
import { useAudioEvents } from "@/hooks/useAudioEvents";
import api from "@/api";
import notifications from "@/notifications";
import { AUDIO_CONFIG } from "@/config/constants";
import audioQualityService from "@/services/audioQualityService";

// Type for microphone error
@ -54,7 +55,7 @@ interface AudioControlPopoverProps {
export default function AudioControlPopover({ microphone, open }: AudioControlPopoverProps) {
  const [currentConfig, setCurrentConfig] = useState<AudioConfig | null>(null);
  const [currentMicrophoneConfig, setCurrentMicrophoneConfig] = useState<AudioConfig | null>(null);
  const [showAdvanced, setShowAdvanced] = useState(false);

  const [isLoading, setIsLoading] = useState(false);

  // Add cache flags to prevent unnecessary API calls
@ -274,17 +275,7 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
    }
  };

  const formatBytes = (bytes: number) => {
    if (bytes === 0) return "0 B";
    const k = 1024;
    const sizes = ["B", "KB", "MB", "GB"];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i];
  };

  const formatNumber = (num: number) => {
    return new Intl.NumberFormat().format(num);
  };

  return (
    <div className="w-full max-w-md rounded-lg border border-slate-200 bg-white p-4 shadow-lg dark:border-slate-700 dark:bg-slate-800">
@ -523,14 +514,10 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
        </div>

        {currentMicrophoneConfig && (
          <div className="rounded-md bg-green-50 p-2 text-xs text-green-600 dark:bg-green-900/20 dark:text-green-400">
            <div className="grid grid-cols-2 gap-1">
              <span>Sample Rate: {currentMicrophoneConfig.SampleRate}Hz</span>
              <span>Channels: {currentMicrophoneConfig.Channels}</span>
              <span>Bitrate: {currentMicrophoneConfig.Bitrate}kbps</span>
              <span>Frame: {currentMicrophoneConfig.FrameSize}</span>
            </div>
          </div>
          <AudioConfigDisplay
            config={currentMicrophoneConfig}
            variant="success"
          />
        )}
      </div>
    )}
@ -564,164 +551,45 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
        </div>

        {currentConfig && (
          <div className="rounded-md bg-slate-50 p-2 text-xs text-slate-600 dark:bg-slate-700 dark:text-slate-400">
            <div className="grid grid-cols-2 gap-1">
              <span>Sample Rate: {currentConfig.SampleRate}Hz</span>
              <span>Channels: {currentConfig.Channels}</span>
              <span>Bitrate: {currentConfig.Bitrate}kbps</span>
              <span>Frame: {currentConfig.FrameSize}</span>
          <AudioConfigDisplay
            config={currentConfig}
            variant="default"
          />
        )}
      </div>

      {/* Quick Status Summary */}
      <div className="rounded-lg border border-slate-200 p-3 dark:border-slate-600">
        <div className="flex items-center gap-2 mb-2">
          <LuActivity className="h-4 w-4 text-slate-600 dark:text-slate-400" />
          <span className="font-medium text-slate-900 dark:text-slate-100">
            Quick Status
          </span>
        </div>

        {metrics ? (
          <div className="grid grid-cols-2 gap-3 text-xs">
            <AudioStatusIndicator
              metrics={metrics}
              label="Audio Output"
            />

            {micMetrics && (
              <AudioStatusIndicator
                metrics={micMetrics}
                label="Microphone"
              />
            )}
          </div>
        ) : (
          <div className="text-center py-2">
            <div className="text-sm text-slate-500 dark:text-slate-400">
              No data available
            </div>
          </div>
        )}
      </div>

      {/* Advanced Controls Toggle */}
      <button
        onClick={() => setShowAdvanced(!showAdvanced)}
        className="flex w-full items-center justify-between rounded-md border border-slate-200 p-2 text-sm font-medium text-slate-700 hover:bg-slate-50 dark:border-slate-600 dark:text-slate-300 dark:hover:bg-slate-700"
      >
        <div className="flex items-center gap-2">
          <LuSettings className="h-4 w-4" />
          <span>Advanced Metrics</span>
        </div>
        <span className={cx(
          "transition-transform",
          showAdvanced ? "rotate-180" : "rotate-0"
        )}>
          ▼
        </span>
      </button>

      {/* Advanced Metrics */}
      {showAdvanced && (
        <div className="space-y-3 rounded-lg border border-slate-200 p-3 dark:border-slate-600">
          <div className="flex items-center gap-2">
            <LuActivity className="h-4 w-4 text-slate-600 dark:text-slate-400" />
            <span className="font-medium text-slate-900 dark:text-slate-100">
              Performance Metrics
            </span>
          </div>

          {metrics ? (
            <>
              <div className="mb-4">
                <h4 className="text-sm font-medium text-slate-700 dark:text-slate-300 mb-2">Audio Output</h4>
                <div className="grid grid-cols-2 gap-3 text-xs">
                  <div className="space-y-1">
                    <div className="text-slate-500 dark:text-slate-400">Frames Received</div>
                    <div className="font-mono text-green-600 dark:text-green-400">
                      {formatNumber(metrics.frames_received)}
                    </div>
                  </div>

                  <div className="space-y-1">
                    <div className="text-slate-500 dark:text-slate-400">Frames Dropped</div>
                    <div className={cx(
                      "font-mono",
                      metrics.frames_dropped > 0
                        ? "text-red-600 dark:text-red-400"
                        : "text-green-600 dark:text-green-400"
                    )}>
                      {formatNumber(metrics.frames_dropped)}
                    </div>
                  </div>

                  <div className="space-y-1">
                    <div className="text-slate-500 dark:text-slate-400">Data Processed</div>
                    <div className="font-mono text-blue-600 dark:text-blue-400">
                      {formatBytes(metrics.bytes_processed)}
                    </div>
                  </div>

                  <div className="space-y-1">
                    <div className="text-slate-500 dark:text-slate-400">Connection Drops</div>
                    <div className={cx(
                      "font-mono",
                      metrics.connection_drops > 0
                        ? "text-red-600 dark:text-red-400"
                        : "text-green-600 dark:text-green-400"
                    )}>
                      {formatNumber(metrics.connection_drops)}
                    </div>
                  </div>
                </div>
              </div>

              {micMetrics && (
                <div className="mb-4">
                  <h4 className="text-sm font-medium text-slate-700 dark:text-slate-300 mb-2">Microphone Input</h4>
                  <div className="grid grid-cols-2 gap-3 text-xs">
                    <div className="space-y-1">
                      <div className="text-slate-500 dark:text-slate-400">Frames Sent</div>
                      <div className="font-mono text-green-600 dark:text-green-400">
                        {formatNumber(micMetrics.frames_sent)}
                      </div>
                    </div>

                    <div className="space-y-1">
                      <div className="text-slate-500 dark:text-slate-400">Frames Dropped</div>
                      <div className={cx(
                        "font-mono",
                        micMetrics.frames_dropped > 0
                          ? "text-red-600 dark:text-red-400"
                          : "text-green-600 dark:text-green-400"
                      )}>
                        {formatNumber(micMetrics.frames_dropped)}
                      </div>
                    </div>

                    <div className="space-y-1">
                      <div className="text-slate-500 dark:text-slate-400">Data Processed</div>
                      <div className="font-mono text-blue-600 dark:text-blue-400">
                        {formatBytes(micMetrics.bytes_processed)}
                      </div>
                    </div>

                    <div className="space-y-1">
                      <div className="text-slate-500 dark:text-slate-400">Connection Drops</div>
                      <div className={cx(
                        "font-mono",
                        micMetrics.connection_drops > 0
                          ? "text-red-600 dark:text-red-400"
                          : "text-green-600 dark:text-green-400"
                      )}>
                        {formatNumber(micMetrics.connection_drops)}
                      </div>
                    </div>
                  </div>
                </div>
              )}

              {metrics.frames_received > 0 && (
                <div className="mt-3 rounded-md bg-slate-50 p-2 dark:bg-slate-700">
                  <div className="text-xs text-slate-500 dark:text-slate-400">Drop Rate</div>
                  <div className={cx(
                    "font-mono text-sm",
                    ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
                      ? "text-red-600 dark:text-red-400"
                      : ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
                        ? "text-yellow-600 dark:text-yellow-400"
                        : "text-green-600 dark:text-green-400"
                  )}>
                    {((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER).toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES)}%
                  </div>
                </div>
              )}

              <div className="text-xs text-slate-500 dark:text-slate-400">
                Last updated: {new Date().toLocaleTimeString()}
              </div>
            </>
          ) : (
            <div className="text-center py-4">
              <div className="text-sm text-slate-500 dark:text-slate-400">
                Loading metrics...
              </div>
            </div>
          )}
        </div>
      )}

      {/* Audio Metrics Dashboard Button */}
      <div className="pt-2 border-t border-slate-200 dark:border-slate-600">
        <div className="flex justify-center">
@ -89,62 +89,17 @@ export const AUDIO_CONFIG = {
  SYNC_DEBOUNCE_MS: 1000, // debounce state synchronization
  AUDIO_TEST_TIMEOUT: 100, // ms - timeout for audio testing

  // Audio Output Quality Bitrates (matching backend config_constants.go)
  OUTPUT_QUALITY_BITRATES: {
    LOW: 32, // AudioQualityLowOutputBitrate
    MEDIUM: 64, // AudioQualityMediumOutputBitrate
    HIGH: 128, // AudioQualityHighOutputBitrate
    ULTRA: 192, // AudioQualityUltraOutputBitrate
  // NOTE: Audio quality presets (bitrates, sample rates, channels, frame sizes)
  // are now fetched dynamically from the backend API via audioQualityService
  // to eliminate duplication with backend config_constants.go

  // Default Quality Labels - will be updated dynamically by audioQualityService
  DEFAULT_QUALITY_LABELS: {
    0: "Low",
    1: "Medium",
    2: "High",
    3: "Ultra",
  } as const,
  // Audio Input Quality Bitrates (matching backend config_constants.go)
  INPUT_QUALITY_BITRATES: {
    LOW: 16, // AudioQualityLowInputBitrate
    MEDIUM: 32, // AudioQualityMediumInputBitrate
    HIGH: 64, // AudioQualityHighInputBitrate
    ULTRA: 96, // AudioQualityUltraInputBitrate
  } as const,
  // Sample Rates (matching backend config_constants.go)
  QUALITY_SAMPLE_RATES: {
    LOW: 22050, // AudioQualityLowSampleRate
    MEDIUM: 44100, // AudioQualityMediumSampleRate
    HIGH: 48000, // Default SampleRate
    ULTRA: 48000, // Default SampleRate
  } as const,
  // Microphone Sample Rates
  MIC_QUALITY_SAMPLE_RATES: {
    LOW: 16000, // AudioQualityMicLowSampleRate
    MEDIUM: 44100, // AudioQualityMediumSampleRate
    HIGH: 48000, // Default SampleRate
    ULTRA: 48000, // Default SampleRate
  } as const,
  // Channels (matching backend config_constants.go)
  QUALITY_CHANNELS: {
    LOW: 1, // AudioQualityLowChannels (mono)
    MEDIUM: 2, // AudioQualityMediumChannels (stereo)
    HIGH: 2, // AudioQualityHighChannels (stereo)
    ULTRA: 2, // AudioQualityUltraChannels (stereo)
  } as const,
  // Frame Sizes in milliseconds (matching backend config_constants.go)
  QUALITY_FRAME_SIZES: {
    LOW: 40, // AudioQualityLowFrameSize (40ms)
    MEDIUM: 20, // AudioQualityMediumFrameSize (20ms)
    HIGH: 20, // AudioQualityHighFrameSize (20ms)
    ULTRA: 10, // AudioQualityUltraFrameSize (10ms)
  } as const,
  // Updated Quality Labels with correct output bitrates
  QUALITY_LABELS: {
    0: "Low (32 kbps)",
    1: "Medium (64 kbps)",
    2: "High (128 kbps)",
    3: "Ultra (192 kbps)",
  } as const,
  // Legacy support - keeping for backward compatibility
  QUALITY_BITRATES: {
    LOW: 32,
    MEDIUM: 64,
    HIGH: 128,
    ULTRA: 192, // Updated to match backend
  },

  // Audio Analysis
  ANALYSIS_FFT_SIZE: 256, // for detailed audio analysis
18 web.go
@ -212,7 +212,8 @@ func setupRouter() *gin.Engine {
    })

    protected.GET("/audio/metrics", func(c *gin.Context) {
        metrics := audio.GetAudioMetrics()
        registry := audio.GetMetricsRegistry()
        metrics := registry.GetAudioMetrics()
        c.JSON(200, gin.H{
            "frames_received": metrics.FramesReceived,
            "frames_dropped":  metrics.FramesDropped,

@ -399,19 +400,8 @@ func setupRouter() *gin.Engine {
    })

    protected.GET("/microphone/metrics", func(c *gin.Context) {
        if currentSession == nil || currentSession.AudioInputManager == nil {
            c.JSON(200, gin.H{
                "frames_sent":      0,
                "frames_dropped":   0,
                "bytes_processed":  0,
                "last_frame_time":  "",
                "connection_drops": 0,
                "average_latency":  "0.0ms",
            })
            return
        }

        metrics := currentSession.AudioInputManager.GetMetrics()
        registry := audio.GetMetricsRegistry()
        metrics := registry.GetAudioInputMetrics()
        c.JSON(200, gin.H{
            "frames_sent":    metrics.FramesSent,
            "frames_dropped": metrics.FramesDropped,
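For reference, a small sketch of reading the same registry outside the HTTP handlers; GetMetricsRegistry, GetAudioMetrics, and GetAudioInputMetrics appear in the diff above, while this helper and its formatting are assumptions for illustration.

// Illustrative only: snapshot both metric sets from the shared registry.
func printAudioMetricsSnapshot() {
    registry := audio.GetMetricsRegistry()
    out := registry.GetAudioMetrics()
    in := registry.GetAudioInputMetrics()
    fmt.Printf("output: received=%v dropped=%v\n", out.FramesReceived, out.FramesDropped)
    fmt.Printf("input:  sent=%v dropped=%v\n", in.FramesSent, in.FramesDropped)
}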