mirror of https://github.com/jetkvm/kvm.git
refactor(audio): standardize log levels and messages across components
- Change Info logs to Debug for routine operations
- Standardize log message formatting to lowercase
- Improve error message clarity and consistency
- Add new metrics for device health and memory monitoring
- Simplify config constants documentation
parent dc2db8ed2d
commit 50e04192bf
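Taken together, the convention applied throughout the hunks below looks roughly like this — a minimal zerolog sketch assembled from the changed lines in this commit, not a new API:

    logger := logging.GetDefaultLogger().With().Str("component", "audio").Logger()

    // Routine lifecycle events drop from Info to Debug, and messages become lowercase.
    logger.Debug().Msg("latency monitor started")

    // Recoverable validation problems log at Warn instead of Error, then fall back to defaults.
    if err := ValidateAudioQuality(quality); err != nil {
        logger.Warn().Err(err).Int("quality", int(quality)).Msg("invalid audio quality, using current config")
        return
    }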
@@ -108,7 +108,7 @@ func NewAdaptiveBufferManager(config AdaptiveBufferConfig) *AdaptiveBufferManage
     logger := logging.GetDefaultLogger().With().Str("component", "adaptive-buffer").Logger()

     if err := ValidateAdaptiveBufferConfig(config.MinBufferSize, config.MaxBufferSize, config.DefaultBufferSize); err != nil {
-        logger.Error().Err(err).Msg("Invalid adaptive buffer config, using defaults")
+        logger.Warn().Err(err).Msg("invalid adaptive buffer config, using defaults")
         config = DefaultAdaptiveBufferConfig()
     }

@@ -130,14 +130,14 @@ func NewAdaptiveBufferManager(config AdaptiveBufferConfig) *AdaptiveBufferManage
 func (abm *AdaptiveBufferManager) Start() {
     abm.wg.Add(1)
     go abm.adaptationLoop()
-    abm.logger.Info().Msg("Adaptive buffer manager started")
+    abm.logger.Info().Msg("adaptive buffer manager started")
 }

 // Stop stops the adaptive buffer management
 func (abm *AdaptiveBufferManager) Stop() {
     abm.cancel()
     abm.wg.Wait()
-    abm.logger.Info().Msg("Adaptive buffer manager stopped")
+    abm.logger.Info().Msg("adaptive buffer manager stopped")
 }

 // GetInputBufferSize returns the current recommended input buffer size
@@ -72,14 +72,14 @@ func NewAdaptiveOptimizer(latencyMonitor *LatencyMonitor, bufferManager *Adaptiv
 func (ao *AdaptiveOptimizer) Start() {
     ao.wg.Add(1)
     go ao.optimizationLoop()
-    ao.logger.Info().Msg("Adaptive optimizer started")
+    ao.logger.Debug().Msg("adaptive optimizer started")
 }

 // Stop stops the adaptive optimizer
 func (ao *AdaptiveOptimizer) Stop() {
     ao.cancel()
     ao.wg.Wait()
-    ao.logger.Info().Msg("Adaptive optimizer stopped")
+    ao.logger.Debug().Msg("adaptive optimizer stopped")
 }

 // initializeStrategies sets up the available optimization strategies

@@ -178,9 +178,9 @@ func (ao *AdaptiveOptimizer) checkStability() {
     if metrics.Current > ao.config.RollbackThreshold {
         currentLevel := int(atomic.LoadInt64(&ao.optimizationLevel))
         if currentLevel > 0 {
-            ao.logger.Warn().Dur("current_latency", metrics.Current).Dur("threshold", ao.config.RollbackThreshold).Msg("Rolling back optimizations due to excessive latency")
+            ao.logger.Warn().Dur("current_latency", metrics.Current).Dur("threshold", ao.config.RollbackThreshold).Msg("rolling back optimizations due to excessive latency")
             if err := ao.decreaseOptimization(currentLevel - 1); err != nil {
-                ao.logger.Error().Err(err).Msg("Failed to decrease optimization level")
+                ao.logger.Error().Err(err).Msg("failed to decrease optimization level")
             }
         }
     }
@@ -230,8 +230,8 @@ func SetAudioQuality(quality AudioQuality) {
     // Validate audio quality parameter
     if err := ValidateAudioQuality(quality); err != nil {
         // Log validation error but don't fail - maintain backward compatibility
-        logger := logging.GetDefaultLogger().With().Str("component", "AudioConfig").Logger()
-        logger.Error().Err(err).Int("quality", int(quality)).Msg("Invalid audio quality provided, ignoring")
+        logger := logging.GetDefaultLogger().With().Str("component", "audio").Logger()
+        logger.Warn().Err(err).Int("quality", int(quality)).Msg("invalid audio quality, using current config")
         return
     }

@@ -251,8 +251,8 @@ func SetMicrophoneQuality(quality AudioQuality) {
     // Validate audio quality parameter
     if err := ValidateAudioQuality(quality); err != nil {
         // Log validation error but don't fail - maintain backward compatibility
-        logger := logging.GetDefaultLogger().With().Str("component", "MicrophoneConfig").Logger()
-        logger.Error().Err(err).Int("quality", int(quality)).Msg("Invalid microphone quality provided, ignoring")
+        logger := logging.GetDefaultLogger().With().Str("component", "audio").Logger()
+        logger.Warn().Err(err).Int("quality", int(quality)).Msg("invalid microphone quality, using current config")
         return
     }
@@ -94,7 +94,7 @@ func testAudioQualityPresetsConsistency(t *testing.T) {
     // Verify presets have expected structure
     for i, preset := range presets {
         t.Logf("Audio preset %d: %+v", i, preset)

         // Each preset should have reasonable values
         assert.GreaterOrEqual(t, preset.Bitrate, 0, "Bitrate should be non-negative")
         assert.Greater(t, preset.SampleRate, 0, "Sample rate should be positive")

@@ -119,7 +119,7 @@ func testMicrophoneQualityPresetsConsistency(t *testing.T) {
     // Verify presets have expected structure
     for i, preset := range presets {
         t.Logf("Microphone preset %d: %+v", i, preset)

         // Each preset should have reasonable values
         assert.GreaterOrEqual(t, preset.Bitrate, 0, "Bitrate should be non-negative")
         assert.Greater(t, preset.SampleRate, 0, "Sample rate should be positive")

@@ -225,11 +225,11 @@ func testQualityPresetsImmutability(t *testing.T) {
     // Verify each preset is identical
     for quality := range presets1 {
-        assert.Equal(t, presets1[quality].Bitrate, presets2[quality].Bitrate,
+        assert.Equal(t, presets1[quality].Bitrate, presets2[quality].Bitrate,
             "Preset %v bitrate should be consistent", quality)
-        assert.Equal(t, presets1[quality].SampleRate, presets2[quality].SampleRate,
+        assert.Equal(t, presets1[quality].SampleRate, presets2[quality].SampleRate,
             "Preset %v sample rate should be consistent", quality)
-        assert.Equal(t, presets1[quality].Channels, presets2[quality].Channels,
+        assert.Equal(t, presets1[quality].Channels, presets2[quality].Channels,
             "Preset %v channels should be consistent", quality)
     }

@@ -240,11 +240,11 @@ func testQualityPresetsImmutability(t *testing.T) {
     require.Equal(t, len(micPresets1), len(micPresets2), "Microphone preset count should be consistent")

     for quality := range micPresets1 {
-        assert.Equal(t, micPresets1[quality].Bitrate, micPresets2[quality].Bitrate,
+        assert.Equal(t, micPresets1[quality].Bitrate, micPresets2[quality].Bitrate,
             "Microphone preset %v bitrate should be consistent", quality)
-        assert.Equal(t, micPresets1[quality].SampleRate, micPresets2[quality].SampleRate,
+        assert.Equal(t, micPresets1[quality].SampleRate, micPresets2[quality].SampleRate,
             "Microphone preset %v sample rate should be consistent", quality)
-        assert.Equal(t, micPresets1[quality].Channels, micPresets2[quality].Channels,
+        assert.Equal(t, micPresets1[quality].Channels, micPresets2[quality].Channels,
             "Microphone preset %v channels should be consistent", quality)
     }
 }

@@ -252,7 +252,7 @@ func testQualityPresetsImmutability(t *testing.T) {
 // TestQualityValidationRemovalRegression tests that validation removal doesn't cause regressions
 func TestQualityValidationRemovalRegression(t *testing.T) {
     // This test ensures that removing validation from GET endpoints doesn't break functionality

     // Test that presets are still accessible
     audioPresets := GetAudioQualityPresets()
     assert.NotNil(t, audioPresets, "Audio presets should be accessible after validation removal")

@@ -314,4 +314,4 @@ func TestPerformanceAfterValidationRemoval(t *testing.T) {
     maxExpectedDuration := time.Second // Very generous limit
     assert.Less(t, audioDuration, maxExpectedDuration, "Audio preset access should be fast")
     assert.Less(t, micDuration, maxExpectedDuration, "Microphone preset access should be fast")
-}
+}
@@ -96,22 +96,22 @@ func (bam *BaseAudioManager) updateLatency(latency time.Duration) {

 // logComponentStart logs component start with consistent format
 func (bam *BaseAudioManager) logComponentStart(component string) {
-    bam.logger.Info().Str("component", component).Msg("starting component")
+    bam.logger.Debug().Str("component", component).Msg("starting component")
 }

 // logComponentStarted logs component started with consistent format
 func (bam *BaseAudioManager) logComponentStarted(component string) {
-    bam.logger.Info().Str("component", component).Msg("component started successfully")
+    bam.logger.Debug().Str("component", component).Msg("component started successfully")
 }

 // logComponentStop logs component stop with consistent format
 func (bam *BaseAudioManager) logComponentStop(component string) {
-    bam.logger.Info().Str("component", component).Msg("stopping component")
+    bam.logger.Debug().Str("component", component).Msg("stopping component")
 }

 // logComponentStopped logs component stopped with consistent format
 func (bam *BaseAudioManager) logComponentStopped(component string) {
-    bam.logger.Info().Str("component", component).Msg("component stopped")
+    bam.logger.Debug().Str("component", component).Msg("component stopped")
 }

 // logComponentError logs component error with consistent format
@@ -63,12 +63,12 @@ func NewBatchAudioProcessor(batchSize int, batchDuration time.Duration) *BatchAu
     // Validate input parameters
     if err := ValidateBufferSize(batchSize); err != nil {
         logger := logging.GetDefaultLogger().With().Str("component", "batch-audio").Logger()
-        logger.Error().Err(err).Int("batchSize", batchSize).Msg("Invalid batch size provided, using default")
+        logger.Warn().Err(err).Int("batchSize", batchSize).Msg("invalid batch size, using default")
         batchSize = GetConfig().BatchProcessorFramesPerBatch
     }
     if batchDuration <= 0 {
         logger := logging.GetDefaultLogger().With().Str("component", "batch-audio").Logger()
-        logger.Error().Dur("batchDuration", batchDuration).Msg("Invalid batch duration provided, using default")
+        logger.Warn().Dur("batchDuration", batchDuration).Msg("invalid batch duration, using default")
         batchDuration = GetConfig().BatchProcessingDelay
     }

@@ -131,7 +131,7 @@ func (bap *BatchAudioProcessor) Stop() {
 func (bap *BatchAudioProcessor) BatchReadEncode(buffer []byte) (int, error) {
     // Validate buffer before processing
     if err := ValidateBufferSize(len(buffer)); err != nil {
-        bap.logger.Debug().Err(err).Msg("Invalid buffer for batch processing")
+        bap.logger.Debug().Err(err).Msg("invalid buffer for batch processing")
         return 0, err
     }

@@ -220,12 +220,12 @@ func (bap *BatchAudioProcessor) processBatchRead(batch []batchReadRequest) {

     // Set high priority for batch audio processing
     if err := SetAudioThreadPriority(); err != nil {
-        bap.logger.Warn().Err(err).Msg("Failed to set batch audio processing priority")
+        bap.logger.Warn().Err(err).Msg("failed to set batch audio processing priority")
     }

     defer func() {
         if err := ResetThreadPriority(); err != nil {
-            bap.logger.Warn().Err(err).Msg("Failed to reset thread priority")
+            bap.logger.Warn().Err(err).Msg("failed to reset thread priority")
         }
         runtime.UnlockOSThread()
         atomic.StoreInt32(&bap.threadPinned, 0)
@@ -29,7 +29,7 @@ func NewAudioBufferPool(bufferSize int) *AudioBufferPool {
     if err := ValidateBufferSize(bufferSize); err != nil {
         // Log validation error and use default value
         logger := logging.GetDefaultLogger().With().Str("component", "AudioBufferPool").Logger()
-        logger.Error().Err(err).Int("bufferSize", bufferSize).Msg("Invalid buffer size provided, using default")
+        logger.Warn().Err(err).Int("bufferSize", bufferSize).Msg("invalid buffer size, using default")
         bufferSize = GetConfig().AudioFramePoolSize
     }
@@ -8,210 +8,53 @@ import (

 // AudioConfigConstants centralizes all hardcoded values used across audio components.
 // This configuration system allows runtime tuning of audio performance, quality, and resource usage.
 // Each constant is documented with its purpose, usage location, and impact on system behavior.
 type AudioConfigConstants struct {
     // Audio Quality Presets
-    // MaxAudioFrameSize defines the maximum size of an audio frame in bytes.
-    // Used in: buffer_pool.go, adaptive_buffer.go
-    // Impact: Higher values allow larger audio chunks but increase memory usage and latency.
-    // Typical range: 1024-8192 bytes. Default 4096 provides good balance.
-    MaxAudioFrameSize int
+    MaxAudioFrameSize int // Maximum audio frame size in bytes (default: 4096)

-    // Opus Encoding Parameters - Core codec settings for audio compression
-    // OpusBitrate sets the target bitrate for Opus encoding in bits per second.
-    // Used in: cgo_audio.go for encoder initialization
-    // Impact: Higher bitrates improve audio quality but increase bandwidth usage.
-    // Range: 6000-510000 bps. 128000 (128kbps) provides high quality for most use cases.
-    OpusBitrate int
+    // Opus Encoding Parameters
+    OpusBitrate int // Target bitrate for Opus encoding in bps (default: 128000)
+    OpusComplexity int // Computational complexity 0-10 (default: 10 for best quality)
+    OpusVBR int // Variable Bit Rate: 0=CBR, 1=VBR (default: 1)
+    OpusVBRConstraint int // VBR constraint: 0=unconstrained, 1=constrained (default: 0)
+    OpusDTX int // Discontinuous Transmission: 0=disabled, 1=enabled (default: 0)

-    // OpusComplexity controls the computational complexity of Opus encoding (0-10).
-    // Used in: cgo_audio.go for encoder configuration
-    // Impact: Higher values improve quality but increase CPU usage and encoding latency.
-    // Range: 0-10. Value 10 provides best quality, 0 fastest encoding.
-    OpusComplexity int
+    // Audio Parameters
+    SampleRate int // Audio sampling frequency in Hz (default: 48000)
+    Channels int // Number of audio channels: 1=mono, 2=stereo (default: 2)
+    FrameSize int // Samples per audio frame (default: 960 for 20ms at 48kHz)
+    MaxPacketSize int // Maximum encoded packet size in bytes (default: 4000)

-    // OpusVBR enables Variable Bit Rate encoding (0=CBR, 1=VBR).
-    // Used in: cgo_audio.go for encoder mode selection
-    // Impact: VBR (1) adapts bitrate to content complexity, improving efficiency.
-    // CBR (0) maintains constant bitrate for predictable bandwidth usage.
-    OpusVBR int
+    // Audio Quality Bitrates (kbps)
+    AudioQualityLowOutputBitrate int // Low-quality output bitrate (default: 32)
+    AudioQualityLowInputBitrate int // Low-quality input bitrate (default: 16)
+    AudioQualityMediumOutputBitrate int // Medium-quality output bitrate (default: 64)
+    AudioQualityMediumInputBitrate int // Medium-quality input bitrate (default: 32)
+    AudioQualityHighOutputBitrate int // High-quality output bitrate (default: 128)
+    AudioQualityHighInputBitrate int // High-quality input bitrate (default: 64)
+    AudioQualityUltraOutputBitrate int // Ultra-quality output bitrate (default: 192)
+    AudioQualityUltraInputBitrate int // Ultra-quality input bitrate (default: 96)

-    // OpusVBRConstraint enables constrained VBR mode (0=unconstrained, 1=constrained).
-    // Used in: cgo_audio.go when VBR is enabled
-    // Impact: Constrained VBR (1) limits bitrate variation for more predictable bandwidth.
-    // Unconstrained (0) allows full bitrate adaptation for optimal quality.
-    OpusVBRConstraint int
+    // Audio Quality Sample Rates (Hz)
+    AudioQualityLowSampleRate int // Low-quality sample rate (default: 22050)
+    AudioQualityMediumSampleRate int // Medium-quality sample rate (default: 44100)
+    AudioQualityMicLowSampleRate int // Low-quality microphone sample rate (default: 16000)

-    // OpusDTX enables Discontinuous Transmission (0=disabled, 1=enabled).
-    // Used in: cgo_audio.go for encoder optimization
-    // Impact: DTX (1) reduces bandwidth during silence but may cause audio artifacts.
-    // Disabled (0) maintains constant transmission for consistent quality.
-    OpusDTX int
+    // Audio Quality Frame Sizes
+    AudioQualityLowFrameSize time.Duration // Low-quality frame duration (default: 40ms)
+    AudioQualityMediumFrameSize time.Duration // Medium-quality frame duration (default: 20ms)
+    AudioQualityHighFrameSize time.Duration // High-quality frame duration (default: 20ms)

-    // Audio Parameters - Fundamental audio stream characteristics
-    // SampleRate defines the number of audio samples per second in Hz.
-    // Used in: All audio processing components
-    // Impact: Higher rates improve frequency response but increase processing load.
-    // Common values: 16000 (voice), 44100 (CD quality), 48000 (professional).
-    SampleRate int
+    AudioQualityUltraFrameSize time.Duration // Ultra-quality frame duration (default: 10ms)

-    // Channels specifies the number of audio channels (1=mono, 2=stereo).
-    // Used in: All audio processing and encoding/decoding operations
-    // Impact: Stereo (2) provides spatial audio but doubles bandwidth and processing.
-    // Mono (1) reduces resource usage but loses spatial information.
-    Channels int
+    // Audio Quality Channels
+    AudioQualityLowChannels int // Low-quality channel count (default: 1)
+    AudioQualityMediumChannels int // Medium-quality channel count (default: 2)
+    AudioQualityHighChannels int // High-quality channel count (default: 2)
+    AudioQualityUltraChannels int // Ultra-quality channel count (default: 2)

-    // FrameSize defines the number of samples per audio frame.
-    // Used in: Opus encoding/decoding, buffer management
-    // Impact: Larger frames reduce overhead but increase latency.
-    // Must match Opus frame sizes: 120, 240, 480, 960, 1920, 2880 samples.
-    FrameSize int

-    // MaxPacketSize sets the maximum size of encoded audio packets in bytes.
-    // Used in: Network transmission, buffer allocation
-    // Impact: Larger packets reduce network overhead but increase burst bandwidth.
-    // Should accommodate worst-case Opus output plus protocol headers.
-    MaxPacketSize int

-    // Audio Quality Bitrates - Predefined quality presets for different use cases
-    // These bitrates are used in audio.go for quality level selection
-    // Impact: Higher bitrates improve audio fidelity but increase bandwidth usage

-    // AudioQualityLowOutputBitrate defines bitrate for low-quality audio output (kbps).
-    // Used in: audio.go for bandwidth-constrained scenarios
-    // Impact: Minimal bandwidth usage but reduced audio quality. Suitable for voice-only.
-    // Default 32kbps provides acceptable voice quality with very low bandwidth.
-    AudioQualityLowOutputBitrate int

-    // AudioQualityLowInputBitrate defines bitrate for low-quality audio input (kbps).
-    // Used in: audio.go for microphone input in low-bandwidth scenarios
-    // Impact: Reduces upload bandwidth but may affect voice clarity.
-    // Default 16kbps suitable for basic voice communication.
-    AudioQualityLowInputBitrate int

-    // AudioQualityMediumOutputBitrate defines bitrate for medium-quality audio output (kbps).
-    // Used in: audio.go for balanced quality/bandwidth scenarios
-    // Impact: Good balance between quality and bandwidth usage.
-    // Default 64kbps provides clear voice and acceptable music quality.
-    AudioQualityMediumOutputBitrate int

-    // AudioQualityMediumInputBitrate defines bitrate for medium-quality audio input (kbps).
-    // Used in: audio.go for microphone input with balanced quality
-    // Impact: Better voice quality than low setting with moderate bandwidth usage.
-    // Default 32kbps suitable for clear voice communication.
-    AudioQualityMediumInputBitrate int

-    // AudioQualityHighOutputBitrate defines bitrate for high-quality audio output (kbps).
-    // Used in: audio.go for high-fidelity audio scenarios
-    // Impact: Excellent audio quality but higher bandwidth requirements.
-    // Default 128kbps provides near-CD quality for music and crystal-clear voice.
-    AudioQualityHighOutputBitrate int

-    // AudioQualityHighInputBitrate defines bitrate for high-quality audio input (kbps).
-    // Used in: audio.go for high-quality microphone capture
-    // Impact: Superior voice quality but increased upload bandwidth usage.
-    // Default 64kbps suitable for professional voice communication.
-    AudioQualityHighInputBitrate int

-    // AudioQualityUltraOutputBitrate defines bitrate for ultra-high-quality audio output (kbps).
-    // Used in: audio.go for maximum quality scenarios
-    // Impact: Maximum audio fidelity but highest bandwidth consumption.
-    // Default 192kbps provides studio-quality audio for critical applications.
-    AudioQualityUltraOutputBitrate int

-    // AudioQualityUltraInputBitrate defines bitrate for ultra-high-quality audio input (kbps).
-    // Used in: audio.go for maximum quality microphone capture
-    // Impact: Best possible voice quality but maximum upload bandwidth usage.
-    // Default 96kbps suitable for broadcast-quality voice communication.
-    AudioQualityUltraInputBitrate int

-    // Audio Quality Sample Rates - Frequency sampling rates for different quality levels
-    // Used in: audio.go for configuring audio capture and playback sample rates
-    // Impact: Higher sample rates capture more frequency detail but increase processing load

-    // AudioQualityLowSampleRate defines sample rate for low-quality audio (Hz).
-    // Used in: audio.go for bandwidth-constrained scenarios
-    // Impact: Reduces frequency response but minimizes processing and bandwidth.
-    // Default 22050Hz captures frequencies up to 11kHz, adequate for voice.
-    AudioQualityLowSampleRate int

-    // AudioQualityMediumSampleRate defines sample rate for medium-quality audio (Hz).
-    // Used in: audio.go for balanced quality scenarios
-    // Impact: Good frequency response with moderate processing requirements.
-    // Default 44100Hz (CD quality) captures frequencies up to 22kHz.
-    AudioQualityMediumSampleRate int

-    // AudioQualityMicLowSampleRate defines sample rate for low-quality microphone input (Hz).
-    // Used in: audio.go for microphone capture in constrained scenarios
-    // Impact: Optimized for voice communication with minimal processing overhead.
-    // Default 16000Hz captures voice frequencies (300-3400Hz) efficiently.
-    AudioQualityMicLowSampleRate int

-    // Audio Quality Frame Sizes - Duration of audio frames for different quality levels
-    // Used in: audio.go for configuring Opus frame duration
-    // Impact: Larger frames reduce overhead but increase latency and memory usage

-    // AudioQualityLowFrameSize defines frame duration for low-quality audio.
-    // Used in: audio.go for low-latency scenarios with minimal processing
-    // Impact: Longer frames reduce CPU overhead but increase audio latency.
-    // Default 40ms provides good efficiency for voice communication.
-    AudioQualityLowFrameSize time.Duration

-    // AudioQualityMediumFrameSize defines frame duration for medium-quality audio.
-    // Used in: audio.go for balanced latency and efficiency
-    // Impact: Moderate frame size balances latency and processing efficiency.
-    // Default 20ms provides good balance for most applications.
-    AudioQualityMediumFrameSize time.Duration

-    // AudioQualityHighFrameSize defines frame duration for high-quality audio.
-    // Used in: audio.go for high-quality scenarios
-    // Impact: Optimized frame size for high-quality encoding efficiency.
-    // Default 20ms maintains low latency while supporting high bitrates.
-    AudioQualityHighFrameSize time.Duration

-    // AudioQualityUltraFrameSize defines frame duration for ultra-quality audio.
-    // Used in: audio.go for maximum quality scenarios
-    // Impact: Smaller frames reduce latency but increase processing overhead.
-    // Default 10ms provides minimal latency for real-time applications.
-    AudioQualityUltraFrameSize time.Duration

-    // Audio Quality Channels - Channel configuration for different quality levels
-    // Used in: audio.go for configuring mono/stereo audio
-    // Impact: Stereo doubles bandwidth and processing but provides spatial audio

-    // AudioQualityLowChannels defines channel count for low-quality audio.
-    // Used in: audio.go for bandwidth-constrained scenarios
-    // Impact: Mono (1) minimizes bandwidth and processing for voice communication.
-    // Default 1 (mono) suitable for voice-only applications.
-    AudioQualityLowChannels int

-    // AudioQualityMediumChannels defines channel count for medium-quality audio.
-    // Used in: audio.go for balanced quality scenarios
-    // Impact: Stereo (2) provides spatial audio with moderate bandwidth increase.
-    // Default 2 (stereo) suitable for general audio applications.
-    AudioQualityMediumChannels int

-    // AudioQualityHighChannels defines channel count for high-quality audio.
-    // Used in: audio.go for high-fidelity scenarios
-    // Impact: Stereo (2) essential for high-quality music and spatial audio.
-    // Default 2 (stereo) required for full audio experience.
-    AudioQualityHighChannels int

-    // AudioQualityUltraChannels defines channel count for ultra-quality audio.
-    // Used in: audio.go for maximum quality scenarios
-    // Impact: Stereo (2) mandatory for studio-quality audio reproduction.
-    // Default 2 (stereo) provides full spatial audio fidelity.
-    AudioQualityUltraChannels int

-    // CGO Audio Constants - Low-level C library configuration for audio processing
-    // These constants are passed to C code in cgo_audio.go for native audio operations
-    // Impact: Direct control over native audio library behavior and performance

-    // CGOOpusBitrate sets the bitrate for native Opus encoder (bits per second).
-    // Used in: cgo_audio.go update_audio_constants() function
-    // Impact: Controls quality vs bandwidth tradeoff in native encoding.
-    // Default 96000 (96kbps) provides good quality for real-time applications.
-    CGOOpusBitrate int
+    // CGO Audio Constants
+    CGOOpusBitrate int // Native Opus encoder bitrate in bps (default: 96000)

-    // CGOOpusComplexity sets computational complexity for native Opus encoder (0-10).
-    // Used in: cgo_audio.go for native encoder configuration
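For a sense of how these simplified constants relate to one another, the documented defaults can be cross-checked with a small calculation. Assuming GetConfig() exposes the AudioConfigConstants fields above (other calls in this diff, such as GetConfig().AudioFramePoolSize, suggest it does), this hypothetical check is a sketch, not code from the commit:

    cfg := GetConfig()
    // With the documented defaults (SampleRate 48000, FrameSize 960),
    // one frame covers 960/48000 s = 20 ms, which matches the medium/high
    // quality frame duration noted in AudioQualityMediumFrameSize.
    frameDuration := time.Duration(cfg.FrameSize) * time.Second / time.Duration(cfg.SampleRate)
    _ = frameDuration // 20ms at the defaults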
@@ -130,7 +130,7 @@ func (dhm *DeviceHealthMonitor) Start() error {
         return fmt.Errorf("device health monitor already running")
     }

-    dhm.logger.Info().Msg("starting device health monitor")
+    dhm.logger.Debug().Msg("device health monitor starting")
     atomic.StoreInt32(&dhm.monitoringEnabled, 1)

     go dhm.monitoringLoop()

@@ -143,7 +143,7 @@ func (dhm *DeviceHealthMonitor) Stop() {
         return
     }

-    dhm.logger.Info().Msg("stopping device health monitor")
+    dhm.logger.Debug().Msg("device health monitor stopping")
     atomic.StoreInt32(&dhm.monitoringEnabled, 0)

     close(dhm.stopChan)

@@ -152,7 +152,7 @@ func (dhm *DeviceHealthMonitor) Stop() {
     // Wait for monitoring loop to finish
     select {
     case <-dhm.doneChan:
-        dhm.logger.Info().Msg("device health monitor stopped")
+        dhm.logger.Debug().Msg("device health monitor stopped")
     case <-time.After(time.Duration(dhm.config.SupervisorTimeout)):
         dhm.logger.Warn().Msg("device health monitor stop timeout")
     }

@@ -163,7 +163,7 @@ func (dhm *DeviceHealthMonitor) RegisterRecoveryCallback(component string, callb
     dhm.callbackMutex.Lock()
     defer dhm.callbackMutex.Unlock()
     dhm.recoveryCallbacks[component] = callback
-    dhm.logger.Info().Str("component", component).Msg("registered recovery callback")
+    dhm.logger.Debug().Str("component", component).Msg("registered recovery callback")
 }

 // RecordError records an error for health tracking
@@ -144,7 +144,7 @@ func (aeb *AudioEventBroadcaster) Subscribe(connectionID string, conn *websocket
         logger: logger,
     }

-    aeb.logger.Info().Str("connectionID", connectionID).Msg("audio events subscription added")
+    aeb.logger.Debug().Str("connectionID", connectionID).Msg("audio events subscription added")

     // Send initial state to new subscriber
     go aeb.sendInitialState(connectionID)

@@ -156,7 +156,7 @@ func (aeb *AudioEventBroadcaster) Unsubscribe(connectionID string) {
     defer aeb.mutex.Unlock()

     delete(aeb.subscribers, connectionID)
-    aeb.logger.Info().Str("connectionID", connectionID).Msg("audio events subscription removed")
+    aeb.logger.Debug().Str("connectionID", connectionID).Msg("audio events subscription removed")
 }

 // BroadcastAudioMuteChanged broadcasts audio mute state changes
@@ -301,8 +301,8 @@ func (ais *AudioInputServer) acceptConnections() {
         if err != nil {
             if ais.running {
                 // Log error and continue accepting
-                logger := logging.GetDefaultLogger().With().Str("component", AudioInputServerComponent).Logger()
-                logger.Warn().Err(err).Msg("Failed to accept connection, retrying")
+                logger := logging.GetDefaultLogger().With().Str("component", "audio-input").Logger()
+                logger.Warn().Err(err).Msg("failed to accept connection, retrying")
                 continue
             }
             return

@@ -311,8 +311,8 @@ func (ais *AudioInputServer) acceptConnections() {
         // Configure socket buffers for optimal performance
         if err := ConfigureSocketBuffers(conn, ais.socketBufferConfig); err != nil {
             // Log warning but don't fail - socket buffer optimization is not critical
-            logger := logging.GetDefaultLogger().With().Str("component", AudioInputServerComponent).Logger()
-            logger.Warn().Err(err).Msg("Failed to configure socket buffers, continuing with defaults")
+            logger := logging.GetDefaultLogger().With().Str("component", "audio-input").Logger()
+            logger.Warn().Err(err).Msg("failed to configure socket buffers, using defaults")
         } else {
             // Record socket buffer metrics for monitoring
             RecordSocketBufferMetrics(conn, "audio-input")
@@ -31,7 +31,7 @@ func (aim *AudioInputIPCManager) Start() error {
         return nil
     }

-    aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("starting component")
+    aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("starting component")

     err := aim.supervisor.Start()
     if err != nil {

@@ -51,7 +51,7 @@ func (aim *AudioInputIPCManager) Start() error {

     // Validate configuration before using it
     if err := ValidateInputIPCConfig(config.SampleRate, config.Channels, config.FrameSize); err != nil {
-        aim.logger.Error().Err(err).Msg("Invalid input IPC config from constants, using defaults")
+        aim.logger.Warn().Err(err).Msg("invalid input IPC config from constants, using defaults")
         // Use safe defaults if config validation fails
         config = InputIPCConfig{
             SampleRate: 48000,

@@ -69,7 +69,7 @@ func (aim *AudioInputIPCManager) Start() error {
         aim.logger.Warn().Err(err).Str("component", AudioInputIPCComponent).Msg("failed to send initial config, will retry later")
     }

-    aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("component started successfully")
+    aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("component started successfully")
     return nil
 }

@@ -79,9 +79,9 @@ func (aim *AudioInputIPCManager) Stop() {
         return
     }

-    aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("stopping component")
+    aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("stopping component")
     aim.supervisor.Stop()
-    aim.logger.Info().Str("component", AudioInputIPCComponent).Msg("component stopped")
+    aim.logger.Debug().Str("component", AudioInputIPCComponent).Msg("component stopped")
 }

 // resetMetrics resets all metrics to zero

@@ -105,7 +105,7 @@ func (aim *AudioInputIPCManager) WriteOpusFrame(frame []byte) error {
     // Validate frame data
     if err := ValidateFrameData(frame); err != nil {
         atomic.AddInt64(&aim.metrics.FramesDropped, 1)
-        aim.logger.Debug().Err(err).Msg("Invalid frame data")
+        aim.logger.Debug().Err(err).Msg("invalid frame data")
         return err
     }

@@ -122,7 +122,7 @@ func (aim *AudioInputIPCManager) WriteOpusFrame(frame []byte) error {
     if err != nil {
         // Count as dropped frame
         atomic.AddInt64(&aim.metrics.FramesDropped, 1)
-        aim.logger.Debug().Err(err).Msg("Failed to send frame via IPC")
+        aim.logger.Debug().Err(err).Msg("failed to send frame via IPC")
         return err
     }

@@ -146,7 +146,7 @@ func (aim *AudioInputIPCManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFram
     // Validate zero-copy frame
     if err := ValidateZeroCopyFrame(frame); err != nil {
         atomic.AddInt64(&aim.metrics.FramesDropped, 1)
-        aim.logger.Debug().Err(err).Msg("Invalid zero-copy frame")
+        aim.logger.Debug().Err(err).Msg("invalid zero-copy frame")
         return err
     }

@@ -163,7 +163,7 @@ func (aim *AudioInputIPCManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFram
     if err != nil {
         // Count as dropped frame
         atomic.AddInt64(&aim.metrics.FramesDropped, 1)
-        aim.logger.Debug().Err(err).Msg("Failed to send zero-copy frame via IPC")
+        aim.logger.Debug().Err(err).Msg("failed to send zero-copy frame via IPC")
         return err
     }
@@ -14,7 +14,7 @@ import (
 // This should be called from main() when the subprocess is detected
 func RunAudioInputServer() error {
     logger := logging.GetDefaultLogger().With().Str("component", "audio-input-server").Logger()
-    logger.Info().Msg("Starting audio input server subprocess")
+    logger.Debug().Msg("audio input server subprocess starting")

     // Start adaptive buffer management for optimal performance
     StartAdaptiveBuffering()

@@ -23,7 +23,7 @@ func RunAudioInputServer() error {
     // Initialize CGO audio system
     err := CGOAudioPlaybackInit()
     if err != nil {
-        logger.Error().Err(err).Msg("Failed to initialize CGO audio playback")
+        logger.Error().Err(err).Msg("failed to initialize CGO audio playback")
         return err
     }
     defer CGOAudioPlaybackClose()

@@ -31,18 +31,18 @@ func RunAudioInputServer() error {
     // Create and start the IPC server
     server, err := NewAudioInputServer()
     if err != nil {
-        logger.Error().Err(err).Msg("Failed to create audio input server")
+        logger.Error().Err(err).Msg("failed to create audio input server")
         return err
     }
     defer server.Close()

     err = server.Start()
     if err != nil {
-        logger.Error().Err(err).Msg("Failed to start audio input server")
+        logger.Error().Err(err).Msg("failed to start audio input server")
         return err
     }

-    logger.Info().Msg("Audio input server started, waiting for connections")
+    logger.Debug().Msg("audio input server started, waiting for connections")

     // Set up signal handling for graceful shutdown
     ctx, cancel := context.WithCancel(context.Background())

@@ -54,18 +54,18 @@ func RunAudioInputServer() error {
     // Wait for shutdown signal
     select {
     case sig := <-sigChan:
-        logger.Info().Str("signal", sig.String()).Msg("Received shutdown signal")
+        logger.Info().Str("signal", sig.String()).Msg("received shutdown signal")
     case <-ctx.Done():
-        logger.Info().Msg("Context cancelled")
+        logger.Debug().Msg("context cancelled")
     }

     // Graceful shutdown
-    logger.Info().Msg("Shutting down audio input server")
+    logger.Debug().Msg("shutting down audio input server")
     server.Stop()

     // Give some time for cleanup
     time.Sleep(GetConfig().DefaultSleepDuration)

-    logger.Info().Msg("Audio input server subprocess stopped")
+    logger.Debug().Msg("audio input server subprocess stopped")
     return nil
 }
@@ -117,14 +117,14 @@ func NewLatencyMonitor(config LatencyConfig, logger zerolog.Logger) *LatencyMoni
 func (lm *LatencyMonitor) Start() {
     lm.wg.Add(1)
     go lm.monitoringLoop()
-    lm.logger.Info().Msg("Latency monitor started")
+    lm.logger.Debug().Msg("latency monitor started")
 }

 // Stop stops the latency monitor
 func (lm *LatencyMonitor) Stop() {
     lm.cancel()
     lm.wg.Wait()
-    lm.logger.Info().Msg("Latency monitor stopped")
+    lm.logger.Debug().Msg("latency monitor stopped")
 }

 // RecordLatency records a new latency measurement

@@ -263,20 +263,20 @@ func (lm *LatencyMonitor) runOptimization() {
     // Check if current latency exceeds threshold
     if metrics.Current > lm.config.MaxLatency {
         needsOptimization = true
-        lm.logger.Warn().Dur("current_latency", metrics.Current).Dur("max_latency", lm.config.MaxLatency).Msg("Latency exceeds maximum threshold")
+        lm.logger.Warn().Dur("current_latency", metrics.Current).Dur("max_latency", lm.config.MaxLatency).Msg("latency exceeds maximum threshold")
     }

     // Check if average latency is above adaptive threshold
     adaptiveThreshold := time.Duration(float64(lm.config.TargetLatency.Nanoseconds()) * (1.0 + lm.config.AdaptiveThreshold))
     if metrics.Average > adaptiveThreshold {
         needsOptimization = true
-        lm.logger.Info().Dur("average_latency", metrics.Average).Dur("threshold", adaptiveThreshold).Msg("Average latency above adaptive threshold")
+        lm.logger.Debug().Dur("average_latency", metrics.Average).Dur("threshold", adaptiveThreshold).Msg("average latency above adaptive threshold")
     }

     // Check if jitter is too high
     if metrics.Jitter > lm.config.JitterThreshold {
         needsOptimization = true
-        lm.logger.Info().Dur("jitter", metrics.Jitter).Dur("threshold", lm.config.JitterThreshold).Msg("Jitter above threshold")
+        lm.logger.Debug().Dur("jitter", metrics.Jitter).Dur("threshold", lm.config.JitterThreshold).Msg("jitter above threshold")
     }

     if needsOptimization {

@@ -290,11 +290,11 @@ func (lm *LatencyMonitor) runOptimization() {

        for _, callback := range callbacks {
            if err := callback(metrics); err != nil {
-               lm.logger.Error().Err(err).Msg("Optimization callback failed")
+               lm.logger.Error().Err(err).Msg("optimization callback failed")
            }
        }

-       lm.logger.Info().Interface("metrics", metrics).Msg("Latency optimization triggered")
+       lm.logger.Debug().Interface("metrics", metrics).Msg("latency optimization triggered")
     }
 }
@@ -156,7 +156,10 @@ func HandleMemoryMetrics(w http.ResponseWriter, r *http.Request) {
     w.Header().Set("Content-Type", "application/json")
     w.Header().Set("Cache-Control", "no-cache")

-    if err := json.NewEncoder(w).Encode(metrics); err != nil {
+    encoder := json.NewEncoder(w)
+    encoder.SetIndent("", " ")
+
+    if err := encoder.Encode(metrics); err != nil {
         logger.Error().Err(err).Msg("failed to encode memory metrics")
         http.Error(w, "Internal server error", http.StatusInternalServerError)
         return

@@ -185,7 +188,7 @@ func LogMemoryMetrics() {
 // StartMemoryMetricsLogging starts periodic memory metrics logging
 func StartMemoryMetricsLogging(interval time.Duration) {
     logger := getMemoryMetricsLogger()
-    logger.Info().Dur("interval", interval).Msg("starting memory metrics logging")
+    logger.Debug().Dur("interval", interval).Msg("memory metrics logging started")

     go func() {
         ticker := time.NewTicker(interval)
@@ -1,6 +1,7 @@
 package audio

 import (
+    "runtime"
     "sync"
     "sync/atomic"
     "time"

@@ -286,6 +287,141 @@ var (
     },
 )

+    // Device health metrics
+    deviceHealthStatus = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_device_health_status",
+            Help: "Current device health status (0=Healthy, 1=Degraded, 2=Failing, 3=Critical)",
+        },
+        []string{"device_type"}, // device_type: capture, playback
+    )
+
+    deviceHealthScore = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_device_health_score",
+            Help: "Device health score (0.0-1.0, higher is better)",
+        },
+        []string{"device_type"}, // device_type: capture, playback
+    )
+
+    deviceConsecutiveErrors = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_device_consecutive_errors",
+            Help: "Number of consecutive errors for device",
+        },
+        []string{"device_type"}, // device_type: capture, playback
+    )
+
+    deviceTotalErrors = promauto.NewCounterVec(
+        prometheus.CounterOpts{
+            Name: "jetkvm_audio_device_total_errors",
+            Help: "Total number of errors for device",
+        },
+        []string{"device_type"}, // device_type: capture, playback
+    )
+
+    deviceLatencySpikes = promauto.NewCounterVec(
+        prometheus.CounterOpts{
+            Name: "jetkvm_audio_device_latency_spikes_total",
+            Help: "Total number of latency spikes for device",
+        },
+        []string{"device_type"}, // device_type: capture, playback
+    )
+
+    // Memory metrics
+    memoryHeapAllocBytes = promauto.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_memory_heap_alloc_bytes",
+            Help: "Current heap allocation in bytes",
+        },
+    )
+
+    memoryHeapSysBytes = promauto.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_memory_heap_sys_bytes",
+            Help: "Total heap system memory in bytes",
+        },
+    )
+
+    memoryHeapObjects = promauto.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_memory_heap_objects",
+            Help: "Number of heap objects",
+        },
+    )
+
+    memoryGCCount = promauto.NewCounter(
+        prometheus.CounterOpts{
+            Name: "jetkvm_audio_memory_gc_total",
+            Help: "Total number of garbage collections",
+        },
+    )
+
+    memoryGCCPUFraction = promauto.NewGauge(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_memory_gc_cpu_fraction",
+            Help: "Fraction of CPU time spent in garbage collection",
+        },
+    )
+
+    // Buffer pool efficiency metrics
+    bufferPoolHitRate = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_buffer_pool_hit_rate_percent",
+            Help: "Buffer pool hit rate percentage",
+        },
+        []string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
+    )
+
+    bufferPoolMissRate = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_buffer_pool_miss_rate_percent",
+            Help: "Buffer pool miss rate percentage",
+        },
+        []string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
+    )
+
+    bufferPoolUtilization = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_buffer_pool_utilization_percent",
+            Help: "Buffer pool utilization percentage",
+        },
+        []string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
+    )
+
+    bufferPoolThroughput = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_buffer_pool_throughput_ops_per_sec",
+            Help: "Buffer pool throughput in operations per second",
+        },
+        []string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
+    )
+
+    bufferPoolGetLatency = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_buffer_pool_get_latency_seconds",
+            Help: "Average buffer pool get operation latency in seconds",
+        },
+        []string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
+    )
+
+    bufferPoolPutLatency = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_buffer_pool_put_latency_seconds",
+            Help: "Average buffer pool put operation latency in seconds",
+        },
+        []string{"pool_name"}, // pool_name: frame_pool, control_pool, zero_copy_pool
+    )
+
+    // Latency percentile metrics
+    latencyPercentile = promauto.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: "jetkvm_audio_latency_percentile_seconds",
+            Help: "Audio latency percentiles in seconds",
+        },
+        []string{"source", "percentile"}, // source: input, output, processing; percentile: p50, p95, p99, min, max, avg
+    )
+
     // Metrics update tracking
     metricsUpdateMutex sync.RWMutex
     lastMetricsUpdate int64

@@ -299,6 +435,15 @@ var (
     micFramesDroppedValue int64
     micBytesProcessedValue int64
     micConnectionDropsValue int64
+
+    // Atomic counters for device health metrics
+    deviceCaptureErrorsValue int64
+    devicePlaybackErrorsValue int64
+    deviceCaptureSpikesValue int64
+    devicePlaybackSpikesValue int64
+
+    // Atomic counter for memory GC
+    memoryGCCountValue uint32
 )

 // UnifiedAudioMetrics provides a common structure for both input and output audio streams

@@ -479,6 +624,95 @@ func UpdateAdaptiveBufferMetrics(inputBufferSize, outputBufferSize int, cpuPerce
     atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
 }

+// UpdateSocketBufferMetrics updates socket buffer metrics
+func UpdateSocketBufferMetrics(component, bufferType string, size, utilization float64, overflowOccurred bool) {
+    metricsUpdateMutex.Lock()
+    defer metricsUpdateMutex.Unlock()
+
+    socketBufferSizeGauge.WithLabelValues(component, bufferType).Set(size)
+    socketBufferUtilizationGauge.WithLabelValues(component, bufferType).Set(utilization)
+
+    if overflowOccurred {
+        socketBufferOverflowCounter.WithLabelValues(component, bufferType).Inc()
+    }
+
+    atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
+}
+
+// UpdateDeviceHealthMetrics updates device health metrics
+func UpdateDeviceHealthMetrics(deviceType string, status int, healthScore float64, consecutiveErrors, totalErrors, latencySpikes int64) {
+    metricsUpdateMutex.Lock()
+    defer metricsUpdateMutex.Unlock()
+
+    deviceHealthStatus.WithLabelValues(deviceType).Set(float64(status))
+    deviceHealthScore.WithLabelValues(deviceType).Set(healthScore)
+    deviceConsecutiveErrors.WithLabelValues(deviceType).Set(float64(consecutiveErrors))
+
+    // Update error counters with delta calculation
+    var prevErrors, prevSpikes int64
+    if deviceType == "capture" {
+        prevErrors = atomic.SwapInt64(&deviceCaptureErrorsValue, totalErrors)
+        prevSpikes = atomic.SwapInt64(&deviceCaptureSpikesValue, latencySpikes)
+    } else {
+        prevErrors = atomic.SwapInt64(&devicePlaybackErrorsValue, totalErrors)
+        prevSpikes = atomic.SwapInt64(&devicePlaybackSpikesValue, latencySpikes)
+    }
+
+    if prevErrors > 0 && totalErrors > prevErrors {
+        deviceTotalErrors.WithLabelValues(deviceType).Add(float64(totalErrors - prevErrors))
+    }
+    if prevSpikes > 0 && latencySpikes > prevSpikes {
+        deviceLatencySpikes.WithLabelValues(deviceType).Add(float64(latencySpikes - prevSpikes))
+    }
+
+    atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
+}
+
+// UpdateMemoryMetrics updates memory metrics
+func UpdateMemoryMetrics() {
+    var m runtime.MemStats
+    runtime.ReadMemStats(&m)
+
+    memoryHeapAllocBytes.Set(float64(m.HeapAlloc))
+    memoryHeapSysBytes.Set(float64(m.HeapSys))
+    memoryHeapObjects.Set(float64(m.HeapObjects))
+    memoryGCCPUFraction.Set(m.GCCPUFraction)
+
+    // Update GC count with delta calculation
+    currentGCCount := uint32(m.NumGC)
+    prevGCCount := atomic.SwapUint32(&memoryGCCountValue, currentGCCount)
+    if prevGCCount > 0 && currentGCCount > prevGCCount {
+        memoryGCCount.Add(float64(currentGCCount - prevGCCount))
+    }
+
+    atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
+}
+
+// UpdateBufferPoolMetrics updates buffer pool efficiency metrics
+func UpdateBufferPoolMetrics(poolName string, hitRate, missRate, utilization, throughput, getLatency, putLatency float64) {
+    metricsUpdateMutex.Lock()
+    defer metricsUpdateMutex.Unlock()
+
+    bufferPoolHitRate.WithLabelValues(poolName).Set(hitRate * 100)
+    bufferPoolMissRate.WithLabelValues(poolName).Set(missRate * 100)
+    bufferPoolUtilization.WithLabelValues(poolName).Set(utilization * 100)
+    bufferPoolThroughput.WithLabelValues(poolName).Set(throughput)
+    bufferPoolGetLatency.WithLabelValues(poolName).Set(getLatency)
+    bufferPoolPutLatency.WithLabelValues(poolName).Set(putLatency)
+
+    atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
+}
+
+// UpdateLatencyMetrics updates latency percentile metrics
+func UpdateLatencyMetrics(source, percentile string, latencySeconds float64) {
+    metricsUpdateMutex.Lock()
+    defer metricsUpdateMutex.Unlock()
+
+    latencyPercentile.WithLabelValues(source, percentile).Set(latencySeconds)
+
+    atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
+}
+
 // GetLastMetricsUpdate returns the timestamp of the last metrics update
 func GetLastMetricsUpdate() time.Time {
     timestamp := atomic.LoadInt64(&lastMetricsUpdate)

@@ -488,7 +722,7 @@ func GetLastMetricsUpdate() time.Time {
 // StartMetricsUpdater starts a goroutine that periodically updates Prometheus metrics
 func StartMetricsUpdater() {
     go func() {
-        ticker := time.NewTicker(GetConfig().StatsUpdateInterval) // Update every 5 seconds
+        ticker := time.NewTicker(5 * time.Second) // Update every 5 seconds
         defer ticker.Stop()

         for range ticker.C {

@@ -500,18 +734,14 @@ func StartMetricsUpdater() {
             micMetrics := GetAudioInputMetrics()
             UpdateMicrophoneMetrics(convertAudioInputMetricsToUnified(micMetrics))

-            // Update microphone subprocess process metrics
-            if inputSupervisor := GetAudioInputIPCSupervisor(); inputSupervisor != nil {
-                if processMetrics := inputSupervisor.GetProcessMetrics(); processMetrics != nil {
-                    UpdateMicrophoneProcessMetrics(*processMetrics, inputSupervisor.IsRunning())
-                }
-            }
-
             // Update audio configuration metrics
             audioConfig := GetAudioConfig()
             UpdateAudioConfigMetrics(audioConfig)
             micConfig := GetMicrophoneConfig()
             UpdateMicrophoneConfigMetrics(micConfig)

+            // Update memory metrics
+            UpdateMemoryMetrics()
         }
     }()
 }
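For context, the new exported helpers above might be exercised roughly like this from a monitoring loop — a hypothetical caller for illustration only; the actual wiring of the device health monitor is not part of this hunk, and the argument values are made up:

    // Publish a snapshot for the capture device: status 0 (healthy), score 0.98,
    // 0 consecutive errors, 12 total errors, 3 latency spikes so far.
    UpdateDeviceHealthMetrics("capture", 0, 0.98, 0, 12, 3)

    // Refresh the runtime memory gauges and the GC counter.
    UpdateMemoryMetrics()

    // Record a latency percentile sample for the output path (p95 = 18.5ms).
    UpdateLatencyMetrics("output", "p95", 0.0185)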
@@ -14,7 +14,7 @@ import (
 // This should be called from main() when the subprocess is detected
 func RunAudioOutputServer() error {
     logger := logging.GetDefaultLogger().With().Str("component", "audio-output-server").Logger()
-    logger.Info().Msg("Starting audio output server subprocess")
+    logger.Debug().Msg("audio output server subprocess starting")

     // Create audio server
     server, err := NewAudioOutputServer()

@@ -42,7 +42,7 @@ func RunAudioOutputServer() error {
         return err
     }

-    logger.Info().Msg("Audio output server started, waiting for connections")
+    logger.Debug().Msg("audio output server started, waiting for connections")

     // Set up signal handling for graceful shutdown
     ctx, cancel := context.WithCancel(context.Background())

@@ -54,18 +54,18 @@ func RunAudioOutputServer() error {
     // Wait for shutdown signal
     select {
     case sig := <-sigChan:
-        logger.Info().Str("signal", sig.String()).Msg("Received shutdown signal")
+        logger.Info().Str("signal", sig.String()).Msg("received shutdown signal")
     case <-ctx.Done():
-        logger.Info().Msg("Context cancelled")
+        logger.Debug().Msg("context cancelled")
     }

     // Graceful shutdown
-    logger.Info().Msg("Shutting down audio output server")
+    logger.Debug().Msg("shutting down audio output server")
     StopNonBlockingAudioStreaming()

     // Give some time for cleanup
     time.Sleep(GetConfig().DefaultSleepDuration)

-    logger.Info().Msg("Audio output server subprocess stopped")
+    logger.Debug().Msg("audio output server subprocess stopped")
     return nil
 }
@@ -69,13 +69,13 @@ func (ps *PriorityScheduler) SetThreadPriority(priority int, policy int) error {
         // If we can't set real-time priority, try nice value instead
         schedNormal, _, _ := getSchedulingPolicies()
         if policy != schedNormal {
-            ps.logger.Warn().Int("errno", int(errno)).Msg("Failed to set real-time priority, falling back to nice")
+            ps.logger.Warn().Int("errno", int(errno)).Msg("failed to set real-time priority, falling back to nice")
             return ps.setNicePriority(priority)
         }
         return errno
     }

-    ps.logger.Debug().Int("tid", tid).Int("priority", priority).Int("policy", policy).Msg("Thread priority set")
+    ps.logger.Debug().Int("tid", tid).Int("priority", priority).Int("policy", policy).Msg("thread priority set")
     return nil
 }

@@ -93,11 +93,11 @@ func (ps *PriorityScheduler) setNicePriority(rtPriority int) error {

     err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, niceValue)
     if err != nil {
-        ps.logger.Warn().Err(err).Int("nice", niceValue).Msg("Failed to set nice priority")
+        ps.logger.Warn().Err(err).Int("nice", niceValue).Msg("failed to set nice priority")
         return err
     }

-    ps.logger.Debug().Int("nice", niceValue).Msg("Nice priority set as fallback")
+    ps.logger.Debug().Int("nice", niceValue).Msg("nice priority set as fallback")
     return nil
 }

@@ -132,13 +132,13 @@ func (ps *PriorityScheduler) ResetPriority() error {
 // Disable disables priority scheduling (useful for testing or fallback)
 func (ps *PriorityScheduler) Disable() {
     ps.enabled = false
-    ps.logger.Info().Msg("Priority scheduling disabled")
+    ps.logger.Debug().Msg("priority scheduling disabled")
 }

 // Enable enables priority scheduling
 func (ps *PriorityScheduler) Enable() {
     ps.enabled = true
-    ps.logger.Info().Msg("Priority scheduling enabled")
+    ps.logger.Debug().Msg("priority scheduling enabled")
 }

 // Global priority scheduler instance
@@ -95,7 +95,7 @@ func (pm *ProcessMonitor) Start() {

     pm.running = true
     go pm.monitorLoop()
-    pm.logger.Info().Msg("Process monitor started")
+    pm.logger.Debug().Msg("process monitor started")
 }

 // Stop stops monitoring processes

@@ -109,7 +109,7 @@ func (pm *ProcessMonitor) Stop() {

     pm.running = false
     close(pm.stopChan)
-    pm.logger.Info().Msg("Process monitor stopped")
+    pm.logger.Debug().Msg("process monitor stopped")
 }

 // AddProcess adds a process to monitor
@@ -210,10 +210,10 @@ func testMemoryLeakPrevention(t *testing.T) {
 func testConfigValidationEdgeCases(t *testing.T) {
     // Test sample rate edge cases
     testCases := []struct {
-        sampleRate int
-        channels   int
-        frameSize  int
-        shouldPass bool
+        sampleRate  int
+        channels    int
+        frameSize   int
+        shouldPass  bool
+        description string
     }{
         {0, 2, 960, false, "zero sample rate"},

@@ -359,4 +359,4 @@ func testResourceExhaustionRecovery(t *testing.T) {
     }
     manager.Stop()
     assert.False(t, manager.IsRunning())
-}
+}
@@ -140,17 +140,17 @@ func (r *AudioRelay) relayLoop() {
     for {
         select {
         case <-r.ctx.Done():
-            r.logger.Debug().Msg("Audio relay loop stopping")
+            r.logger.Debug().Msg("audio relay loop stopping")
             return
         default:
             frame, err := r.client.ReceiveFrame()
             if err != nil {
                 consecutiveErrors++
-                r.logger.Error().Err(err).Int("consecutive_errors", consecutiveErrors).Msg("Error reading frame from audio output server")
+                r.logger.Error().Err(err).Int("consecutive_errors", consecutiveErrors).Msg("error reading frame from audio output server")
                 r.incrementDropped()

                 if consecutiveErrors >= maxConsecutiveErrors {
-                    r.logger.Error().Msgf("Too many consecutive read errors (%d/%d), stopping audio relay", consecutiveErrors, maxConsecutiveErrors)
+                    r.logger.Error().Int("consecutive_errors", consecutiveErrors).Int("max_errors", maxConsecutiveErrors).Msg("too many consecutive read errors, stopping audio relay")
                     return
                 }
                 time.Sleep(GetConfig().ShortSleepDuration)

@@ -159,7 +159,7 @@ func (r *AudioRelay) relayLoop() {

             consecutiveErrors = 0
             if err := r.forwardToWebRTC(frame); err != nil {
-                r.logger.Warn().Err(err).Msg("Failed to forward frame to WebRTC")
+                r.logger.Warn().Err(err).Msg("failed to forward frame to webrtc")
                 r.incrementDropped()
             } else {
                 r.incrementRelayed()

@@ -173,7 +173,7 @@ func (r *AudioRelay) forwardToWebRTC(frame []byte) error {
     // Validate frame data before processing
     if err := ValidateFrameData(frame); err != nil {
         r.incrementDropped()
-        r.logger.Debug().Err(err).Msg("Invalid frame data in relay")
+        r.logger.Debug().Err(err).Msg("invalid frame data in relay")
         return err
     }
@@ -288,10 +288,10 @@ func (s *AudioOutputSupervisor) waitForProcessExit() {
     s.processMonitor.RemoveProcess(pid)

     if crashed {
-        s.logger.Error().Int("pid", pid).Int("exit_code", exitCode).Msg("audio server process crashed")
+        s.logger.Error().Int("pid", pid).Int("exit_code", exitCode).Msg("audio output server process crashed")
         s.recordRestartAttempt()
     } else {
-        s.logger.Info().Int("pid", pid).Msg("audio server process exited gracefully")
+        s.logger.Info().Int("pid", pid).Msg("audio output server process exited gracefully")
     }

     if s.onProcessExit != nil {

@@ -310,11 +310,11 @@ func (s *AudioOutputSupervisor) terminateProcess() {
         return
     }

-    s.logger.Info().Int("pid", pid).Msg("terminating audio server process")
+    s.logger.Info().Int("pid", pid).Msg("terminating audio output server process")

     // Send SIGTERM first
     if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
-        s.logger.Warn().Err(err).Int("pid", pid).Msg("failed to send SIGTERM")
+        s.logger.Warn().Err(err).Int("pid", pid).Msg("failed to send SIGTERM to audio output server process")
     }

     // Wait for graceful shutdown