mirror of https://github.com/jetkvm/kvm.git
Compare commits
No commits in common. "b63404c26b8da38b68975e8ec12a8f9f1abf5ec6" and "260f62efc3ffb6aa95fd106db50f0d834f295e5d" have entirely different histories.
b63404c26b
...
260f62efc3
24
cloud.go
24
cloud.go
|
|
@ -77,6 +77,23 @@ var (
|
|||
},
|
||||
[]string{"type", "source"},
|
||||
)
|
||||
metricConnectionPingDuration = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "jetkvm_connection_ping_duration_seconds",
|
||||
Help: "The duration of the ping response",
|
||||
Buckets: []float64{
|
||||
0.1, 0.5, 1, 10,
|
||||
},
|
||||
},
|
||||
[]string{"type", "source"},
|
||||
)
|
||||
metricConnectionTotalPingSentCount = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "jetkvm_connection_ping_sent_total",
|
||||
Help: "The total number of pings sent to the connection",
|
||||
},
|
||||
[]string{"type", "source"},
|
||||
)
|
||||
metricConnectionTotalPingReceivedCount = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "jetkvm_connection_ping_received_total",
|
||||
|
|
@ -84,6 +101,13 @@ var (
|
|||
},
|
||||
[]string{"type", "source"},
|
||||
)
|
||||
metricConnectionSessionRequestCount = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "jetkvm_connection_session_requests_total",
|
||||
Help: "The total number of session requests received",
|
||||
},
|
||||
[]string{"type", "source"},
|
||||
)
|
||||
metricConnectionSessionRequestDuration = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "jetkvm_connection_session_request_duration_seconds",
|
||||
|
|
|
|||
|
|
@ -0,0 +1,560 @@
|
|||
package kvm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Test validateFloat64Param function
|
||||
func TestValidateFloat64Param(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
params map[string]interface{}
|
||||
paramName string
|
||||
methodName string
|
||||
min float64
|
||||
max float64
|
||||
expected float64
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid parameter",
|
||||
params: map[string]interface{}{"test": 50.0},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 50.0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "parameter at minimum boundary",
|
||||
params: map[string]interface{}{"test": 0.0},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 0.0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "parameter at maximum boundary",
|
||||
params: map[string]interface{}{"test": 100.0},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 100.0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "parameter below minimum",
|
||||
params: map[string]interface{}{"test": -1.0},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 0,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "parameter above maximum",
|
||||
params: map[string]interface{}{"test": 101.0},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 0,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "wrong parameter type",
|
||||
params: map[string]interface{}{"test": "not a number"},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 0,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "missing parameter",
|
||||
params: map[string]interface{}{},
|
||||
paramName: "test",
|
||||
methodName: "testMethod",
|
||||
min: 0,
|
||||
max: 100,
|
||||
expected: 0,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := validateFloat64Param(tt.params, tt.paramName, tt.methodName, tt.min, tt.max)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test validateKeysArray function
|
||||
func TestValidateKeysArray(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
params map[string]interface{}
|
||||
methodName string
|
||||
expected []uint8
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid keys array",
|
||||
params: map[string]interface{}{"keys": []interface{}{65.0, 66.0, 67.0}},
|
||||
methodName: "testMethod",
|
||||
expected: []uint8{65, 66, 67},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "empty keys array",
|
||||
params: map[string]interface{}{"keys": []interface{}{}},
|
||||
methodName: "testMethod",
|
||||
expected: []uint8{},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "maximum keys array",
|
||||
params: map[string]interface{}{"keys": []interface{}{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}},
|
||||
methodName: "testMethod",
|
||||
expected: []uint8{1, 2, 3, 4, 5, 6},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "too many keys",
|
||||
params: map[string]interface{}{"keys": []interface{}{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}},
|
||||
methodName: "testMethod",
|
||||
expected: nil,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid key type",
|
||||
params: map[string]interface{}{"keys": []interface{}{"not a number"}},
|
||||
methodName: "testMethod",
|
||||
expected: nil,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "key value out of range (negative)",
|
||||
params: map[string]interface{}{"keys": []interface{}{-1.0}},
|
||||
methodName: "testMethod",
|
||||
expected: nil,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "key value out of range (too high)",
|
||||
params: map[string]interface{}{"keys": []interface{}{256.0}},
|
||||
methodName: "testMethod",
|
||||
expected: nil,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "wrong parameter type",
|
||||
params: map[string]interface{}{"keys": "not an array"},
|
||||
methodName: "testMethod",
|
||||
expected: nil,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "missing keys parameter",
|
||||
params: map[string]interface{}{},
|
||||
methodName: "testMethod",
|
||||
expected: nil,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := validateKeysArray(tt.params, tt.methodName)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test handleKeyboardReportDirect function
|
||||
func TestHandleKeyboardReportDirect(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
params map[string]interface{}
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid keyboard report",
|
||||
params: map[string]interface{}{
|
||||
"modifier": 2.0, // Shift key
|
||||
"keys": []interface{}{65.0, 66.0}, // A, B keys
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "empty keys array",
|
||||
params: map[string]interface{}{
|
||||
"modifier": 0.0,
|
||||
"keys": []interface{}{},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid modifier",
|
||||
params: map[string]interface{}{
|
||||
"modifier": 256.0, // Out of range
|
||||
"keys": []interface{}{65.0},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid keys",
|
||||
params: map[string]interface{}{
|
||||
"modifier": 0.0,
|
||||
"keys": []interface{}{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0}, // Too many keys
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := handleKeyboardReportDirect(tt.params)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test handleAbsMouseReportDirect function
|
||||
func TestHandleAbsMouseReportDirect(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
params map[string]interface{}
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid absolute mouse report",
|
||||
params: map[string]interface{}{
|
||||
"x": 1000.0,
|
||||
"y": 500.0,
|
||||
"buttons": 1.0, // Left button
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "boundary values",
|
||||
params: map[string]interface{}{
|
||||
"x": 0.0,
|
||||
"y": 32767.0,
|
||||
"buttons": 255.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid x coordinate",
|
||||
params: map[string]interface{}{
|
||||
"x": -1.0, // Out of range
|
||||
"y": 500.0,
|
||||
"buttons": 0.0,
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid y coordinate",
|
||||
params: map[string]interface{}{
|
||||
"x": 1000.0,
|
||||
"y": 32768.0, // Out of range
|
||||
"buttons": 0.0,
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid buttons",
|
||||
params: map[string]interface{}{
|
||||
"x": 1000.0,
|
||||
"y": 500.0,
|
||||
"buttons": 256.0, // Out of range
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := handleAbsMouseReportDirect(tt.params)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test handleRelMouseReportDirect function
|
||||
func TestHandleRelMouseReportDirect(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
params map[string]interface{}
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid relative mouse report",
|
||||
params: map[string]interface{}{
|
||||
"dx": 10.0,
|
||||
"dy": -5.0,
|
||||
"buttons": 2.0, // Right button
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "boundary values",
|
||||
params: map[string]interface{}{
|
||||
"dx": -127.0,
|
||||
"dy": 127.0,
|
||||
"buttons": 0.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid dx",
|
||||
params: map[string]interface{}{
|
||||
"dx": -128.0, // Out of range
|
||||
"dy": 0.0,
|
||||
"buttons": 0.0,
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid dy",
|
||||
params: map[string]interface{}{
|
||||
"dx": 0.0,
|
||||
"dy": 128.0, // Out of range
|
||||
"buttons": 0.0,
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := handleRelMouseReportDirect(tt.params)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test handleWheelReportDirect function
|
||||
func TestHandleWheelReportDirect(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
params map[string]interface{}
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "valid wheel report",
|
||||
params: map[string]interface{}{
|
||||
"wheelY": 3.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "boundary values",
|
||||
params: map[string]interface{}{
|
||||
"wheelY": -127.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid wheelY",
|
||||
params: map[string]interface{}{
|
||||
"wheelY": 128.0, // Out of range
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := handleWheelReportDirect(tt.params)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test handleInputRPCDirect function
|
||||
func TestHandleInputRPCDirect(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
method string
|
||||
params map[string]interface{}
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "keyboard report",
|
||||
method: "keyboardReport",
|
||||
params: map[string]interface{}{
|
||||
"modifier": 0.0,
|
||||
"keys": []interface{}{65.0},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "absolute mouse report",
|
||||
method: "absMouseReport",
|
||||
params: map[string]interface{}{
|
||||
"x": 1000.0,
|
||||
"y": 500.0,
|
||||
"buttons": 1.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "relative mouse report",
|
||||
method: "relMouseReport",
|
||||
params: map[string]interface{}{
|
||||
"dx": 10.0,
|
||||
"dy": -5.0,
|
||||
"buttons": 2.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "wheel report",
|
||||
method: "wheelReport",
|
||||
params: map[string]interface{}{
|
||||
"wheelY": 3.0,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "unknown method",
|
||||
method: "unknownMethod",
|
||||
params: map[string]interface{}{},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := handleInputRPCDirect(tt.method, tt.params)
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test isInputMethod function
|
||||
func TestIsInputMethod(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
method string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "keyboard report method",
|
||||
method: "keyboardReport",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "absolute mouse report method",
|
||||
method: "absMouseReport",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "relative mouse report method",
|
||||
method: "relMouseReport",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "wheel report method",
|
||||
method: "wheelReport",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "non-input method",
|
||||
method: "someOtherMethod",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "empty method",
|
||||
method: "",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := isInputMethod(tt.method)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark tests to verify performance improvements
|
||||
func BenchmarkValidateFloat64Param(b *testing.B) {
|
||||
params := map[string]interface{}{"test": 50.0}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = validateFloat64Param(params, "test", "benchmarkMethod", 0, 100)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkValidateKeysArray(b *testing.B) {
|
||||
params := map[string]interface{}{"keys": []interface{}{65.0, 66.0, 67.0}}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = validateKeysArray(params, "benchmarkMethod")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHandleKeyboardReportDirect(b *testing.B) {
|
||||
params := map[string]interface{}{
|
||||
"modifier": 2.0,
|
||||
"keys": []interface{}{65.0, 66.0},
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = handleKeyboardReportDirect(params)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHandleInputRPCDirect(b *testing.B) {
|
||||
params := map[string]interface{}{
|
||||
"modifier": 2.0,
|
||||
"keys": []interface{}{65.0, 66.0},
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = handleInputRPCDirect("keyboardReport", params)
|
||||
}
|
||||
}
|
||||
|
|
@ -152,6 +152,17 @@ func (abm *AdaptiveBufferManager) GetOutputBufferSize() int {
|
|||
|
||||
// UpdateLatency updates the current latency measurement
|
||||
func (abm *AdaptiveBufferManager) UpdateLatency(latency time.Duration) {
|
||||
// Use exponential moving average for latency
|
||||
currentAvg := atomic.LoadInt64(&abm.averageLatency)
|
||||
newLatency := latency.Nanoseconds()
|
||||
|
||||
if currentAvg == 0 {
|
||||
atomic.StoreInt64(&abm.averageLatency, newLatency)
|
||||
} else {
|
||||
// Exponential moving average: 70% historical, 30% current
|
||||
newAvg := int64(float64(currentAvg)*GetConfig().HistoricalWeight + float64(newLatency)*GetConfig().CurrentWeight)
|
||||
atomic.StoreInt64(&abm.averageLatency, newAvg)
|
||||
}
|
||||
}
|
||||
|
||||
// adaptationLoop is the main loop that adjusts buffer sizes
|
||||
|
|
|
|||
|
|
@ -84,21 +84,3 @@ func GetAudioInputSupervisor() *AudioInputSupervisor {
|
|||
}
|
||||
return (*AudioInputSupervisor)(ptr)
|
||||
}
|
||||
|
||||
// PrewarmAudioInputSubprocess starts an audio input subprocess in advance to reduce activation latency
|
||||
func PrewarmAudioInputSubprocess() error {
|
||||
supervisor := GetAudioInputSupervisor()
|
||||
if supervisor == nil {
|
||||
return nil // No supervisor available, skip prewarming
|
||||
}
|
||||
return supervisor.PrewarmSubprocess()
|
||||
}
|
||||
|
||||
// IsAudioInputSubprocessPrewarmed returns whether an audio input subprocess is prewarmed and ready
|
||||
func IsAudioInputSubprocessPrewarmed() bool {
|
||||
supervisor := GetAudioInputSupervisor()
|
||||
if supervisor == nil {
|
||||
return false
|
||||
}
|
||||
return supervisor.IsPrewarmed()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,6 +23,8 @@
|
|||
// SetAudioQuality(AudioQualityHigh)
|
||||
//
|
||||
// // Audio output will automatically start when frames are received
|
||||
// metrics := GetAudioMetrics()
|
||||
// fmt.Printf("Latency: %v, Frames: %d\n", metrics.AverageLatency, metrics.FramesReceived)
|
||||
package audio
|
||||
|
||||
import (
|
||||
|
|
@ -330,60 +332,42 @@ func GetMicrophoneConfig() AudioConfig {
|
|||
return currentMicrophoneConfig
|
||||
}
|
||||
|
||||
// Batched metrics to reduce atomic operations frequency
|
||||
var (
|
||||
batchedFramesReceived int64
|
||||
batchedBytesProcessed int64
|
||||
batchedFramesDropped int64
|
||||
batchedConnectionDrops int64
|
||||
// GetAudioMetrics returns current audio metrics
|
||||
func GetAudioMetrics() AudioMetrics {
|
||||
// Get base metrics
|
||||
framesReceived := atomic.LoadInt64(&metrics.FramesReceived)
|
||||
framesDropped := atomic.LoadInt64(&metrics.FramesDropped)
|
||||
|
||||
lastFlushTime int64 // Unix timestamp in nanoseconds
|
||||
)
|
||||
// If audio relay is running, use relay stats instead
|
||||
if IsAudioRelayRunning() {
|
||||
relayReceived, relayDropped := GetAudioRelayStats()
|
||||
framesReceived = relayReceived
|
||||
framesDropped = relayDropped
|
||||
}
|
||||
|
||||
// RecordFrameReceived increments the frames received counter with batched updates
|
||||
return AudioMetrics{
|
||||
FramesReceived: framesReceived,
|
||||
FramesDropped: framesDropped,
|
||||
BytesProcessed: atomic.LoadInt64(&metrics.BytesProcessed),
|
||||
LastFrameTime: metrics.LastFrameTime,
|
||||
ConnectionDrops: atomic.LoadInt64(&metrics.ConnectionDrops),
|
||||
AverageLatency: metrics.AverageLatency,
|
||||
}
|
||||
}
|
||||
|
||||
// RecordFrameReceived increments the frames received counter
|
||||
func RecordFrameReceived(bytes int) {
|
||||
// Use local batching to reduce atomic operations frequency
|
||||
atomic.AddInt64(&batchedBytesProcessed, int64(bytes))
|
||||
|
||||
// Update timestamp immediately for accurate tracking
|
||||
atomic.AddInt64(&metrics.FramesReceived, 1)
|
||||
atomic.AddInt64(&metrics.BytesProcessed, int64(bytes))
|
||||
metrics.LastFrameTime = time.Now()
|
||||
}
|
||||
|
||||
// RecordFrameDropped increments the frames dropped counter with batched updates
|
||||
// RecordFrameDropped increments the frames dropped counter
|
||||
func RecordFrameDropped() {
|
||||
atomic.AddInt64(&metrics.FramesDropped, 1)
|
||||
}
|
||||
|
||||
// RecordConnectionDrop increments the connection drops counter with batched updates
|
||||
// RecordConnectionDrop increments the connection drops counter
|
||||
func RecordConnectionDrop() {
|
||||
}
|
||||
|
||||
// flushBatchedMetrics flushes accumulated metrics to the main counters
|
||||
func flushBatchedMetrics() {
|
||||
// Atomically move batched metrics to main metrics
|
||||
framesReceived := atomic.SwapInt64(&batchedFramesReceived, 0)
|
||||
bytesProcessed := atomic.SwapInt64(&batchedBytesProcessed, 0)
|
||||
framesDropped := atomic.SwapInt64(&batchedFramesDropped, 0)
|
||||
connectionDrops := atomic.SwapInt64(&batchedConnectionDrops, 0)
|
||||
|
||||
// Update main metrics if we have any batched data
|
||||
if framesReceived > 0 {
|
||||
atomic.AddInt64(&metrics.FramesReceived, framesReceived)
|
||||
}
|
||||
if bytesProcessed > 0 {
|
||||
atomic.AddInt64(&metrics.BytesProcessed, bytesProcessed)
|
||||
}
|
||||
if framesDropped > 0 {
|
||||
atomic.AddInt64(&metrics.FramesDropped, framesDropped)
|
||||
}
|
||||
if connectionDrops > 0 {
|
||||
atomic.AddInt64(&metrics.ConnectionDrops, connectionDrops)
|
||||
}
|
||||
|
||||
// Update last flush time
|
||||
atomic.StoreInt64(&lastFlushTime, time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// FlushPendingMetrics forces a flush of all batched metrics
|
||||
func FlushPendingMetrics() {
|
||||
flushBatchedMetrics()
|
||||
atomic.AddInt64(&metrics.ConnectionDrops, 1)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,317 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAudioQualityEdgeCases tests edge cases for audio quality functions
|
||||
// These tests ensure the recent validation removal doesn't introduce regressions
|
||||
func TestAudioQualityEdgeCases(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"AudioQualityBoundaryValues", testAudioQualityBoundaryValues},
|
||||
{"MicrophoneQualityBoundaryValues", testMicrophoneQualityBoundaryValues},
|
||||
{"AudioQualityPresetsConsistency", testAudioQualityPresetsConsistency},
|
||||
{"MicrophoneQualityPresetsConsistency", testMicrophoneQualityPresetsConsistency},
|
||||
{"QualitySettingsThreadSafety", testQualitySettingsThreadSafety},
|
||||
{"QualityPresetsImmutability", testQualityPresetsImmutability},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAudioQualityBoundaryValues tests boundary values for audio quality
|
||||
func testAudioQualityBoundaryValues(t *testing.T) {
|
||||
// Test minimum valid quality (0)
|
||||
originalConfig := GetAudioConfig()
|
||||
SetAudioQuality(AudioQualityLow)
|
||||
assert.Equal(t, AudioQualityLow, GetAudioConfig().Quality, "Should accept minimum quality value")
|
||||
|
||||
// Test maximum valid quality (3)
|
||||
SetAudioQuality(AudioQualityUltra)
|
||||
assert.Equal(t, AudioQualityUltra, GetAudioConfig().Quality, "Should accept maximum quality value")
|
||||
|
||||
// Test that quality settings work correctly
|
||||
SetAudioQuality(AudioQualityMedium)
|
||||
currentConfig := GetAudioConfig()
|
||||
assert.Equal(t, AudioQualityMedium, currentConfig.Quality, "Should set medium quality")
|
||||
t.Logf("Medium quality config: %+v", currentConfig)
|
||||
|
||||
SetAudioQuality(AudioQualityHigh)
|
||||
currentConfig = GetAudioConfig()
|
||||
assert.Equal(t, AudioQualityHigh, currentConfig.Quality, "Should set high quality")
|
||||
t.Logf("High quality config: %+v", currentConfig)
|
||||
|
||||
// Restore original quality
|
||||
SetAudioQuality(originalConfig.Quality)
|
||||
}
|
||||
|
||||
// testMicrophoneQualityBoundaryValues tests boundary values for microphone quality
|
||||
func testMicrophoneQualityBoundaryValues(t *testing.T) {
|
||||
// Test minimum valid quality
|
||||
originalConfig := GetMicrophoneConfig()
|
||||
SetMicrophoneQuality(AudioQualityLow)
|
||||
assert.Equal(t, AudioQualityLow, GetMicrophoneConfig().Quality, "Should accept minimum microphone quality value")
|
||||
|
||||
// Test maximum valid quality
|
||||
SetMicrophoneQuality(AudioQualityUltra)
|
||||
assert.Equal(t, AudioQualityUltra, GetMicrophoneConfig().Quality, "Should accept maximum microphone quality value")
|
||||
|
||||
// Test that quality settings work correctly
|
||||
SetMicrophoneQuality(AudioQualityMedium)
|
||||
currentConfig := GetMicrophoneConfig()
|
||||
assert.Equal(t, AudioQualityMedium, currentConfig.Quality, "Should set medium microphone quality")
|
||||
t.Logf("Medium microphone quality config: %+v", currentConfig)
|
||||
|
||||
SetMicrophoneQuality(AudioQualityHigh)
|
||||
currentConfig = GetMicrophoneConfig()
|
||||
assert.Equal(t, AudioQualityHigh, currentConfig.Quality, "Should set high microphone quality")
|
||||
t.Logf("High microphone quality config: %+v", currentConfig)
|
||||
|
||||
// Restore original quality
|
||||
SetMicrophoneQuality(originalConfig.Quality)
|
||||
}
|
||||
|
||||
// testAudioQualityPresetsConsistency tests consistency of audio quality presets
|
||||
func testAudioQualityPresetsConsistency(t *testing.T) {
|
||||
presets := GetAudioQualityPresets()
|
||||
require.NotNil(t, presets, "Audio quality presets should not be nil")
|
||||
require.NotEmpty(t, presets, "Audio quality presets should not be empty")
|
||||
|
||||
// Verify presets have expected structure
|
||||
for i, preset := range presets {
|
||||
t.Logf("Audio preset %d: %+v", i, preset)
|
||||
|
||||
// Each preset should have reasonable values
|
||||
assert.GreaterOrEqual(t, preset.Bitrate, 0, "Bitrate should be non-negative")
|
||||
assert.Greater(t, preset.SampleRate, 0, "Sample rate should be positive")
|
||||
assert.Greater(t, preset.Channels, 0, "Channels should be positive")
|
||||
}
|
||||
|
||||
// Test that presets are accessible by valid quality levels
|
||||
qualityLevels := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
|
||||
for _, quality := range qualityLevels {
|
||||
preset, exists := presets[quality]
|
||||
assert.True(t, exists, "Preset should exist for quality %v", quality)
|
||||
assert.Greater(t, preset.Bitrate, 0, "Preset bitrate should be positive for quality %v", quality)
|
||||
}
|
||||
}
|
||||
|
||||
// testMicrophoneQualityPresetsConsistency tests consistency of microphone quality presets
|
||||
func testMicrophoneQualityPresetsConsistency(t *testing.T) {
|
||||
presets := GetMicrophoneQualityPresets()
|
||||
require.NotNil(t, presets, "Microphone quality presets should not be nil")
|
||||
require.NotEmpty(t, presets, "Microphone quality presets should not be empty")
|
||||
|
||||
// Verify presets have expected structure
|
||||
for i, preset := range presets {
|
||||
t.Logf("Microphone preset %d: %+v", i, preset)
|
||||
|
||||
// Each preset should have reasonable values
|
||||
assert.GreaterOrEqual(t, preset.Bitrate, 0, "Bitrate should be non-negative")
|
||||
assert.Greater(t, preset.SampleRate, 0, "Sample rate should be positive")
|
||||
assert.Greater(t, preset.Channels, 0, "Channels should be positive")
|
||||
}
|
||||
|
||||
// Test that presets are accessible by valid quality levels
|
||||
qualityLevels := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
|
||||
for _, quality := range qualityLevels {
|
||||
preset, exists := presets[quality]
|
||||
assert.True(t, exists, "Microphone preset should exist for quality %v", quality)
|
||||
assert.Greater(t, preset.Bitrate, 0, "Microphone preset bitrate should be positive for quality %v", quality)
|
||||
}
|
||||
}
|
||||
|
||||
// testQualitySettingsThreadSafety tests thread safety of quality settings
|
||||
func testQualitySettingsThreadSafety(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping thread safety test in short mode")
|
||||
}
|
||||
|
||||
originalAudioConfig := GetAudioConfig()
|
||||
originalMicConfig := GetMicrophoneConfig()
|
||||
|
||||
// Test concurrent access to quality settings
|
||||
const numGoroutines = 50
|
||||
const numOperations = 100
|
||||
|
||||
done := make(chan bool, numGoroutines*2)
|
||||
|
||||
// Audio quality goroutines
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func(id int) {
|
||||
for j := 0; j < numOperations; j++ {
|
||||
// Cycle through valid quality values
|
||||
qualityIndex := j % 4
|
||||
var quality AudioQuality
|
||||
switch qualityIndex {
|
||||
case 0:
|
||||
quality = AudioQualityLow
|
||||
case 1:
|
||||
quality = AudioQualityMedium
|
||||
case 2:
|
||||
quality = AudioQualityHigh
|
||||
case 3:
|
||||
quality = AudioQualityUltra
|
||||
}
|
||||
SetAudioQuality(quality)
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
done <- true
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Microphone quality goroutines
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func(id int) {
|
||||
for j := 0; j < numOperations; j++ {
|
||||
// Cycle through valid quality values
|
||||
qualityIndex := j % 4
|
||||
var quality AudioQuality
|
||||
switch qualityIndex {
|
||||
case 0:
|
||||
quality = AudioQualityLow
|
||||
case 1:
|
||||
quality = AudioQualityMedium
|
||||
case 2:
|
||||
quality = AudioQualityHigh
|
||||
case 3:
|
||||
quality = AudioQualityUltra
|
||||
}
|
||||
SetMicrophoneQuality(quality)
|
||||
_ = GetMicrophoneConfig()
|
||||
}
|
||||
done <- true
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all goroutines to complete
|
||||
for i := 0; i < numGoroutines*2; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
// Verify system is still functional
|
||||
SetAudioQuality(AudioQualityHigh)
|
||||
assert.Equal(t, AudioQualityHigh, GetAudioConfig().Quality, "Audio quality should be settable after concurrent access")
|
||||
|
||||
SetMicrophoneQuality(AudioQualityMedium)
|
||||
assert.Equal(t, AudioQualityMedium, GetMicrophoneConfig().Quality, "Microphone quality should be settable after concurrent access")
|
||||
|
||||
// Restore original values
|
||||
SetAudioQuality(originalAudioConfig.Quality)
|
||||
SetMicrophoneQuality(originalMicConfig.Quality)
|
||||
}
|
||||
|
||||
// testQualityPresetsImmutability tests that quality presets are not accidentally modified
|
||||
func testQualityPresetsImmutability(t *testing.T) {
|
||||
// Get presets multiple times and verify they're consistent
|
||||
presets1 := GetAudioQualityPresets()
|
||||
presets2 := GetAudioQualityPresets()
|
||||
|
||||
require.Equal(t, len(presets1), len(presets2), "Preset count should be consistent")
|
||||
|
||||
// Verify each preset is identical
|
||||
for quality := range presets1 {
|
||||
assert.Equal(t, presets1[quality].Bitrate, presets2[quality].Bitrate,
|
||||
"Preset %v bitrate should be consistent", quality)
|
||||
assert.Equal(t, presets1[quality].SampleRate, presets2[quality].SampleRate,
|
||||
"Preset %v sample rate should be consistent", quality)
|
||||
assert.Equal(t, presets1[quality].Channels, presets2[quality].Channels,
|
||||
"Preset %v channels should be consistent", quality)
|
||||
}
|
||||
|
||||
// Test microphone presets as well
|
||||
micPresets1 := GetMicrophoneQualityPresets()
|
||||
micPresets2 := GetMicrophoneQualityPresets()
|
||||
|
||||
require.Equal(t, len(micPresets1), len(micPresets2), "Microphone preset count should be consistent")
|
||||
|
||||
for quality := range micPresets1 {
|
||||
assert.Equal(t, micPresets1[quality].Bitrate, micPresets2[quality].Bitrate,
|
||||
"Microphone preset %v bitrate should be consistent", quality)
|
||||
assert.Equal(t, micPresets1[quality].SampleRate, micPresets2[quality].SampleRate,
|
||||
"Microphone preset %v sample rate should be consistent", quality)
|
||||
assert.Equal(t, micPresets1[quality].Channels, micPresets2[quality].Channels,
|
||||
"Microphone preset %v channels should be consistent", quality)
|
||||
}
|
||||
}
|
||||
|
||||
// TestQualityValidationRemovalRegression tests that validation removal doesn't cause regressions
|
||||
func TestQualityValidationRemovalRegression(t *testing.T) {
|
||||
// This test ensures that removing validation from GET endpoints doesn't break functionality
|
||||
|
||||
// Test that presets are still accessible
|
||||
audioPresets := GetAudioQualityPresets()
|
||||
assert.NotNil(t, audioPresets, "Audio presets should be accessible after validation removal")
|
||||
assert.NotEmpty(t, audioPresets, "Audio presets should not be empty")
|
||||
|
||||
micPresets := GetMicrophoneQualityPresets()
|
||||
assert.NotNil(t, micPresets, "Microphone presets should be accessible after validation removal")
|
||||
assert.NotEmpty(t, micPresets, "Microphone presets should not be empty")
|
||||
|
||||
// Test that quality getters still work
|
||||
audioConfig := GetAudioConfig()
|
||||
assert.GreaterOrEqual(t, int(audioConfig.Quality), 0, "Audio quality should be non-negative")
|
||||
|
||||
micConfig := GetMicrophoneConfig()
|
||||
assert.GreaterOrEqual(t, int(micConfig.Quality), 0, "Microphone quality should be non-negative")
|
||||
|
||||
// Test that setters still work (for valid values)
|
||||
originalAudio := GetAudioConfig()
|
||||
originalMic := GetMicrophoneConfig()
|
||||
|
||||
SetAudioQuality(AudioQualityMedium)
|
||||
assert.Equal(t, AudioQualityMedium, GetAudioConfig().Quality, "Audio quality setter should work")
|
||||
|
||||
SetMicrophoneQuality(AudioQualityHigh)
|
||||
assert.Equal(t, AudioQualityHigh, GetMicrophoneConfig().Quality, "Microphone quality setter should work")
|
||||
|
||||
// Restore original values
|
||||
SetAudioQuality(originalAudio.Quality)
|
||||
SetMicrophoneQuality(originalMic.Quality)
|
||||
}
|
||||
|
||||
// TestPerformanceAfterValidationRemoval tests that performance improved after validation removal
|
||||
func TestPerformanceAfterValidationRemoval(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping performance test in short mode")
|
||||
}
|
||||
|
||||
// Benchmark preset access (should be faster without validation)
|
||||
const iterations = 10000
|
||||
|
||||
// Time audio preset access
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetAudioQualityPresets()
|
||||
}
|
||||
audioDuration := time.Since(start)
|
||||
|
||||
// Time microphone preset access
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetMicrophoneQualityPresets()
|
||||
}
|
||||
micDuration := time.Since(start)
|
||||
|
||||
t.Logf("Audio presets access time for %d iterations: %v", iterations, audioDuration)
|
||||
t.Logf("Microphone presets access time for %d iterations: %v", iterations, micDuration)
|
||||
|
||||
// Verify reasonable performance (should complete quickly without validation overhead)
|
||||
maxExpectedDuration := time.Second // Very generous limit
|
||||
assert.Less(t, audioDuration, maxExpectedDuration, "Audio preset access should be fast")
|
||||
assert.Less(t, micDuration, maxExpectedDuration, "Microphone preset access should be fast")
|
||||
}
|
||||
|
|
@ -0,0 +1,366 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/usbgadget"
|
||||
)
|
||||
|
||||
// Unit tests for the audio package
|
||||
|
||||
func TestAudioQuality(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
quality AudioQuality
|
||||
expected string
|
||||
}{
|
||||
{"Low Quality", AudioQualityLow, "low"},
|
||||
{"Medium Quality", AudioQualityMedium, "medium"},
|
||||
{"High Quality", AudioQualityHigh, "high"},
|
||||
{"Ultra Quality", AudioQualityUltra, "ultra"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Test quality setting
|
||||
SetAudioQuality(tt.quality)
|
||||
config := GetAudioConfig()
|
||||
assert.Equal(t, tt.quality, config.Quality)
|
||||
assert.Greater(t, config.Bitrate, 0)
|
||||
assert.Greater(t, config.SampleRate, 0)
|
||||
assert.Greater(t, config.Channels, 0)
|
||||
assert.Greater(t, config.FrameSize, time.Duration(0))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMicrophoneQuality(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
quality AudioQuality
|
||||
}{
|
||||
{"Low Quality", AudioQualityLow},
|
||||
{"Medium Quality", AudioQualityMedium},
|
||||
{"High Quality", AudioQualityHigh},
|
||||
{"Ultra Quality", AudioQualityUltra},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Test microphone quality setting
|
||||
SetMicrophoneQuality(tt.quality)
|
||||
config := GetMicrophoneConfig()
|
||||
assert.Equal(t, tt.quality, config.Quality)
|
||||
assert.Equal(t, 1, config.Channels) // Microphone is always mono
|
||||
assert.Greater(t, config.Bitrate, 0)
|
||||
assert.Greater(t, config.SampleRate, 0)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAudioQualityPresets(t *testing.T) {
|
||||
presets := GetAudioQualityPresets()
|
||||
require.NotEmpty(t, presets)
|
||||
|
||||
// Test that all quality levels have presets
|
||||
for quality := AudioQualityLow; quality <= AudioQualityUltra; quality++ {
|
||||
config, exists := presets[quality]
|
||||
require.True(t, exists, "Preset should exist for quality %d", quality)
|
||||
assert.Equal(t, quality, config.Quality)
|
||||
assert.Greater(t, config.Bitrate, 0)
|
||||
assert.Greater(t, config.SampleRate, 0)
|
||||
assert.Greater(t, config.Channels, 0)
|
||||
assert.Greater(t, config.FrameSize, time.Duration(0))
|
||||
}
|
||||
|
||||
// Test that higher quality has higher bitrate
|
||||
lowConfig := presets[AudioQualityLow]
|
||||
mediumConfig := presets[AudioQualityMedium]
|
||||
highConfig := presets[AudioQualityHigh]
|
||||
ultraConfig := presets[AudioQualityUltra]
|
||||
|
||||
assert.Less(t, lowConfig.Bitrate, mediumConfig.Bitrate)
|
||||
assert.Less(t, mediumConfig.Bitrate, highConfig.Bitrate)
|
||||
assert.Less(t, highConfig.Bitrate, ultraConfig.Bitrate)
|
||||
}
|
||||
|
||||
func TestMicrophoneQualityPresets(t *testing.T) {
|
||||
presets := GetMicrophoneQualityPresets()
|
||||
require.NotEmpty(t, presets)
|
||||
|
||||
// Test that all quality levels have presets
|
||||
for quality := AudioQualityLow; quality <= AudioQualityUltra; quality++ {
|
||||
config, exists := presets[quality]
|
||||
require.True(t, exists, "Microphone preset should exist for quality %d", quality)
|
||||
assert.Equal(t, quality, config.Quality)
|
||||
assert.Equal(t, 1, config.Channels) // Always mono
|
||||
assert.Greater(t, config.Bitrate, 0)
|
||||
assert.Greater(t, config.SampleRate, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAudioMetrics(t *testing.T) {
|
||||
// Test initial metrics
|
||||
metrics := GetAudioMetrics()
|
||||
assert.GreaterOrEqual(t, metrics.FramesReceived, int64(0))
|
||||
assert.GreaterOrEqual(t, metrics.FramesDropped, int64(0))
|
||||
assert.GreaterOrEqual(t, metrics.BytesProcessed, int64(0))
|
||||
assert.GreaterOrEqual(t, metrics.ConnectionDrops, int64(0))
|
||||
|
||||
// Test recording metrics
|
||||
RecordFrameReceived(1024)
|
||||
metrics = GetAudioMetrics()
|
||||
assert.Greater(t, metrics.BytesProcessed, int64(0))
|
||||
assert.Greater(t, metrics.FramesReceived, int64(0))
|
||||
|
||||
RecordFrameDropped()
|
||||
metrics = GetAudioMetrics()
|
||||
assert.Greater(t, metrics.FramesDropped, int64(0))
|
||||
|
||||
RecordConnectionDrop()
|
||||
metrics = GetAudioMetrics()
|
||||
assert.Greater(t, metrics.ConnectionDrops, int64(0))
|
||||
}
|
||||
|
||||
func TestMaxAudioFrameSize(t *testing.T) {
|
||||
frameSize := GetMaxAudioFrameSize()
|
||||
assert.Greater(t, frameSize, 0)
|
||||
assert.Equal(t, GetConfig().MaxAudioFrameSize, frameSize)
|
||||
}
|
||||
|
||||
func TestMetricsUpdateInterval(t *testing.T) {
|
||||
// Test getting current interval
|
||||
interval := GetMetricsUpdateInterval()
|
||||
assert.Greater(t, interval, time.Duration(0))
|
||||
|
||||
// Test setting new interval
|
||||
newInterval := 2 * time.Second
|
||||
SetMetricsUpdateInterval(newInterval)
|
||||
updatedInterval := GetMetricsUpdateInterval()
|
||||
assert.Equal(t, newInterval, updatedInterval)
|
||||
}
|
||||
|
||||
func TestAudioConfigConsistency(t *testing.T) {
|
||||
// Test that setting audio quality updates the config consistently
|
||||
for quality := AudioQualityLow; quality <= AudioQualityUltra; quality++ {
|
||||
SetAudioQuality(quality)
|
||||
config := GetAudioConfig()
|
||||
presets := GetAudioQualityPresets()
|
||||
expectedConfig := presets[quality]
|
||||
|
||||
assert.Equal(t, expectedConfig.Quality, config.Quality)
|
||||
assert.Equal(t, expectedConfig.Bitrate, config.Bitrate)
|
||||
assert.Equal(t, expectedConfig.SampleRate, config.SampleRate)
|
||||
assert.Equal(t, expectedConfig.Channels, config.Channels)
|
||||
assert.Equal(t, expectedConfig.FrameSize, config.FrameSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMicrophoneConfigConsistency(t *testing.T) {
|
||||
// Test that setting microphone quality updates the config consistently
|
||||
for quality := AudioQualityLow; quality <= AudioQualityUltra; quality++ {
|
||||
SetMicrophoneQuality(quality)
|
||||
config := GetMicrophoneConfig()
|
||||
presets := GetMicrophoneQualityPresets()
|
||||
expectedConfig := presets[quality]
|
||||
|
||||
assert.Equal(t, expectedConfig.Quality, config.Quality)
|
||||
assert.Equal(t, expectedConfig.Bitrate, config.Bitrate)
|
||||
assert.Equal(t, expectedConfig.SampleRate, config.SampleRate)
|
||||
assert.Equal(t, expectedConfig.Channels, config.Channels)
|
||||
assert.Equal(t, expectedConfig.FrameSize, config.FrameSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark tests
|
||||
func BenchmarkGetAudioConfig(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGetAudioMetrics(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = GetAudioMetrics()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRecordFrameReceived(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
RecordFrameReceived(1024)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSetAudioQuality(b *testing.B) {
|
||||
qualities := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
SetAudioQuality(qualities[i%len(qualities)])
|
||||
}
|
||||
}
|
||||
|
||||
// TestAudioUsbGadgetIntegration tests audio functionality with USB gadget reconfiguration
|
||||
// This test simulates the production scenario where audio devices are enabled/disabled
|
||||
// through USB gadget configuration changes
|
||||
func TestAudioUsbGadgetIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
initialAudioEnabled bool
|
||||
newAudioEnabled bool
|
||||
expectedTransition string
|
||||
}{
|
||||
{
|
||||
name: "EnableAudio",
|
||||
initialAudioEnabled: false,
|
||||
newAudioEnabled: true,
|
||||
expectedTransition: "disabled_to_enabled",
|
||||
},
|
||||
{
|
||||
name: "DisableAudio",
|
||||
initialAudioEnabled: true,
|
||||
newAudioEnabled: false,
|
||||
expectedTransition: "enabled_to_disabled",
|
||||
},
|
||||
{
|
||||
name: "NoChange",
|
||||
initialAudioEnabled: true,
|
||||
newAudioEnabled: true,
|
||||
expectedTransition: "no_change",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Simulate initial USB device configuration
|
||||
initialDevices := &usbgadget.Devices{
|
||||
Keyboard: true,
|
||||
AbsoluteMouse: true,
|
||||
RelativeMouse: true,
|
||||
MassStorage: true,
|
||||
Audio: tt.initialAudioEnabled,
|
||||
}
|
||||
|
||||
// Simulate new USB device configuration
|
||||
newDevices := &usbgadget.Devices{
|
||||
Keyboard: true,
|
||||
AbsoluteMouse: true,
|
||||
RelativeMouse: true,
|
||||
MassStorage: true,
|
||||
Audio: tt.newAudioEnabled,
|
||||
}
|
||||
|
||||
// Test audio configuration validation
|
||||
err := validateAudioDeviceConfiguration(tt.newAudioEnabled)
|
||||
assert.NoError(t, err, "Audio configuration should be valid")
|
||||
|
||||
// Test audio state transition simulation
|
||||
transition := simulateAudioStateTransition(ctx, initialDevices, newDevices)
|
||||
assert.Equal(t, tt.expectedTransition, transition, "Audio state transition should match expected")
|
||||
|
||||
// Test that audio configuration is consistent after transition
|
||||
if tt.newAudioEnabled {
|
||||
config := GetAudioConfig()
|
||||
assert.Greater(t, config.Bitrate, 0, "Audio bitrate should be positive when enabled")
|
||||
assert.Greater(t, config.SampleRate, 0, "Audio sample rate should be positive when enabled")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// validateAudioDeviceConfiguration simulates the audio validation that happens in production
|
||||
func validateAudioDeviceConfiguration(enabled bool) error {
|
||||
if !enabled {
|
||||
return nil // No validation needed when disabled
|
||||
}
|
||||
|
||||
// Simulate audio device availability checks
|
||||
// In production, this would check for ALSA devices, audio hardware, etc.
|
||||
config := GetAudioConfig()
|
||||
if config.Bitrate <= 0 {
|
||||
return assert.AnError
|
||||
}
|
||||
if config.SampleRate <= 0 {
|
||||
return assert.AnError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// simulateAudioStateTransition simulates the audio process management during USB reconfiguration
|
||||
func simulateAudioStateTransition(ctx context.Context, initial, new *usbgadget.Devices) string {
|
||||
previousAudioEnabled := initial.Audio
|
||||
newAudioEnabled := new.Audio
|
||||
|
||||
if previousAudioEnabled == newAudioEnabled {
|
||||
return "no_change"
|
||||
}
|
||||
|
||||
if !newAudioEnabled {
|
||||
// Simulate stopping audio processes
|
||||
// In production, this would stop AudioInputManager and audioSupervisor
|
||||
time.Sleep(10 * time.Millisecond) // Simulate process stop time
|
||||
return "enabled_to_disabled"
|
||||
}
|
||||
|
||||
if newAudioEnabled {
|
||||
// Simulate starting audio processes after USB reconfiguration
|
||||
// In production, this would start audioSupervisor and broadcast events
|
||||
time.Sleep(10 * time.Millisecond) // Simulate process start time
|
||||
return "disabled_to_enabled"
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// TestAudioUsbGadgetTimeout tests that audio operations don't timeout during USB reconfiguration
|
||||
func TestAudioUsbGadgetTimeout(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping timeout test in short mode")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Test that audio configuration changes complete within reasonable time
|
||||
start := time.Now()
|
||||
|
||||
// Simulate multiple rapid USB device configuration changes
|
||||
for i := 0; i < 10; i++ {
|
||||
audioEnabled := i%2 == 0
|
||||
devices := &usbgadget.Devices{
|
||||
Keyboard: true,
|
||||
AbsoluteMouse: true,
|
||||
RelativeMouse: true,
|
||||
MassStorage: true,
|
||||
Audio: audioEnabled,
|
||||
}
|
||||
|
||||
err := validateAudioDeviceConfiguration(devices.Audio)
|
||||
assert.NoError(t, err, "Audio validation should not fail")
|
||||
|
||||
// Ensure we don't timeout
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("Audio configuration test timed out")
|
||||
default:
|
||||
// Continue
|
||||
}
|
||||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
t.Logf("Audio USB gadget configuration test completed in %v", elapsed)
|
||||
assert.Less(t, elapsed, 3*time.Second, "Audio configuration should complete quickly")
|
||||
}
|
||||
|
|
@ -23,7 +23,6 @@ type BaseAudioMetrics struct {
|
|||
|
||||
// BaseAudioManager provides common functionality for audio managers
|
||||
type BaseAudioManager struct {
|
||||
// Core metrics and state
|
||||
metrics BaseAudioMetrics
|
||||
logger zerolog.Logger
|
||||
running int32
|
||||
|
|
@ -59,12 +58,6 @@ func (bam *BaseAudioManager) resetMetrics() {
|
|||
bam.metrics.AverageLatency = 0
|
||||
}
|
||||
|
||||
// flushPendingMetrics is now a no-op since we use direct atomic updates
|
||||
func (bam *BaseAudioManager) flushPendingMetrics() {
|
||||
// No-op: metrics are now updated directly without local buffering
|
||||
// This function is kept for API compatibility
|
||||
}
|
||||
|
||||
// getBaseMetrics returns a copy of the base metrics
|
||||
func (bam *BaseAudioManager) getBaseMetrics() BaseAudioMetrics {
|
||||
return BaseAudioMetrics{
|
||||
|
|
@ -77,16 +70,28 @@ func (bam *BaseAudioManager) getBaseMetrics() BaseAudioMetrics {
|
|||
}
|
||||
}
|
||||
|
||||
// recordFrameProcessed records a processed frame with simplified tracking
|
||||
// recordFrameProcessed records a processed frame
|
||||
func (bam *BaseAudioManager) recordFrameProcessed(bytes int) {
|
||||
atomic.AddInt64(&bam.metrics.FramesProcessed, 1)
|
||||
atomic.AddInt64(&bam.metrics.BytesProcessed, int64(bytes))
|
||||
bam.metrics.LastFrameTime = time.Now()
|
||||
}
|
||||
|
||||
// recordFrameDropped records a dropped frame with simplified tracking
|
||||
// recordFrameDropped records a dropped frame
|
||||
func (bam *BaseAudioManager) recordFrameDropped() {
|
||||
atomic.AddInt64(&bam.metrics.FramesDropped, 1)
|
||||
}
|
||||
|
||||
// updateLatency updates the average latency
|
||||
func (bam *BaseAudioManager) updateLatency(latency time.Duration) {
|
||||
// Simple moving average - could be enhanced with more sophisticated algorithms
|
||||
currentAvg := bam.metrics.AverageLatency
|
||||
if currentAvg == 0 {
|
||||
bam.metrics.AverageLatency = latency
|
||||
} else {
|
||||
// Weighted average: 90% old + 10% new
|
||||
bam.metrics.AverageLatency = time.Duration(float64(currentAvg)*0.9 + float64(latency)*0.1)
|
||||
}
|
||||
}
|
||||
|
||||
// logComponentStart logs component start with consistent format
|
||||
|
|
|
|||
|
|
@ -71,6 +71,45 @@ func (bs *BaseSupervisor) GetLastExitInfo() (exitCode int, exitTime time.Time) {
|
|||
return bs.lastExitCode, bs.lastExitTime
|
||||
}
|
||||
|
||||
// GetProcessMetrics returns process metrics if available
|
||||
func (bs *BaseSupervisor) GetProcessMetrics() *ProcessMetrics {
|
||||
bs.mutex.RLock()
|
||||
defer bs.mutex.RUnlock()
|
||||
|
||||
if bs.cmd == nil || bs.cmd.Process == nil {
|
||||
return &ProcessMetrics{
|
||||
PID: 0,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Timestamp: time.Now(),
|
||||
ProcessName: "audio-server",
|
||||
}
|
||||
}
|
||||
|
||||
pid := bs.cmd.Process.Pid
|
||||
if bs.processMonitor != nil {
|
||||
metrics := bs.processMonitor.GetCurrentMetrics()
|
||||
for _, metric := range metrics {
|
||||
if metric.PID == pid {
|
||||
return &metric
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return default metrics if process not found in monitor
|
||||
return &ProcessMetrics{
|
||||
PID: pid,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Timestamp: time.Now(),
|
||||
ProcessName: "audio-server",
|
||||
}
|
||||
}
|
||||
|
||||
// logSupervisorStart logs supervisor start event
|
||||
func (bs *BaseSupervisor) logSupervisorStart() {
|
||||
bs.logger.Info().Msg("Supervisor starting")
|
||||
|
|
|
|||
|
|
@ -94,12 +94,6 @@ func NewBatchAudioProcessor(batchSize int, batchDuration time.Duration) *BatchAu
|
|||
batchDuration = cache.BatchProcessingDelay
|
||||
}
|
||||
|
||||
// Use optimized queue sizes from configuration
|
||||
queueSize := cache.BatchProcessorMaxQueueSize
|
||||
if queueSize <= 0 {
|
||||
queueSize = batchSize * 2 // Fallback to double batch size
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// Pre-allocate logger to avoid repeated allocations
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "batch-audio").Logger()
|
||||
|
|
@ -116,8 +110,8 @@ func NewBatchAudioProcessor(batchSize int, batchDuration time.Duration) *BatchAu
|
|||
logger: &logger,
|
||||
batchSize: batchSize,
|
||||
batchDuration: batchDuration,
|
||||
readQueue: make(chan batchReadRequest, queueSize),
|
||||
writeQueue: make(chan batchWriteRequest, queueSize),
|
||||
readQueue: make(chan batchReadRequest, batchSize*2),
|
||||
writeQueue: make(chan batchWriteRequest, batchSize*2),
|
||||
readBufPool: &sync.Pool{
|
||||
New: func() interface{} {
|
||||
// Use pre-calculated frame size to avoid GetConfig() calls
|
||||
|
|
@ -179,20 +173,14 @@ func (bap *BatchAudioProcessor) BatchReadEncode(buffer []byte) (int, error) {
|
|||
|
||||
// Validate buffer before processing
|
||||
if err := ValidateBufferSize(len(buffer)); err != nil {
|
||||
// Only log validation errors in debug mode to reduce overhead
|
||||
if bap.logger.GetLevel() <= zerolog.DebugLevel {
|
||||
bap.logger.Debug().Err(err).Msg("invalid buffer for batch processing")
|
||||
}
|
||||
bap.logger.Debug().Err(err).Msg("invalid buffer for batch processing")
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if !bap.IsRunning() {
|
||||
// Fallback to single operation if batch processor is not running
|
||||
// Use sampling to reduce atomic operations overhead
|
||||
if atomic.LoadInt64(&bap.stats.SingleReads)%10 == 0 {
|
||||
atomic.AddInt64(&bap.stats.SingleReads, 10)
|
||||
atomic.AddInt64(&bap.stats.SingleFrames, 10)
|
||||
}
|
||||
atomic.AddInt64(&bap.stats.SingleReads, 1)
|
||||
atomic.AddInt64(&bap.stats.SingleFrames, 1)
|
||||
return CGOAudioReadEncode(buffer)
|
||||
}
|
||||
|
||||
|
|
@ -209,11 +197,8 @@ func (bap *BatchAudioProcessor) BatchReadEncode(buffer []byte) (int, error) {
|
|||
// Successfully queued
|
||||
default:
|
||||
// Queue is full, fallback to single operation
|
||||
// Use sampling to reduce atomic operations overhead
|
||||
if atomic.LoadInt64(&bap.stats.SingleReads)%10 == 0 {
|
||||
atomic.AddInt64(&bap.stats.SingleReads, 10)
|
||||
atomic.AddInt64(&bap.stats.SingleFrames, 10)
|
||||
}
|
||||
atomic.AddInt64(&bap.stats.SingleReads, 1)
|
||||
atomic.AddInt64(&bap.stats.SingleFrames, 1)
|
||||
return CGOAudioReadEncode(buffer)
|
||||
}
|
||||
|
||||
|
|
@ -223,11 +208,8 @@ func (bap *BatchAudioProcessor) BatchReadEncode(buffer []byte) (int, error) {
|
|||
return result.length, result.err
|
||||
case <-time.After(cache.BatchProcessingTimeout):
|
||||
// Timeout, fallback to single operation
|
||||
// Use sampling to reduce atomic operations overhead
|
||||
if atomic.LoadInt64(&bap.stats.SingleReads)%10 == 0 {
|
||||
atomic.AddInt64(&bap.stats.SingleReads, 10)
|
||||
atomic.AddInt64(&bap.stats.SingleFrames, 10)
|
||||
}
|
||||
atomic.AddInt64(&bap.stats.SingleReads, 1)
|
||||
atomic.AddInt64(&bap.stats.SingleFrames, 1)
|
||||
return CGOAudioReadEncode(buffer)
|
||||
}
|
||||
}
|
||||
|
|
@ -241,20 +223,14 @@ func (bap *BatchAudioProcessor) BatchDecodeWrite(buffer []byte) (int, error) {
|
|||
|
||||
// Validate buffer before processing
|
||||
if err := ValidateBufferSize(len(buffer)); err != nil {
|
||||
// Only log validation errors in debug mode to reduce overhead
|
||||
if bap.logger.GetLevel() <= zerolog.DebugLevel {
|
||||
bap.logger.Debug().Err(err).Msg("invalid buffer for batch processing")
|
||||
}
|
||||
bap.logger.Debug().Err(err).Msg("invalid buffer for batch processing")
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if !bap.IsRunning() {
|
||||
// Fallback to single operation if batch processor is not running
|
||||
// Use sampling to reduce atomic operations overhead
|
||||
if atomic.LoadInt64(&bap.stats.SingleWrites)%10 == 0 {
|
||||
atomic.AddInt64(&bap.stats.SingleWrites, 10)
|
||||
atomic.AddInt64(&bap.stats.WriteFrames, 10)
|
||||
}
|
||||
atomic.AddInt64(&bap.stats.SingleWrites, 1)
|
||||
atomic.AddInt64(&bap.stats.WriteFrames, 1)
|
||||
return CGOAudioDecodeWriteLegacy(buffer)
|
||||
}
|
||||
|
||||
|
|
@ -271,11 +247,8 @@ func (bap *BatchAudioProcessor) BatchDecodeWrite(buffer []byte) (int, error) {
|
|||
// Successfully queued
|
||||
default:
|
||||
// Queue is full, fall back to single operation
|
||||
// Use sampling to reduce atomic operations overhead
|
||||
if atomic.LoadInt64(&bap.stats.SingleWrites)%10 == 0 {
|
||||
atomic.AddInt64(&bap.stats.SingleWrites, 10)
|
||||
atomic.AddInt64(&bap.stats.WriteFrames, 10)
|
||||
}
|
||||
atomic.AddInt64(&bap.stats.SingleWrites, 1)
|
||||
atomic.AddInt64(&bap.stats.WriteFrames, 1)
|
||||
return CGOAudioDecodeWriteLegacy(buffer)
|
||||
}
|
||||
|
||||
|
|
@@ -284,11 +257,8 @@ func (bap *BatchAudioProcessor) BatchDecodeWrite(buffer []byte) (int, error) {
	case result := <-resultChan:
		return result.length, result.err
	case <-time.After(cache.BatchProcessingTimeout):
		// Use sampling to reduce atomic operations overhead
		if atomic.LoadInt64(&bap.stats.SingleWrites)%10 == 0 {
			atomic.AddInt64(&bap.stats.SingleWrites, 10)
			atomic.AddInt64(&bap.stats.WriteFrames, 10)
		}
		atomic.AddInt64(&bap.stats.SingleWrites, 1)
		atomic.AddInt64(&bap.stats.WriteFrames, 1)
		return CGOAudioDecodeWriteLegacy(buffer)
	}
}
@@ -428,21 +398,19 @@ func (bap *BatchAudioProcessor) processBatchRead(batch []batchReadRequest) {

	// Get cached config once - avoid repeated calls
	cache := GetCachedConfig()
	threadPinningThreshold := cache.BatchProcessorThreadPinningThreshold
	if threadPinningThreshold == 0 {
		threadPinningThreshold = cache.MinBatchSizeForThreadPinning // Fallback
	}
	minBatchSize := cache.MinBatchSizeForThreadPinning

	// Only pin to OS thread for large batches to reduce thread contention
	var start time.Time
	threadWasPinned := false
	if batchSize >= threadPinningThreshold && atomic.CompareAndSwapInt32(&bap.threadPinned, 0, 1) {
	if batchSize >= minBatchSize && atomic.CompareAndSwapInt32(&bap.threadPinned, 0, 1) {
		start = time.Now()
		threadWasPinned = true
		runtime.LockOSThread()
		// Skip priority setting for better performance - audio threads already have good priority
	}

	// Batch stats updates to reduce atomic operations (update once per batch instead of per frame)
	// Update stats efficiently
	atomic.AddInt64(&bap.stats.BatchedReads, 1)
	atomic.AddInt64(&bap.stats.BatchedFrames, int64(batchSize))
	if batchSize > 1 {
@@ -469,7 +437,6 @@ func (bap *BatchAudioProcessor) processBatchRead(batch []batchReadRequest) {
		bap.stats.OSThreadPinTime += time.Since(start)
	}

	// Update timestamp only once per batch instead of per frame
	bap.stats.LastBatchTime = time.Now()
}
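In these processBatchRead/processBatchWrite hunks, OS-thread pinning is gated on both a minimum batch size and a compare-and-swap flag so that at most one worker pays the pinning cost at a time. A minimal, hypothetical sketch of that gating (standalone names, not the repository's types) looks like this:

```go
package main

import (
	"runtime"
	"sync/atomic"
)

// pinGuard ensures only one worker at a time pins itself to an OS thread,
// and only when the batch is large enough to justify the cost.
type pinGuard struct {
	pinned int32 // 0 = free, 1 = some worker holds the pin
}

// acquire pins the calling goroutine to its OS thread when batchSize reaches
// minBatch and no other worker holds the pin. It reports whether the caller
// must later call release.
func (g *pinGuard) acquire(batchSize, minBatch int) bool {
	if batchSize < minBatch {
		return false
	}
	if !atomic.CompareAndSwapInt32(&g.pinned, 0, 1) {
		return false // another worker already pinned its thread
	}
	runtime.LockOSThread()
	return true
}

// release undoes acquire.
func (g *pinGuard) release() {
	runtime.UnlockOSThread()
	atomic.StoreInt32(&g.pinned, 0)
}

func main() {
	var g pinGuard
	if g.acquire(16, 8) {
		defer g.release()
	}
	// ... process the batch on the pinned thread ...
}
```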
@@ -481,14 +448,10 @@ func (bap *BatchAudioProcessor) processBatchWrite(batch []batchWriteRequest) {

	// Get cached config to avoid GetConfig() calls in hot path
	cache := GetCachedConfig()
	threadPinningThreshold := cache.BatchProcessorThreadPinningThreshold
	if threadPinningThreshold == 0 {
		threadPinningThreshold = cache.MinBatchSizeForThreadPinning // Fallback
	}

	// Only pin to OS thread for large batches to reduce thread contention
	start := time.Now()
	shouldPinThread := len(batch) >= threadPinningThreshold
	shouldPinThread := len(batch) >= cache.MinBatchSizeForThreadPinning

	// Track if we pinned the thread in this call
	threadWasPinned := false
@@ -497,7 +460,8 @@ func (bap *BatchAudioProcessor) processBatchWrite(batch []batchWriteRequest) {
		threadWasPinned = true
		runtime.LockOSThread()

		// Priority scheduler not implemented - using default thread priority
		// Set high priority for batch audio processing - skip logging in hotpath
		_ = SetAudioThreadPriority()
	}

	batchSize := len(batch)
@@ -510,7 +474,8 @@ func (bap *BatchAudioProcessor) processBatchWrite(batch []batchWriteRequest) {
	// Add deferred function to release thread lock if we pinned it
	if threadWasPinned {
		defer func() {
			// Priority scheduler not implemented - using default thread priority
			// Skip logging in hotpath for performance
			_ = ResetThreadPriority()
			runtime.UnlockOSThread()
			atomic.StoreInt32(&bap.writePinned, 0)
			bap.stats.WriteThreadTime += time.Since(start)
@@ -367,7 +367,6 @@ func (p *AudioBufferPool) Get() []byte {
			bufPtr := (*unsafe.Pointer)(unsafe.Pointer(&cache.buffers[i]))
			buf := (*[]byte)(atomic.LoadPointer(bufPtr))
			if buf != nil && atomic.CompareAndSwapPointer(bufPtr, unsafe.Pointer(buf), nil) {
				// Direct hit count update to avoid sampling complexity in critical path
				atomic.AddInt64(&p.hitCount, 1)
				*buf = (*buf)[:0]
				return *buf
@@ -384,7 +383,6 @@ func (p *AudioBufferPool) Get() []byte {
		buf := p.preallocated[lastIdx]
		p.preallocated = p.preallocated[:lastIdx]
		p.mutex.Unlock()
		// Direct hit count update to avoid sampling complexity in critical path
		atomic.AddInt64(&p.hitCount, 1)
		*buf = (*buf)[:0]
		return *buf
@@ -394,7 +392,6 @@ func (p *AudioBufferPool) Get() []byte {
	// Try sync.Pool next
	if poolBuf := p.pool.Get(); poolBuf != nil {
		buf := poolBuf.(*[]byte)
		// Direct hit count update to avoid sampling complexity in critical path
		atomic.AddInt64(&p.hitCount, 1)
		atomic.AddInt64(&p.currentSize, -1)
		// Fast capacity check - most buffers should be correct size
@@ -406,7 +403,6 @@ func (p *AudioBufferPool) Get() []byte {
	}

	// Pool miss - allocate new buffer with exact capacity
	// Direct miss count update to avoid sampling complexity in critical path
	atomic.AddInt64(&p.missCount, 1)
	return make([]byte, 0, p.bufferSize)
}
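The four Get() hunks above walk a tiered lookup: a per-goroutine cache, then a preallocated free list, then sync.Pool, and finally a fresh allocation on a miss. As a compact, hypothetical sketch of that ordering (simplified types of my own, not the repository's AudioBufferPool), the idea is:

```go
package main

import (
	"sync"
	"sync/atomic"
)

// tieredPool sketches the lookup order used above: a preallocated free list
// first, then sync.Pool, then a fresh allocation counted as a miss.
type tieredPool struct {
	mu           sync.Mutex
	preallocated []*[]byte
	pool         sync.Pool
	bufferSize   int
	hitCount     int64
	missCount    int64
}

func (p *tieredPool) get() []byte {
	// Tier 1: preallocated free list.
	p.mu.Lock()
	if n := len(p.preallocated); n > 0 {
		buf := p.preallocated[n-1]
		p.preallocated = p.preallocated[:n-1]
		p.mu.Unlock()
		atomic.AddInt64(&p.hitCount, 1)
		return (*buf)[:0]
	}
	p.mu.Unlock()

	// Tier 2: sync.Pool.
	if pooled := p.pool.Get(); pooled != nil {
		buf := pooled.(*[]byte)
		atomic.AddInt64(&p.hitCount, 1)
		return (*buf)[:0]
	}

	// Tier 3: miss - allocate a new buffer with the configured capacity.
	atomic.AddInt64(&p.missCount, 1)
	return make([]byte, 0, p.bufferSize)
}

func main() {
	p := &tieredPool{bufferSize: 1500}
	buf := p.get()
	_ = buf
}
```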
@@ -712,17 +712,7 @@ func cgoAudioClose() {
// AudioConfigCache provides a comprehensive caching system for audio configuration
// to minimize GetConfig() calls in the hot path
type AudioConfigCache struct {
	// Atomic int64 fields MUST be first for ARM32 alignment (8-byte alignment required)
	minFrameDuration atomic.Int64 // Store as nanoseconds
	maxFrameDuration atomic.Int64 // Store as nanoseconds
	maxLatency atomic.Int64 // Store as nanoseconds
	minMetricsUpdateInterval atomic.Int64 // Store as nanoseconds
	maxMetricsUpdateInterval atomic.Int64 // Store as nanoseconds
	restartWindow atomic.Int64 // Store as nanoseconds
	restartDelay atomic.Int64 // Store as nanoseconds
	maxRestartDelay atomic.Int64 // Store as nanoseconds

	// Atomic int32 fields for lock-free access to frequently used values
	// Atomic fields for lock-free access to frequently used values
	minReadEncodeBuffer atomic.Int32
	maxDecodeWriteBuffer atomic.Int32
	maxPacketSize atomic.Int32
@@ -741,24 +731,17 @@ type AudioConfigCache struct {
	// Additional cached values for validation functions
	maxAudioFrameSize atomic.Int32
	maxChannels atomic.Int32
	minFrameDuration atomic.Int64 // Store as nanoseconds
	maxFrameDuration atomic.Int64 // Store as nanoseconds
	minOpusBitrate atomic.Int32
	maxOpusBitrate atomic.Int32

	// Socket and buffer configuration values
	socketMaxBuffer atomic.Int32
	socketMinBuffer atomic.Int32
	inputProcessingTimeoutMS atomic.Int32
	maxRestartAttempts atomic.Int32

	// Batch processing related values
	BatchProcessingTimeout time.Duration
	BatchProcessorFramesPerBatch int
	BatchProcessorTimeout time.Duration
	BatchProcessingDelay time.Duration
	MinBatchSizeForThreadPinning int
	BatchProcessorMaxQueueSize int
	BatchProcessorAdaptiveThreshold float64
	BatchProcessorThreadPinningThreshold int
	BatchProcessingTimeout time.Duration
	BatchProcessorFramesPerBatch int
	BatchProcessorTimeout time.Duration
	BatchProcessingDelay time.Duration
	MinBatchSizeForThreadPinning int

	// Mutex for updating the cache
	mutex sync.RWMutex
@@ -831,9 +814,6 @@ func (c *AudioConfigCache) Update() {
	c.BatchProcessorTimeout = config.BatchProcessorTimeout
	c.BatchProcessingDelay = config.BatchProcessingDelay
	c.MinBatchSizeForThreadPinning = config.MinBatchSizeForThreadPinning
	c.BatchProcessorMaxQueueSize = config.BatchProcessorMaxQueueSize
	c.BatchProcessorAdaptiveThreshold = config.BatchProcessorAdaptiveThreshold
	c.BatchProcessorThreadPinningThreshold = config.BatchProcessorThreadPinningThreshold

	// Pre-allocate common errors
	c.bufferTooSmallReadEncode = newBufferTooSmallError(0, config.MinReadEncodeBuffer)
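The AudioConfigCache hunks above cache frequently read configuration values in atomic fields so hot paths avoid repeated GetConfig() calls and lock contention. A minimal sketch of that pattern, assuming hypothetical field names of my own rather than the cache shown in the diff:

```go
package main

import (
	"sync/atomic"
	"time"
)

// configCache keeps a few frequently read settings in atomic fields so hot
// paths can read them without taking a lock or re-reading the full config.
type configCache struct {
	maxPacketSize    atomic.Int32
	minFrameDuration atomic.Int64 // stored as nanoseconds
}

// update refreshes the cached values from a full config snapshot.
func (c *configCache) update(maxPacket int32, minFrame time.Duration) {
	c.maxPacketSize.Store(maxPacket)
	c.minFrameDuration.Store(int64(minFrame))
}

// validatePacket is the kind of hot-path check that benefits from the cache.
func (c *configCache) validatePacket(n int) bool {
	return n > 0 && int32(n) <= c.maxPacketSize.Load()
}

func main() {
	var c configCache
	c.update(1500, 20*time.Millisecond)
	_ = c.validatePacket(512) // true
}
```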
@@ -1038,7 +1018,8 @@ var (
	batchProcessingCount atomic.Int64
	batchFrameCount atomic.Int64
	batchProcessingTime atomic.Int64
	// Batch time tracking removed
	// Flag to control time tracking overhead
	enableBatchTimeTracking atomic.Bool
)

// GetBufferFromPool gets a buffer from the pool with at least the specified capacity
@@ -1243,8 +1224,7 @@ func BatchReadEncode(batchSize int) ([][]byte, error) {

	// Track batch processing statistics - only if enabled
	var startTime time.Time
	// Batch time tracking removed
	trackTime := false
	trackTime := enableBatchTimeTracking.Load()
	if trackTime {
		startTime = time.Now()
	}
@@ -1311,8 +1291,7 @@ func BatchDecodeWrite(frames [][]byte) error {

	// Track batch processing statistics - only if enabled
	var startTime time.Time
	// Batch time tracking removed
	trackTime := false
	trackTime := enableBatchTimeTracking.Load()
	if trackTime {
		startTime = time.Now()
	}
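Both hunks above gate the timing bookkeeping on an atomic flag so that time.Now() and the duration math cost nothing when tracking is disabled. A small, self-contained sketch of that gating pattern (hypothetical names, not the repository's variables):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// enableTiming gates the cost of time.Now() and duration bookkeeping so it
// can be turned off entirely in production hot paths.
var enableTiming atomic.Bool

func processBatch(frames int) {
	var start time.Time
	track := enableTiming.Load()
	if track {
		start = time.Now()
	}

	// ... do the real batch work here ...

	if track {
		fmt.Printf("processed %d frames in %v\n", frames, time.Since(start))
	}
}

func main() {
	enableTiming.Store(true) // flip to false to skip all timing overhead
	processBatch(4)
}
```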
@ -0,0 +1,48 @@
|
|||
//go:build !cgo
|
||||
|
||||
package audio
|
||||
|
||||
import "errors"
|
||||
|
||||
// Stub implementations for linting (no CGO dependencies)
|
||||
|
||||
func cgoAudioInit() error {
|
||||
return errors.New("audio not available in lint mode")
|
||||
}
|
||||
|
||||
func cgoAudioClose() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
func cgoAudioReadEncode(buf []byte) (int, error) {
|
||||
return 0, errors.New("audio not available in lint mode")
|
||||
}
|
||||
|
||||
func cgoAudioPlaybackInit() error {
|
||||
return errors.New("audio not available in lint mode")
|
||||
}
|
||||
|
||||
func cgoAudioPlaybackClose() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
func cgoAudioDecodeWrite(buf []byte) (int, error) {
|
||||
return 0, errors.New("audio not available in lint mode")
|
||||
}
|
||||
|
||||
// cgoAudioDecodeWriteWithBuffers is a stub implementation for the optimized decode-write function
|
||||
func cgoAudioDecodeWriteWithBuffers(opusData []byte, pcmBuffer []byte) (int, error) {
|
||||
return 0, errors.New("audio not available in lint mode")
|
||||
}
|
||||
|
||||
// Uppercase aliases for external API compatibility
|
||||
|
||||
var (
|
||||
CGOAudioInit = cgoAudioInit
|
||||
CGOAudioClose = cgoAudioClose
|
||||
CGOAudioReadEncode = cgoAudioReadEncode
|
||||
CGOAudioPlaybackInit = cgoAudioPlaybackInit
|
||||
CGOAudioPlaybackClose = cgoAudioPlaybackClose
|
||||
CGOAudioDecodeWriteLegacy = cgoAudioDecodeWrite
|
||||
CGOAudioDecodeWrite = cgoAudioDecodeWriteWithBuffers
|
||||
)
|
||||
|
|
@ -224,8 +224,51 @@ type AudioConfigConstants struct {
|
|||
// Used in: process_monitor.go for configuring thread scheduling behavior
|
||||
// Impact: Controls how audio threads are scheduled by the Linux kernel
|
||||
|
||||
// Removed unused scheduling policy constants and RT priority values
|
||||
// The priority scheduler is not implemented - functions are called but don't exist
|
||||
// SchedNormal defines normal (CFS) scheduling policy.
|
||||
// Used in: process_monitor.go for non-critical audio threads
|
||||
// Impact: Standard time-sharing scheduling, may cause audio latency under load.
|
||||
// Value 0 corresponds to SCHED_NORMAL in Linux kernel.
|
||||
SchedNormal int
|
||||
|
||||
// SchedFIFO defines First-In-First-Out real-time scheduling policy.
|
||||
// Used in: process_monitor.go for critical audio threads requiring deterministic timing
|
||||
// Impact: Provides real-time scheduling but may starve other processes if misused.
|
||||
// Value 1 corresponds to SCHED_FIFO in Linux kernel.
|
||||
SchedFIFO int
|
||||
|
||||
// SchedRR defines Round-Robin real-time scheduling policy.
|
||||
// Used in: process_monitor.go for real-time threads that should share CPU time
|
||||
// Impact: Real-time scheduling with time slicing, balances determinism and fairness.
|
||||
// Value 2 corresponds to SCHED_RR in Linux kernel.
|
||||
SchedRR int
|
||||
|
||||
// Real-time Priority Levels - Priority values for real-time audio thread scheduling
|
||||
// Used in: process_monitor.go for setting thread priorities
|
||||
// Impact: Higher priorities get more CPU time but may affect system responsiveness
|
||||
|
||||
// RTAudioHighPriority defines highest priority for critical audio threads.
|
||||
// Used in: process_monitor.go for time-critical audio processing (encoding/decoding)
|
||||
// Impact: Ensures audio threads get CPU time but may impact system responsiveness.
|
||||
// Default 80 provides high priority without completely starving other processes.
|
||||
RTAudioHighPriority int
|
||||
|
||||
// RTAudioMediumPriority defines medium priority for important audio threads.
|
||||
// Used in: process_monitor.go for audio I/O and buffering operations
|
||||
// Impact: Good priority for audio operations while maintaining system balance.
|
||||
// Default 60 provides elevated priority for audio without extreme impact.
|
||||
RTAudioMediumPriority int
|
||||
|
||||
// RTAudioLowPriority defines low priority for background audio threads.
|
||||
// Used in: process_monitor.go for audio monitoring and metrics collection
|
||||
// Impact: Ensures audio background tasks run without impacting critical operations.
|
||||
// Default 40 provides some priority elevation while remaining background.
|
||||
RTAudioLowPriority int
|
||||
|
||||
// RTNormalPriority defines normal priority (no real-time scheduling).
|
||||
// Used in: process_monitor.go for non-critical audio threads
|
||||
// Impact: Standard scheduling priority, no special real-time guarantees.
|
||||
// Default 0 uses normal kernel scheduling without real-time privileges.
|
||||
RTNormalPriority int
|
||||
|
||||
// Process Management - Configuration for audio process lifecycle management
|
||||
// Used in: supervisor.go for managing audio process restarts and recovery
|
||||
|
|
@ -859,12 +902,6 @@ type AudioConfigConstants struct {
|
|||
// Default 200ms provides reasonable wait time for microphone access.
|
||||
MicContentionTimeout time.Duration // 200ms contention timeout
|
||||
|
||||
// Subprocess Pre-warming Configuration
|
||||
// Used in: input_supervisor.go for reducing microphone activation latency
|
||||
// Impact: Pre-warms audio input subprocess during startup to eliminate cold start delay
|
||||
// Default true enables pre-warming for optimal user experience
|
||||
EnableSubprocessPrewarming bool // Enable subprocess pre-warming (default: true)
|
||||
|
||||
// Priority Scheduler Configuration - Settings for process priority management
|
||||
// Used in: priority_scheduler.go for system priority control
|
||||
// Impact: Controls valid range for process priority adjustments
|
||||
|
|
@ -1169,24 +1206,6 @@ type AudioConfigConstants struct {
|
|||
// Default 5ms provides quick batch processing with reasonable timeout.
|
||||
BatchProcessorTimeout time.Duration
|
||||
|
||||
// BatchProcessorMaxQueueSize defines maximum queue size for batch operations.
|
||||
// Used in: batch_audio.go for queue size control
|
||||
// Impact: Larger queues reduce blocking but increase memory usage.
|
||||
// Default 16 provides good balance between memory and performance.
|
||||
BatchProcessorMaxQueueSize int
|
||||
|
||||
// BatchProcessorAdaptiveThreshold defines threshold for adaptive batch sizing.
|
||||
// Used in: batch_audio.go for dynamic batch size adjustment
|
||||
// Impact: Lower thresholds enable more aggressive batching.
|
||||
// Default 0.8 enables batching when 80% of queue is full.
|
||||
BatchProcessorAdaptiveThreshold float64
|
||||
|
||||
// BatchProcessorThreadPinningThreshold defines minimum batch size for thread pinning.
|
||||
// Used in: batch_audio.go for OS thread pinning optimization
|
||||
// Impact: Higher thresholds reduce thread pinning overhead.
|
||||
// Default 8 frames enables pinning for larger batches only.
|
||||
BatchProcessorThreadPinningThreshold int
|
||||
|
||||
// Output Streaming Constants - Configuration for audio output streaming
|
||||
// Used in: output_streaming.go for output stream timing control
|
||||
// Impact: Controls output streaming frame rate and timing
|
||||
|
|
@ -1483,7 +1502,36 @@ type AudioConfigConstants struct {
|
|||
// Default 512 bytes accommodates typical encoding variations.
|
||||
FrameSizeTolerance int
|
||||
|
||||
// Removed device health monitoring configuration - functionality not used
|
||||
// Device Health Monitoring Configuration
|
||||
// Used in: device_health.go for proactive device monitoring and recovery
|
||||
// Impact: Controls health check frequency and recovery thresholds
|
||||
|
||||
// HealthCheckIntervalMS defines interval between device health checks in milliseconds.
|
||||
// Used in: DeviceHealthMonitor for periodic health assessment
|
||||
// Impact: Lower values provide faster detection but increase CPU usage.
|
||||
// Default 5000ms (5s) provides good balance between responsiveness and overhead.
|
||||
HealthCheckIntervalMS int
|
||||
|
||||
// HealthRecoveryThreshold defines number of consecutive successful operations
|
||||
// required to mark a device as healthy after being unhealthy.
|
||||
// Used in: DeviceHealthMonitor for recovery state management
|
||||
// Impact: Higher values prevent premature recovery declarations.
|
||||
// Default 3 consecutive successes ensures stable recovery.
|
||||
HealthRecoveryThreshold int
|
||||
|
||||
// HealthLatencyThresholdMS defines maximum acceptable latency in milliseconds
|
||||
// before considering a device unhealthy.
|
||||
// Used in: DeviceHealthMonitor for latency-based health assessment
|
||||
// Impact: Lower values trigger recovery sooner but may cause false positives.
|
||||
// Default 100ms provides reasonable threshold for real-time audio.
|
||||
HealthLatencyThresholdMS int
|
||||
|
||||
// HealthErrorRateLimit defines maximum error rate (0.0-1.0) before
|
||||
// considering a device unhealthy.
|
||||
// Used in: DeviceHealthMonitor for error rate assessment
|
||||
// Impact: Lower values trigger recovery sooner for error-prone devices.
|
||||
// Default 0.1 (10%) allows some transient errors while detecting problems.
|
||||
HealthErrorRateLimit float64
|
||||
|
||||
// Latency Histogram Bucket Configuration
|
||||
// Used in: LatencyHistogram for granular latency measurement buckets
|
||||
|
|
@ -1770,6 +1818,47 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
// Used in: Non-critical audio processing tasks
|
||||
// Impact: Provides standard scheduling suitable for non-critical tasks.
|
||||
// Default 0 (SCHED_NORMAL) for standard time-sharing scheduling.
|
||||
SchedNormal: 0,
|
||||
|
||||
// SchedFIFO defines real-time first-in-first-out scheduling policy.
|
||||
// Used in: Critical audio processing requiring deterministic timing
|
||||
// Impact: Provides deterministic scheduling for latency-critical operations.
|
||||
// Default 1 (SCHED_FIFO) for real-time first-in-first-out scheduling.
|
||||
SchedFIFO: 1,
|
||||
|
||||
// SchedRR defines real-time round-robin scheduling policy.
|
||||
// Used in: Balanced real-time processing with time slicing
|
||||
// Impact: Provides real-time scheduling with balanced time slicing.
|
||||
// Default 2 (SCHED_RR) for real-time round-robin scheduling.
|
||||
SchedRR: 2,
|
||||
|
||||
// Real-time Priority Levels - Configuration for process priorities
|
||||
// Used in: Process priority management and CPU scheduling
|
||||
// Impact: Controls priority hierarchy for audio system components
|
||||
|
||||
// RTAudioHighPriority defines highest priority for audio processing.
|
||||
// Used in: Latency-critical audio operations and CPU priority assignment
|
||||
// Impact: Ensures highest CPU priority without starving system processes.
|
||||
// Default 80 provides highest priority for latency-critical operations.
|
||||
RTAudioHighPriority: 80,
|
||||
|
||||
// RTAudioMediumPriority defines medium priority for audio tasks.
|
||||
// Used in: Important audio tasks requiring elevated priority
|
||||
// Impact: Provides elevated priority while allowing higher priority operations.
|
||||
// Default 60 balances importance with system operation priority.
|
||||
RTAudioMediumPriority: 60,
|
||||
|
||||
// RTAudioLowPriority defines low priority for audio tasks.
|
||||
// Used in: Audio tasks needing responsiveness but not latency-critical
|
||||
// Impact: Provides moderate real-time priority for responsive tasks.
|
||||
// Default 40 ensures responsiveness without being latency-critical.
|
||||
RTAudioLowPriority: 40,
|
||||
|
||||
// RTNormalPriority defines normal scheduling priority.
|
||||
// Used in: Non-real-time audio processing tasks
|
||||
// Impact: Provides standard priority for non-real-time operations.
|
||||
// Default 0 represents normal scheduling priority.
|
||||
RTNormalPriority: 0,
|
||||
|
||||
// Process Management - Configuration for process restart and recovery
|
||||
// Used in: Process monitoring and failure recovery systems
|
||||
|
|
@ -2058,17 +2147,17 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
// Used in: process management, thread scheduling for audio processing
|
||||
// Impact: Controls CPU scheduling priority for audio threads
|
||||
|
||||
// AudioHighPriority defines highest priority for critical audio threads (5).
|
||||
// AudioHighPriority defines highest priority for critical audio threads (-10).
|
||||
// Used in: Real-time audio processing threads, encoder/decoder threads
|
||||
// Impact: Ensures audio threads get CPU time but prioritizes mouse input
|
||||
// Modified to 5 to prevent mouse lag on single-core RV1106
|
||||
AudioHighPriority: 5,
|
||||
// Impact: Ensures audio threads get CPU time before other processes
|
||||
// Default -10 provides high priority without requiring root privileges
|
||||
AudioHighPriority: -10,
|
||||
|
||||
// AudioMediumPriority defines medium priority for important audio threads (10).
|
||||
// AudioMediumPriority defines medium priority for important audio threads (-5).
|
||||
// Used in: Audio buffer management, IPC communication threads
|
||||
// Impact: Balances audio performance with system responsiveness
|
||||
// Modified to 10 to prioritize mouse input on single-core RV1106
|
||||
AudioMediumPriority: 10,
|
||||
// Default -5 ensures good performance while allowing other critical tasks
|
||||
AudioMediumPriority: -5,
|
||||
|
||||
// AudioLowPriority defines low priority for non-critical audio threads (0).
|
||||
// Used in: Metrics collection, logging, cleanup tasks
|
||||
|
|
@ -2082,11 +2171,11 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
// Default 0 represents normal Linux process priority
|
||||
NormalPriority: 0,
|
||||
|
||||
// NiceValue defines default nice value for audio processes (5).
|
||||
// NiceValue defines default nice value for audio processes (-10).
|
||||
// Used in: Process creation, priority adjustment for audio components
|
||||
// Impact: Ensures audio processes don't interfere with mouse input
|
||||
// Modified to 5 to prioritize mouse input on single-core RV1106
|
||||
NiceValue: 5,
|
||||
// Impact: Improves audio process scheduling without requiring special privileges
|
||||
// Default -10 provides better scheduling while remaining accessible to non-root users
|
||||
NiceValue: -10,
|
||||
|
||||
// Error Handling - Configuration for robust error recovery and retry logic
|
||||
// Used in: Throughout audio pipeline for handling transient failures
|
||||
|
|
@ -2254,9 +2343,6 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
// Microphone Contention Configuration
|
||||
MicContentionTimeout: 200 * time.Millisecond,
|
||||
|
||||
// Subprocess Pre-warming Configuration
|
||||
EnableSubprocessPrewarming: true,
|
||||
|
||||
// Priority Scheduler Configuration
|
||||
MinNiceValue: -20,
|
||||
MaxNiceValue: 19,
|
||||
|
|
@ -2336,11 +2422,8 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
ProcessMonitorTraditionalHz: 100.0, // 100.0 Hz traditional clock
|
||||
|
||||
// Batch Processing Constants
|
||||
BatchProcessorFramesPerBatch: 4, // 4 frames per batch
|
||||
BatchProcessorTimeout: 5 * time.Millisecond, // 5ms timeout
|
||||
BatchProcessorMaxQueueSize: 16, // 16 max queue size for balanced memory/performance
|
||||
BatchProcessorAdaptiveThreshold: 0.8, // 0.8 threshold for adaptive batching (80% queue full)
|
||||
BatchProcessorThreadPinningThreshold: 8, // 8 frames minimum for thread pinning optimization
|
||||
BatchProcessorFramesPerBatch: 4, // 4 frames per batch
|
||||
BatchProcessorTimeout: 5 * time.Millisecond, // 5ms timeout
|
||||
|
||||
// Output Streaming Constants
|
||||
OutputStreamingFrameIntervalMS: 20, // 20ms frame interval (50 FPS)
|
||||
|
|
@ -2421,7 +2504,11 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
MinFrameSize: 1, // 1 byte minimum frame size (allow small frames)
|
||||
FrameSizeTolerance: 512, // 512 bytes frame size tolerance
|
||||
|
||||
// Removed device health monitoring configuration - functionality not used
|
||||
// Device Health Monitoring Configuration
|
||||
HealthCheckIntervalMS: 5000, // 5000ms (5s) health check interval
|
||||
HealthRecoveryThreshold: 3, // 3 consecutive successes for recovery
|
||||
HealthLatencyThresholdMS: 100, // 100ms latency threshold for health
|
||||
HealthErrorRateLimit: 0.1, // 10% error rate limit for health
|
||||
|
||||
// Latency Histogram Bucket Configuration
|
||||
LatencyBucket10ms: 10 * time.Millisecond, // 10ms latency bucket
|
||||
|
|
@ -2438,9 +2525,6 @@ func DefaultAudioConfig() *AudioConfigConstants {
|
|||
|
||||
// Goroutine Monitoring Configuration
|
||||
GoroutineMonitorInterval: 30 * time.Second, // 30s monitoring interval
|
||||
|
||||
// Performance Configuration Flags - Production optimizations
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,514 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// DeviceHealthStatus represents the health status of an audio device
|
||||
type DeviceHealthStatus int
|
||||
|
||||
const (
|
||||
DeviceHealthUnknown DeviceHealthStatus = iota
|
||||
DeviceHealthHealthy
|
||||
DeviceHealthDegraded
|
||||
DeviceHealthFailing
|
||||
DeviceHealthCritical
|
||||
)
|
||||
|
||||
func (s DeviceHealthStatus) String() string {
|
||||
switch s {
|
||||
case DeviceHealthHealthy:
|
||||
return "healthy"
|
||||
case DeviceHealthDegraded:
|
||||
return "degraded"
|
||||
case DeviceHealthFailing:
|
||||
return "failing"
|
||||
case DeviceHealthCritical:
|
||||
return "critical"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// DeviceHealthMetrics tracks health-related metrics for audio devices
|
||||
type DeviceHealthMetrics struct {
|
||||
// Error tracking
|
||||
ConsecutiveErrors int64 `json:"consecutive_errors"`
|
||||
TotalErrors int64 `json:"total_errors"`
|
||||
LastErrorTime time.Time `json:"last_error_time"`
|
||||
ErrorRate float64 `json:"error_rate"` // errors per minute
|
||||
|
||||
// Performance metrics
|
||||
AverageLatency time.Duration `json:"average_latency"`
|
||||
MaxLatency time.Duration `json:"max_latency"`
|
||||
LatencySpikes int64 `json:"latency_spikes"`
|
||||
Underruns int64 `json:"underruns"`
|
||||
Overruns int64 `json:"overruns"`
|
||||
|
||||
// Device availability
|
||||
LastSuccessfulOp time.Time `json:"last_successful_op"`
|
||||
DeviceDisconnects int64 `json:"device_disconnects"`
|
||||
RecoveryAttempts int64 `json:"recovery_attempts"`
|
||||
SuccessfulRecoveries int64 `json:"successful_recoveries"`
|
||||
|
||||
// Health assessment
|
||||
CurrentStatus DeviceHealthStatus `json:"current_status"`
|
||||
StatusLastChanged time.Time `json:"status_last_changed"`
|
||||
HealthScore float64 `json:"health_score"` // 0.0 to 1.0
|
||||
}
|
||||
|
||||
// DeviceHealthMonitor monitors the health of audio devices and triggers recovery
|
||||
type DeviceHealthMonitor struct {
|
||||
// Atomic fields first for ARM32 alignment
|
||||
running int32
|
||||
monitoringEnabled int32
|
||||
|
||||
// Configuration
|
||||
checkInterval time.Duration
|
||||
recoveryThreshold int
|
||||
latencyThreshold time.Duration
|
||||
errorRateLimit float64 // max errors per minute
|
||||
|
||||
// State tracking
|
||||
captureMetrics *DeviceHealthMetrics
|
||||
playbackMetrics *DeviceHealthMetrics
|
||||
mutex sync.RWMutex
|
||||
|
||||
// Control channels
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
stopChan chan struct{}
|
||||
doneChan chan struct{}
|
||||
|
||||
// Recovery callbacks
|
||||
recoveryCallbacks map[string]func() error
|
||||
callbackMutex sync.RWMutex
|
||||
|
||||
// Logging
|
||||
logger zerolog.Logger
|
||||
config *AudioConfigConstants
|
||||
}
|
||||
|
||||
// NewDeviceHealthMonitor creates a new device health monitor
|
||||
func NewDeviceHealthMonitor() *DeviceHealthMonitor {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
config := GetConfig()
|
||||
|
||||
return &DeviceHealthMonitor{
|
||||
checkInterval: time.Duration(config.HealthCheckIntervalMS) * time.Millisecond,
|
||||
recoveryThreshold: config.HealthRecoveryThreshold,
|
||||
latencyThreshold: time.Duration(config.HealthLatencyThresholdMS) * time.Millisecond,
|
||||
errorRateLimit: config.HealthErrorRateLimit,
|
||||
captureMetrics: &DeviceHealthMetrics{
|
||||
CurrentStatus: DeviceHealthUnknown,
|
||||
HealthScore: 1.0,
|
||||
},
|
||||
playbackMetrics: &DeviceHealthMetrics{
|
||||
CurrentStatus: DeviceHealthUnknown,
|
||||
HealthScore: 1.0,
|
||||
},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
stopChan: make(chan struct{}),
|
||||
doneChan: make(chan struct{}),
|
||||
recoveryCallbacks: make(map[string]func() error),
|
||||
logger: logging.GetDefaultLogger().With().Str("component", "device-health-monitor").Logger(),
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins health monitoring
|
||||
func (dhm *DeviceHealthMonitor) Start() error {
|
||||
if !atomic.CompareAndSwapInt32(&dhm.running, 0, 1) {
|
||||
return fmt.Errorf("device health monitor already running")
|
||||
}
|
||||
|
||||
dhm.logger.Debug().Msg("device health monitor starting")
|
||||
atomic.StoreInt32(&dhm.monitoringEnabled, 1)
|
||||
|
||||
go dhm.monitoringLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops health monitoring
|
||||
func (dhm *DeviceHealthMonitor) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&dhm.running, 1, 0) {
|
||||
return
|
||||
}
|
||||
|
||||
dhm.logger.Debug().Msg("device health monitor stopping")
|
||||
atomic.StoreInt32(&dhm.monitoringEnabled, 0)
|
||||
|
||||
close(dhm.stopChan)
|
||||
dhm.cancel()
|
||||
|
||||
// Wait for monitoring loop to finish
|
||||
select {
|
||||
case <-dhm.doneChan:
|
||||
dhm.logger.Debug().Msg("device health monitor stopped")
|
||||
case <-time.After(time.Duration(dhm.config.SupervisorTimeout)):
|
||||
dhm.logger.Warn().Msg("device health monitor stop timeout")
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRecoveryCallback registers a recovery function for a specific component
|
||||
func (dhm *DeviceHealthMonitor) RegisterRecoveryCallback(component string, callback func() error) {
|
||||
dhm.callbackMutex.Lock()
|
||||
defer dhm.callbackMutex.Unlock()
|
||||
dhm.recoveryCallbacks[component] = callback
|
||||
dhm.logger.Debug().Str("component", component).Msg("registered recovery callback")
|
||||
}
|
||||
|
||||
// RecordError records an error for health tracking
|
||||
func (dhm *DeviceHealthMonitor) RecordError(deviceType string, err error) {
|
||||
if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
dhm.mutex.Lock()
|
||||
defer dhm.mutex.Unlock()
|
||||
|
||||
var metrics *DeviceHealthMetrics
|
||||
switch deviceType {
|
||||
case "capture":
|
||||
metrics = dhm.captureMetrics
|
||||
case "playback":
|
||||
metrics = dhm.playbackMetrics
|
||||
default:
|
||||
dhm.logger.Warn().Str("device_type", deviceType).Msg("unknown device type for error recording")
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt64(&metrics.ConsecutiveErrors, 1)
|
||||
atomic.AddInt64(&metrics.TotalErrors, 1)
|
||||
metrics.LastErrorTime = time.Now()
|
||||
|
||||
// Update error rate (errors per minute)
|
||||
if !metrics.LastErrorTime.IsZero() {
|
||||
timeSinceFirst := time.Since(metrics.LastErrorTime)
|
||||
if timeSinceFirst > 0 {
|
||||
metrics.ErrorRate = float64(metrics.TotalErrors) / timeSinceFirst.Minutes()
|
||||
}
|
||||
}
|
||||
|
||||
dhm.logger.Debug().
|
||||
Str("device_type", deviceType).
|
||||
Err(err).
|
||||
Int64("consecutive_errors", metrics.ConsecutiveErrors).
|
||||
Float64("error_rate", metrics.ErrorRate).
|
||||
Msg("recorded device error")
|
||||
|
||||
// Trigger immediate health assessment
|
||||
dhm.assessDeviceHealth(deviceType, metrics)
|
||||
}
|
||||
|
||||
// RecordSuccess records a successful operation
|
||||
func (dhm *DeviceHealthMonitor) RecordSuccess(deviceType string) {
|
||||
if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
dhm.mutex.Lock()
|
||||
defer dhm.mutex.Unlock()
|
||||
|
||||
var metrics *DeviceHealthMetrics
|
||||
switch deviceType {
|
||||
case "capture":
|
||||
metrics = dhm.captureMetrics
|
||||
case "playback":
|
||||
metrics = dhm.playbackMetrics
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
// Reset consecutive errors on success
|
||||
atomic.StoreInt64(&metrics.ConsecutiveErrors, 0)
|
||||
metrics.LastSuccessfulOp = time.Now()
|
||||
|
||||
// Improve health score gradually
|
||||
if metrics.HealthScore < 1.0 {
|
||||
metrics.HealthScore = min(1.0, metrics.HealthScore+0.1)
|
||||
}
|
||||
}
|
||||
|
||||
// RecordLatency records operation latency for health assessment
|
||||
func (dhm *DeviceHealthMonitor) RecordLatency(deviceType string, latency time.Duration) {
|
||||
if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
dhm.mutex.Lock()
|
||||
defer dhm.mutex.Unlock()
|
||||
|
||||
var metrics *DeviceHealthMetrics
|
||||
switch deviceType {
|
||||
case "capture":
|
||||
metrics = dhm.captureMetrics
|
||||
case "playback":
|
||||
metrics = dhm.playbackMetrics
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
// Update latency metrics
|
||||
if metrics.AverageLatency == 0 {
|
||||
metrics.AverageLatency = latency
|
||||
} else {
|
||||
// Exponential moving average
|
||||
metrics.AverageLatency = time.Duration(float64(metrics.AverageLatency)*0.9 + float64(latency)*0.1)
|
||||
}
|
||||
|
||||
if latency > metrics.MaxLatency {
|
||||
metrics.MaxLatency = latency
|
||||
}
|
||||
|
||||
// Track latency spikes
|
||||
if latency > dhm.latencyThreshold {
|
||||
atomic.AddInt64(&metrics.LatencySpikes, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// RecordUnderrun records an audio underrun event
|
||||
func (dhm *DeviceHealthMonitor) RecordUnderrun(deviceType string) {
|
||||
if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
dhm.mutex.Lock()
|
||||
defer dhm.mutex.Unlock()
|
||||
|
||||
var metrics *DeviceHealthMetrics
|
||||
switch deviceType {
|
||||
case "capture":
|
||||
metrics = dhm.captureMetrics
|
||||
case "playback":
|
||||
metrics = dhm.playbackMetrics
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt64(&metrics.Underruns, 1)
|
||||
dhm.logger.Debug().Str("device_type", deviceType).Msg("recorded audio underrun")
|
||||
}
|
||||
|
||||
// RecordOverrun records an audio overrun event
|
||||
func (dhm *DeviceHealthMonitor) RecordOverrun(deviceType string) {
|
||||
if atomic.LoadInt32(&dhm.monitoringEnabled) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
dhm.mutex.Lock()
|
||||
defer dhm.mutex.Unlock()
|
||||
|
||||
var metrics *DeviceHealthMetrics
|
||||
switch deviceType {
|
||||
case "capture":
|
||||
metrics = dhm.captureMetrics
|
||||
case "playback":
|
||||
metrics = dhm.playbackMetrics
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt64(&metrics.Overruns, 1)
|
||||
dhm.logger.Debug().Str("device_type", deviceType).Msg("recorded audio overrun")
|
||||
}
|
||||
|
||||
// GetHealthMetrics returns current health metrics
|
||||
func (dhm *DeviceHealthMonitor) GetHealthMetrics() (capture, playback DeviceHealthMetrics) {
|
||||
dhm.mutex.RLock()
|
||||
defer dhm.mutex.RUnlock()
|
||||
return *dhm.captureMetrics, *dhm.playbackMetrics
|
||||
}
|
||||
|
||||
// monitoringLoop runs the main health monitoring loop
|
||||
func (dhm *DeviceHealthMonitor) monitoringLoop() {
|
||||
defer close(dhm.doneChan)
|
||||
|
||||
ticker := time.NewTicker(dhm.checkInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-dhm.stopChan:
|
||||
return
|
||||
case <-dhm.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
dhm.performHealthCheck()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// performHealthCheck performs a comprehensive health check
|
||||
func (dhm *DeviceHealthMonitor) performHealthCheck() {
|
||||
dhm.mutex.Lock()
|
||||
defer dhm.mutex.Unlock()
|
||||
|
||||
// Assess health for both devices
|
||||
dhm.assessDeviceHealth("capture", dhm.captureMetrics)
|
||||
dhm.assessDeviceHealth("playback", dhm.playbackMetrics)
|
||||
|
||||
// Check if recovery is needed
|
||||
dhm.checkRecoveryNeeded("capture", dhm.captureMetrics)
|
||||
dhm.checkRecoveryNeeded("playback", dhm.playbackMetrics)
|
||||
}
|
||||
|
||||
// assessDeviceHealth assesses the health status of a device
|
||||
func (dhm *DeviceHealthMonitor) assessDeviceHealth(deviceType string, metrics *DeviceHealthMetrics) {
|
||||
previousStatus := metrics.CurrentStatus
|
||||
newStatus := dhm.calculateHealthStatus(metrics)
|
||||
|
||||
if newStatus != previousStatus {
|
||||
metrics.CurrentStatus = newStatus
|
||||
metrics.StatusLastChanged = time.Now()
|
||||
dhm.logger.Info().
|
||||
Str("device_type", deviceType).
|
||||
Str("previous_status", previousStatus.String()).
|
||||
Str("new_status", newStatus.String()).
|
||||
Float64("health_score", metrics.HealthScore).
|
||||
Msg("device health status changed")
|
||||
}
|
||||
|
||||
// Update health score
|
||||
metrics.HealthScore = dhm.calculateHealthScore(metrics)
|
||||
}
|
||||
|
||||
// calculateHealthStatus determines health status based on metrics
|
||||
func (dhm *DeviceHealthMonitor) calculateHealthStatus(metrics *DeviceHealthMetrics) DeviceHealthStatus {
|
||||
consecutiveErrors := atomic.LoadInt64(&metrics.ConsecutiveErrors)
|
||||
totalErrors := atomic.LoadInt64(&metrics.TotalErrors)
|
||||
|
||||
// Critical: Too many consecutive errors or device disconnected recently
|
||||
if consecutiveErrors >= int64(dhm.recoveryThreshold) {
|
||||
return DeviceHealthCritical
|
||||
}
|
||||
|
||||
// Critical: No successful operations in a long time
|
||||
if !metrics.LastSuccessfulOp.IsZero() && time.Since(metrics.LastSuccessfulOp) > time.Duration(dhm.config.SupervisorTimeout) {
|
||||
return DeviceHealthCritical
|
||||
}
|
||||
|
||||
// Failing: High error rate or frequent latency spikes
|
||||
if metrics.ErrorRate > dhm.errorRateLimit || atomic.LoadInt64(&metrics.LatencySpikes) > int64(dhm.config.MaxDroppedFrames) {
|
||||
return DeviceHealthFailing
|
||||
}
|
||||
|
||||
// Degraded: Some errors or performance issues
|
||||
if consecutiveErrors > 0 || totalErrors > int64(dhm.config.MaxDroppedFrames/2) || metrics.AverageLatency > dhm.latencyThreshold {
|
||||
return DeviceHealthDegraded
|
||||
}
|
||||
|
||||
// Healthy: No significant issues
|
||||
return DeviceHealthHealthy
|
||||
}
|
||||
|
||||
// calculateHealthScore calculates a numeric health score (0.0 to 1.0)
|
||||
func (dhm *DeviceHealthMonitor) calculateHealthScore(metrics *DeviceHealthMetrics) float64 {
|
||||
score := 1.0
|
||||
|
||||
// Penalize consecutive errors
|
||||
consecutiveErrors := atomic.LoadInt64(&metrics.ConsecutiveErrors)
|
||||
if consecutiveErrors > 0 {
|
||||
score -= float64(consecutiveErrors) * 0.1
|
||||
}
|
||||
|
||||
// Penalize high error rate
|
||||
if metrics.ErrorRate > 0 {
|
||||
score -= min(0.5, metrics.ErrorRate/dhm.errorRateLimit*0.5)
|
||||
}
|
||||
|
||||
// Penalize high latency
|
||||
if metrics.AverageLatency > dhm.latencyThreshold {
|
||||
excess := float64(metrics.AverageLatency-dhm.latencyThreshold) / float64(dhm.latencyThreshold)
|
||||
score -= min(0.3, excess*0.3)
|
||||
}
|
||||
|
||||
// Penalize underruns/overruns
|
||||
underruns := atomic.LoadInt64(&metrics.Underruns)
|
||||
overruns := atomic.LoadInt64(&metrics.Overruns)
|
||||
if underruns+overruns > 0 {
|
||||
score -= min(0.2, float64(underruns+overruns)*0.01)
|
||||
}
|
||||
|
||||
return max(0.0, score)
|
||||
}
|
||||
|
||||
// checkRecoveryNeeded checks if recovery is needed and triggers it
|
||||
func (dhm *DeviceHealthMonitor) checkRecoveryNeeded(deviceType string, metrics *DeviceHealthMetrics) {
|
||||
if metrics.CurrentStatus == DeviceHealthCritical {
|
||||
dhm.triggerRecovery(deviceType, metrics)
|
||||
}
|
||||
}
|
||||
|
||||
// triggerRecovery triggers recovery for a device
|
||||
func (dhm *DeviceHealthMonitor) triggerRecovery(deviceType string, metrics *DeviceHealthMetrics) {
|
||||
atomic.AddInt64(&metrics.RecoveryAttempts, 1)
|
||||
|
||||
dhm.logger.Warn().
|
||||
Str("device_type", deviceType).
|
||||
Str("status", metrics.CurrentStatus.String()).
|
||||
Int64("consecutive_errors", atomic.LoadInt64(&metrics.ConsecutiveErrors)).
|
||||
Float64("error_rate", metrics.ErrorRate).
|
||||
Msg("triggering device recovery")
|
||||
|
||||
// Try registered recovery callbacks
|
||||
dhm.callbackMutex.RLock()
|
||||
defer dhm.callbackMutex.RUnlock()
|
||||
|
||||
for component, callback := range dhm.recoveryCallbacks {
|
||||
if callback != nil {
|
||||
go func(comp string, cb func() error) {
|
||||
if err := cb(); err != nil {
|
||||
dhm.logger.Error().
|
||||
Str("component", comp).
|
||||
Str("device_type", deviceType).
|
||||
Err(err).
|
||||
Msg("recovery callback failed")
|
||||
} else {
|
||||
atomic.AddInt64(&metrics.SuccessfulRecoveries, 1)
|
||||
dhm.logger.Info().
|
||||
Str("component", comp).
|
||||
Str("device_type", deviceType).
|
||||
Msg("recovery callback succeeded")
|
||||
}
|
||||
}(component, callback)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Global device health monitor instance
|
||||
var (
|
||||
globalDeviceHealthMonitor *DeviceHealthMonitor
|
||||
deviceHealthOnce sync.Once
|
||||
)
|
||||
|
||||
// GetDeviceHealthMonitor returns the global device health monitor
|
||||
func GetDeviceHealthMonitor() *DeviceHealthMonitor {
|
||||
deviceHealthOnce.Do(func() {
|
||||
globalDeviceHealthMonitor = NewDeviceHealthMonitor()
|
||||
})
|
||||
return globalDeviceHealthMonitor
|
||||
}
|
||||
|
||||
// Helper functions for min/max
|
||||
func min(a, b float64) float64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b float64) float64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
|
@ -2,6 +2,7 @@ package audio
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
|
@ -16,9 +17,13 @@ import (
|
|||
type AudioEventType string
|
||||
|
||||
const (
|
||||
AudioEventMuteChanged AudioEventType = "audio-mute-changed"
|
||||
AudioEventMicrophoneState AudioEventType = "microphone-state-changed"
|
||||
AudioEventDeviceChanged AudioEventType = "audio-device-changed"
|
||||
AudioEventMuteChanged AudioEventType = "audio-mute-changed"
|
||||
AudioEventMetricsUpdate AudioEventType = "audio-metrics-update"
|
||||
AudioEventMicrophoneState AudioEventType = "microphone-state-changed"
|
||||
AudioEventMicrophoneMetrics AudioEventType = "microphone-metrics-update"
|
||||
AudioEventProcessMetrics AudioEventType = "audio-process-metrics"
|
||||
AudioEventMicProcessMetrics AudioEventType = "microphone-process-metrics"
|
||||
AudioEventDeviceChanged AudioEventType = "audio-device-changed"
|
||||
)
|
||||
|
||||
// AudioEvent represents a WebSocket audio event
|
||||
|
|
@ -32,12 +37,43 @@ type AudioMuteData struct {
|
|||
Muted bool `json:"muted"`
|
||||
}
|
||||
|
||||
// AudioMetricsData represents audio metrics data
|
||||
type AudioMetricsData struct {
|
||||
FramesReceived int64 `json:"frames_received"`
|
||||
FramesDropped int64 `json:"frames_dropped"`
|
||||
BytesProcessed int64 `json:"bytes_processed"`
|
||||
LastFrameTime string `json:"last_frame_time"`
|
||||
ConnectionDrops int64 `json:"connection_drops"`
|
||||
AverageLatency string `json:"average_latency"`
|
||||
}
|
||||
|
||||
// MicrophoneStateData represents microphone state data
|
||||
type MicrophoneStateData struct {
|
||||
Running bool `json:"running"`
|
||||
SessionActive bool `json:"session_active"`
|
||||
}
|
||||
|
||||
// MicrophoneMetricsData represents microphone metrics data
|
||||
type MicrophoneMetricsData struct {
|
||||
FramesSent int64 `json:"frames_sent"`
|
||||
FramesDropped int64 `json:"frames_dropped"`
|
||||
BytesProcessed int64 `json:"bytes_processed"`
|
||||
LastFrameTime string `json:"last_frame_time"`
|
||||
ConnectionDrops int64 `json:"connection_drops"`
|
||||
AverageLatency string `json:"average_latency"`
|
||||
}
|
||||
|
||||
// ProcessMetricsData represents process metrics data for WebSocket events
|
||||
type ProcessMetricsData struct {
|
||||
PID int `json:"pid"`
|
||||
CPUPercent float64 `json:"cpu_percent"`
|
||||
MemoryRSS int64 `json:"memory_rss"`
|
||||
MemoryVMS int64 `json:"memory_vms"`
|
||||
MemoryPercent float64 `json:"memory_percent"`
|
||||
Running bool `json:"running"`
|
||||
ProcessName string `json:"process_name"`
|
||||
}
|
||||
|
||||
// AudioDeviceChangedData represents audio device configuration change data
|
||||
type AudioDeviceChangedData struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
|
|
@ -70,6 +106,12 @@ func initializeBroadcaster() {
|
|||
subscribers: make(map[string]*AudioEventSubscriber),
|
||||
logger: &l,
|
||||
}
|
||||
|
||||
// Start metrics broadcasting goroutine
|
||||
go audioEventBroadcaster.startMetricsBroadcasting()
|
||||
|
||||
// Start granular metrics logging with same interval as metrics broadcasting
|
||||
// StartGranularMetricsLogging(GetMetricsUpdateInterval()) // Disabled to reduce log pollution
|
||||
}
|
||||
|
||||
// InitializeAudioEventBroadcaster initializes the global audio event broadcaster
|
||||
|
|
@ -176,6 +218,90 @@ func (aeb *AudioEventBroadcaster) sendInitialState(connectionID string) {
|
|||
},
|
||||
}
|
||||
aeb.sendToSubscriber(subscriber, micStateEvent)
|
||||
|
||||
// Send current metrics
|
||||
aeb.sendCurrentMetrics(subscriber)
|
||||
}
|
||||
|
||||
// convertAudioMetricsToEventDataWithLatencyMs converts internal audio metrics to AudioMetricsData with millisecond latency formatting
|
||||
func convertAudioMetricsToEventDataWithLatencyMs(metrics AudioMetrics) AudioMetricsData {
|
||||
return AudioMetricsData{
|
||||
FramesReceived: metrics.FramesReceived,
|
||||
FramesDropped: metrics.FramesDropped,
|
||||
BytesProcessed: metrics.BytesProcessed,
|
||||
LastFrameTime: metrics.LastFrameTime.Format(GetConfig().EventTimeFormatString),
|
||||
ConnectionDrops: metrics.ConnectionDrops,
|
||||
AverageLatency: fmt.Sprintf("%.1fms", float64(metrics.AverageLatency.Nanoseconds())/1e6),
|
||||
}
|
||||
}
|
||||
|
||||
// convertAudioInputMetricsToEventDataWithLatencyMs converts internal audio input metrics to MicrophoneMetricsData with millisecond latency formatting
|
||||
func convertAudioInputMetricsToEventDataWithLatencyMs(metrics AudioInputMetrics) MicrophoneMetricsData {
|
||||
return MicrophoneMetricsData{
|
||||
FramesSent: metrics.FramesSent,
|
||||
FramesDropped: metrics.FramesDropped,
|
||||
BytesProcessed: metrics.BytesProcessed,
|
||||
LastFrameTime: metrics.LastFrameTime.Format(GetConfig().EventTimeFormatString),
|
||||
ConnectionDrops: metrics.ConnectionDrops,
|
||||
AverageLatency: fmt.Sprintf("%.1fms", float64(metrics.AverageLatency.Nanoseconds())/1e6),
|
||||
}
|
||||
}
|
||||
|
||||
// convertProcessMetricsToEventData converts internal process metrics to ProcessMetricsData for events
|
||||
func convertProcessMetricsToEventData(metrics ProcessMetrics, running bool) ProcessMetricsData {
|
||||
return ProcessMetricsData{
|
||||
PID: metrics.PID,
|
||||
CPUPercent: metrics.CPUPercent,
|
||||
MemoryRSS: metrics.MemoryRSS,
|
||||
MemoryVMS: metrics.MemoryVMS,
|
||||
MemoryPercent: metrics.MemoryPercent,
|
||||
Running: running,
|
||||
ProcessName: metrics.ProcessName,
|
||||
}
|
||||
}
|
||||
|
||||
// createProcessMetricsData creates ProcessMetricsData from ProcessMetrics with running status
|
||||
func createProcessMetricsData(metrics *ProcessMetrics, running bool, processName string) ProcessMetricsData {
|
||||
if metrics == nil {
|
||||
return ProcessMetricsData{
|
||||
PID: 0,
|
||||
CPUPercent: 0.0,
|
||||
MemoryRSS: 0,
|
||||
MemoryVMS: 0,
|
||||
MemoryPercent: 0.0,
|
||||
Running: false,
|
||||
ProcessName: processName,
|
||||
}
|
||||
}
|
||||
return ProcessMetricsData{
|
||||
PID: metrics.PID,
|
||||
CPUPercent: metrics.CPUPercent,
|
||||
MemoryRSS: metrics.MemoryRSS,
|
||||
MemoryVMS: metrics.MemoryVMS,
|
||||
MemoryPercent: metrics.MemoryPercent,
|
||||
Running: running,
|
||||
ProcessName: metrics.ProcessName,
|
||||
}
|
||||
}
|
||||
|
||||
// getInactiveProcessMetrics returns ProcessMetricsData for an inactive audio input process
|
||||
func getInactiveProcessMetrics() ProcessMetricsData {
|
||||
return createProcessMetricsData(nil, false, "audio-input-server")
|
||||
}
|
||||
|
||||
// getActiveAudioInputSupervisor safely retrieves the audio input supervisor if session is active
|
||||
func getActiveAudioInputSupervisor() *AudioInputSupervisor {
|
||||
sessionProvider := GetSessionProvider()
|
||||
if !sessionProvider.IsSessionActive() {
|
||||
return nil
|
||||
}
|
||||
|
||||
inputManager := sessionProvider.GetAudioInputManager()
|
||||
if inputManager == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return inputManager.GetSupervisor()
|
||||
}
|
||||
|
||||
// createAudioEvent creates an AudioEvent
|
||||
|
|
@ -186,6 +312,122 @@ func createAudioEvent(eventType AudioEventType, data interface{}) AudioEvent {
|
|||
}
|
||||
}
|
||||
|
||||
func (aeb *AudioEventBroadcaster) getMicrophoneProcessMetrics() ProcessMetricsData {
|
||||
inputSupervisor := getActiveAudioInputSupervisor()
|
||||
if inputSupervisor == nil {
|
||||
return getInactiveProcessMetrics()
|
||||
}
|
||||
|
||||
processMetrics := inputSupervisor.GetProcessMetrics()
|
||||
if processMetrics == nil {
|
||||
return getInactiveProcessMetrics()
|
||||
}
|
||||
|
||||
// If process is running but CPU is 0%, it means we're waiting for the second sample
|
||||
// to calculate CPU percentage. Return metrics with correct running status.
|
||||
if inputSupervisor.IsRunning() && processMetrics.CPUPercent == 0.0 {
|
||||
return createProcessMetricsData(processMetrics, true, processMetrics.ProcessName)
|
||||
}
|
||||
|
||||
// Subprocess is running, return actual metrics
|
||||
return createProcessMetricsData(processMetrics, inputSupervisor.IsRunning(), processMetrics.ProcessName)
|
||||
}
|
||||
|
||||
// sendCurrentMetrics sends current audio and microphone metrics to a subscriber
|
||||
func (aeb *AudioEventBroadcaster) sendCurrentMetrics(subscriber *AudioEventSubscriber) {
|
||||
// Send audio metrics
|
||||
audioMetrics := GetAudioMetrics()
|
||||
audioMetricsEvent := createAudioEvent(AudioEventMetricsUpdate, convertAudioMetricsToEventDataWithLatencyMs(audioMetrics))
|
||||
aeb.sendToSubscriber(subscriber, audioMetricsEvent)
|
||||
|
||||
// Send audio process metrics
|
||||
if outputSupervisor := GetAudioOutputSupervisor(); outputSupervisor != nil {
|
||||
if processMetrics := outputSupervisor.GetProcessMetrics(); processMetrics != nil {
|
||||
audioProcessEvent := createAudioEvent(AudioEventProcessMetrics, convertProcessMetricsToEventData(*processMetrics, outputSupervisor.IsRunning()))
|
||||
aeb.sendToSubscriber(subscriber, audioProcessEvent)
|
||||
}
|
||||
}
|
||||
|
||||
// Send microphone metrics using session provider
|
||||
sessionProvider := GetSessionProvider()
|
||||
if sessionProvider.IsSessionActive() {
|
||||
if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
|
||||
micMetrics := inputManager.GetMetrics()
|
||||
micMetricsEvent := createAudioEvent(AudioEventMicrophoneMetrics, convertAudioInputMetricsToEventDataWithLatencyMs(micMetrics))
|
||||
aeb.sendToSubscriber(subscriber, micMetricsEvent)
|
||||
}
|
||||
}
|
||||
|
||||
// Send microphone process metrics (always send, even when subprocess is not running)
|
||||
micProcessEvent := createAudioEvent(AudioEventMicProcessMetrics, aeb.getMicrophoneProcessMetrics())
|
||||
aeb.sendToSubscriber(subscriber, micProcessEvent)
|
||||
}
|
||||
|
||||
// startMetricsBroadcasting starts a goroutine that periodically broadcasts metrics
|
||||
func (aeb *AudioEventBroadcaster) startMetricsBroadcasting() {
|
||||
// Use centralized interval to match process monitor frequency for synchronized metrics
|
||||
ticker := time.NewTicker(GetMetricsUpdateInterval())
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
aeb.mutex.RLock()
|
||||
subscriberCount := len(aeb.subscribers)
|
||||
|
||||
// Early exit if no subscribers to save CPU
|
||||
if subscriberCount == 0 {
|
||||
aeb.mutex.RUnlock()
|
||||
continue
|
||||
}
|
||||
|
||||
// Create a copy for safe iteration
|
||||
subscribersCopy := make([]*AudioEventSubscriber, 0, subscriberCount)
|
||||
for _, sub := range aeb.subscribers {
|
||||
subscribersCopy = append(subscribersCopy, sub)
|
||||
}
|
||||
aeb.mutex.RUnlock()
|
||||
|
||||
// Pre-check for cancelled contexts to avoid unnecessary work
|
||||
activeSubscribers := 0
|
||||
for _, sub := range subscribersCopy {
|
||||
if sub.ctx.Err() == nil {
|
||||
activeSubscribers++
|
||||
}
|
||||
}
|
||||
|
||||
// Skip metrics gathering if no active subscribers
|
||||
if activeSubscribers == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Broadcast audio metrics
|
||||
audioMetrics := GetAudioMetrics()
|
||||
audioMetricsEvent := createAudioEvent(AudioEventMetricsUpdate, convertAudioMetricsToEventDataWithLatencyMs(audioMetrics))
|
||||
aeb.broadcast(audioMetricsEvent)
|
||||
|
||||
// Broadcast microphone metrics if available using session provider
|
||||
sessionProvider := GetSessionProvider()
|
||||
if sessionProvider.IsSessionActive() {
|
||||
if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
|
||||
micMetrics := inputManager.GetMetrics()
|
||||
micMetricsEvent := createAudioEvent(AudioEventMicrophoneMetrics, convertAudioInputMetricsToEventDataWithLatencyMs(micMetrics))
|
||||
aeb.broadcast(micMetricsEvent)
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast audio process metrics
|
||||
if outputSupervisor := GetAudioOutputSupervisor(); outputSupervisor != nil {
|
||||
if processMetrics := outputSupervisor.GetProcessMetrics(); processMetrics != nil {
|
||||
audioProcessEvent := createAudioEvent(AudioEventProcessMetrics, convertProcessMetricsToEventData(*processMetrics, outputSupervisor.IsRunning()))
|
||||
aeb.broadcast(audioProcessEvent)
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast microphone process metrics (always broadcast, even when subprocess is not running)
|
||||
micProcessEvent := createAudioEvent(AudioEventMicProcessMetrics, aeb.getMicrophoneProcessMetrics())
|
||||
aeb.broadcast(micProcessEvent)
|
||||
}
|
||||
}
|
||||
|
||||
// broadcast sends an event to all subscribers
|
||||
func (aeb *AudioEventBroadcaster) broadcast(event AudioEvent) {
|
||||
aeb.mutex.RLock()
|
||||
|
|
|
|||
|
|
@ -133,7 +133,8 @@ func GetGoroutineMonitor() *GoroutineMonitor {
|
|||
|
||||
// StartGoroutineMonitoring starts the global goroutine monitor
|
||||
func StartGoroutineMonitoring() {
|
||||
// Goroutine monitoring disabled
|
||||
monitor := GetGoroutineMonitor()
|
||||
monitor.Start()
|
||||
}
|
||||
|
||||
// StopGoroutineMonitoring stops the global goroutine monitor
|
||||
|
|
|
|||
|
|
@ -0,0 +1,263 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// LatencyPercentiles holds calculated percentile values
|
||||
type LatencyPercentiles struct {
|
||||
P50 time.Duration `json:"p50"`
|
||||
P95 time.Duration `json:"p95"`
|
||||
P99 time.Duration `json:"p99"`
|
||||
Min time.Duration `json:"min"`
|
||||
Max time.Duration `json:"max"`
|
||||
Avg time.Duration `json:"avg"`
|
||||
}
|
||||
|
||||
// BufferPoolEfficiencyMetrics tracks detailed buffer pool performance
|
||||
type BufferPoolEfficiencyMetrics struct {
|
||||
// Pool utilization metrics
|
||||
HitRate float64 `json:"hit_rate"`
|
||||
MissRate float64 `json:"miss_rate"`
|
||||
UtilizationRate float64 `json:"utilization_rate"`
|
||||
FragmentationRate float64 `json:"fragmentation_rate"`
|
||||
|
||||
// Memory efficiency metrics
|
||||
MemoryEfficiency float64 `json:"memory_efficiency"`
|
||||
AllocationOverhead float64 `json:"allocation_overhead"`
|
||||
ReuseEffectiveness float64 `json:"reuse_effectiveness"`
|
||||
|
||||
// Performance metrics
|
||||
AverageGetLatency time.Duration `json:"average_get_latency"`
|
||||
AveragePutLatency time.Duration `json:"average_put_latency"`
|
||||
Throughput float64 `json:"throughput"` // Operations per second
|
||||
}
|
||||
|
||||
// GranularMetricsCollector aggregates all granular metrics
|
||||
type GranularMetricsCollector struct {
|
||||
// Buffer pool efficiency tracking
|
||||
framePoolMetrics *BufferPoolEfficiencyTracker
|
||||
controlPoolMetrics *BufferPoolEfficiencyTracker
|
||||
zeroCopyMetrics *BufferPoolEfficiencyTracker
|
||||
|
||||
mutex sync.RWMutex
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
// BufferPoolEfficiencyTracker tracks detailed efficiency metrics for a buffer pool
|
||||
type BufferPoolEfficiencyTracker struct {
|
||||
// Atomic counters
|
||||
getOperations int64 // Total get operations (atomic)
|
||||
putOperations int64 // Total put operations (atomic)
|
||||
getLatencySum int64 // Sum of get latencies in nanoseconds (atomic)
|
||||
putLatencySum int64 // Sum of put latencies in nanoseconds (atomic)
|
||||
allocationBytes int64 // Total bytes allocated (atomic)
|
||||
reuseCount int64 // Number of successful reuses (atomic)
|
||||
|
||||
// Recent operation times for throughput calculation
|
||||
recentOps []time.Time
|
||||
opsMutex sync.RWMutex
|
||||
|
||||
poolName string
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
// NewBufferPoolEfficiencyTracker creates a new efficiency tracker
|
||||
func NewBufferPoolEfficiencyTracker(poolName string, logger zerolog.Logger) *BufferPoolEfficiencyTracker {
|
||||
return &BufferPoolEfficiencyTracker{
|
||||
recentOps: make([]time.Time, 0, 1000), // Track last 1000 operations
|
||||
poolName: poolName,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// RecordGetOperation records a buffer get operation with its latency
|
||||
func (bpet *BufferPoolEfficiencyTracker) RecordGetOperation(latency time.Duration, wasHit bool) {
|
||||
atomic.AddInt64(&bpet.getOperations, 1)
|
||||
atomic.AddInt64(&bpet.getLatencySum, latency.Nanoseconds())
|
||||
|
||||
if wasHit {
|
||||
atomic.AddInt64(&bpet.reuseCount, 1)
|
||||
}
|
||||
|
||||
// Record operation time for throughput calculation
|
||||
bpet.opsMutex.Lock()
|
||||
now := time.Now()
|
||||
if len(bpet.recentOps) >= 1000 {
|
||||
bpet.recentOps = bpet.recentOps[1:]
|
||||
}
|
||||
bpet.recentOps = append(bpet.recentOps, now)
|
||||
bpet.opsMutex.Unlock()
|
||||
}
|
||||
|
||||
// RecordPutOperation records a buffer put operation with its latency
|
||||
func (bpet *BufferPoolEfficiencyTracker) RecordPutOperation(latency time.Duration, bufferSize int) {
|
||||
atomic.AddInt64(&bpet.putOperations, 1)
|
||||
atomic.AddInt64(&bpet.putLatencySum, latency.Nanoseconds())
|
||||
atomic.AddInt64(&bpet.allocationBytes, int64(bufferSize))
|
||||
}
|
||||
|
||||
// GetEfficiencyMetrics calculates current efficiency metrics
|
||||
func (bpet *BufferPoolEfficiencyTracker) GetEfficiencyMetrics() BufferPoolEfficiencyMetrics {
|
||||
getOps := atomic.LoadInt64(&bpet.getOperations)
|
||||
putOps := atomic.LoadInt64(&bpet.putOperations)
|
||||
reuseCount := atomic.LoadInt64(&bpet.reuseCount)
|
||||
getLatencySum := atomic.LoadInt64(&bpet.getLatencySum)
|
||||
putLatencySum := atomic.LoadInt64(&bpet.putLatencySum)
|
||||
allocationBytes := atomic.LoadInt64(&bpet.allocationBytes)
|
||||
|
||||
var hitRate, missRate, avgGetLatency, avgPutLatency float64
|
||||
var throughput float64
|
||||
|
||||
if getOps > 0 {
|
||||
hitRate = float64(reuseCount) / float64(getOps) * 100
|
||||
missRate = 100 - hitRate
|
||||
avgGetLatency = float64(getLatencySum) / float64(getOps)
|
||||
}
|
||||
|
||||
if putOps > 0 {
|
||||
avgPutLatency = float64(putLatencySum) / float64(putOps)
|
||||
}
|
||||
|
||||
// Calculate throughput from recent operations
|
||||
bpet.opsMutex.RLock()
|
||||
if len(bpet.recentOps) > 1 {
|
||||
timeSpan := bpet.recentOps[len(bpet.recentOps)-1].Sub(bpet.recentOps[0])
|
||||
if timeSpan > 0 {
|
||||
throughput = float64(len(bpet.recentOps)) / timeSpan.Seconds()
|
||||
}
|
||||
}
|
||||
bpet.opsMutex.RUnlock()
|
||||
|
||||
// Calculate efficiency metrics
|
||||
utilizationRate := hitRate // Simplified: hit rate as utilization
|
||||
memoryEfficiency := hitRate // Simplified: reuse rate as memory efficiency
|
||||
reuseEffectiveness := hitRate
|
||||
|
||||
// Calculate fragmentation (simplified as inverse of hit rate)
|
||||
fragmentationRate := missRate
|
||||
|
||||
// Calculate allocation overhead (simplified)
|
||||
allocationOverhead := float64(0)
|
||||
if getOps > 0 && allocationBytes > 0 {
|
||||
allocationOverhead = float64(allocationBytes) / float64(getOps)
|
||||
}
|
||||
|
||||
return BufferPoolEfficiencyMetrics{
|
||||
HitRate: hitRate,
|
||||
MissRate: missRate,
|
||||
UtilizationRate: utilizationRate,
|
||||
FragmentationRate: fragmentationRate,
|
||||
MemoryEfficiency: memoryEfficiency,
|
||||
AllocationOverhead: allocationOverhead,
|
||||
ReuseEffectiveness: reuseEffectiveness,
|
||||
AverageGetLatency: time.Duration(avgGetLatency),
|
||||
AveragePutLatency: time.Duration(avgPutLatency),
|
||||
Throughput: throughput,
|
||||
}
|
||||
}
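To make the efficiency arithmetic above concrete, here is a minimal sketch, not part of the diff, that drives a single tracker directly; the latency values and the 1920-byte frame size are illustrative, and zerolog.Nop() is used only to discard log output:

func sketchTrackerUsage() {
	tracker := NewBufferPoolEfficiencyTracker("frame_pool", zerolog.Nop())

	// Three gets: two reused a pooled buffer (hits), one allocated fresh (miss).
	tracker.RecordGetOperation(40*time.Microsecond, true)
	tracker.RecordGetOperation(35*time.Microsecond, true)
	tracker.RecordGetOperation(120*time.Microsecond, false)

	// One put returning a 1920-byte frame buffer to the pool.
	tracker.RecordPutOperation(20*time.Microsecond, 1920)

	m := tracker.GetEfficiencyMetrics()
	// HitRate = 2/3*100 ≈ 66.7, MissRate ≈ 33.3,
	// AverageGetLatency = (40+35+120)/3 µs = 65 µs.
	// Throughput is derived from the timestamps of the recorded get operations.
	_ = m
}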
|
||||
|
||||
// NewGranularMetricsCollector creates a new granular metrics collector
|
||||
func NewGranularMetricsCollector(logger zerolog.Logger) *GranularMetricsCollector {
|
||||
return &GranularMetricsCollector{
|
||||
framePoolMetrics: NewBufferPoolEfficiencyTracker("frame_pool", logger.With().Str("pool", "frame").Logger()),
|
||||
controlPoolMetrics: NewBufferPoolEfficiencyTracker("control_pool", logger.With().Str("pool", "control").Logger()),
|
||||
zeroCopyMetrics: NewBufferPoolEfficiencyTracker("zero_copy_pool", logger.With().Str("pool", "zero_copy").Logger()),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// RecordFramePoolOperation records frame pool operations
|
||||
func (gmc *GranularMetricsCollector) RecordFramePoolGet(latency time.Duration, wasHit bool) {
|
||||
gmc.framePoolMetrics.RecordGetOperation(latency, wasHit)
|
||||
}
|
||||
|
||||
func (gmc *GranularMetricsCollector) RecordFramePoolPut(latency time.Duration, bufferSize int) {
|
||||
gmc.framePoolMetrics.RecordPutOperation(latency, bufferSize)
|
||||
}
|
||||
|
||||
// RecordControlPoolOperation records control pool operations
|
||||
func (gmc *GranularMetricsCollector) RecordControlPoolGet(latency time.Duration, wasHit bool) {
|
||||
gmc.controlPoolMetrics.RecordGetOperation(latency, wasHit)
|
||||
}
|
||||
|
||||
func (gmc *GranularMetricsCollector) RecordControlPoolPut(latency time.Duration, bufferSize int) {
|
||||
gmc.controlPoolMetrics.RecordPutOperation(latency, bufferSize)
|
||||
}
|
||||
|
||||
// RecordZeroCopyOperation records zero-copy pool operations
|
||||
func (gmc *GranularMetricsCollector) RecordZeroCopyGet(latency time.Duration, wasHit bool) {
|
||||
gmc.zeroCopyMetrics.RecordGetOperation(latency, wasHit)
|
||||
}
|
||||
|
||||
func (gmc *GranularMetricsCollector) RecordZeroCopyPut(latency time.Duration, bufferSize int) {
|
||||
gmc.zeroCopyMetrics.RecordPutOperation(latency, bufferSize)
|
||||
}
|
||||
|
||||
// GetBufferPoolEfficiency returns efficiency metrics for all buffer pools
|
||||
func (gmc *GranularMetricsCollector) GetBufferPoolEfficiency() map[string]BufferPoolEfficiencyMetrics {
|
||||
gmc.mutex.RLock()
|
||||
defer gmc.mutex.RUnlock()
|
||||
|
||||
return map[string]BufferPoolEfficiencyMetrics{
|
||||
"frame_pool": gmc.framePoolMetrics.GetEfficiencyMetrics(),
|
||||
"control_pool": gmc.controlPoolMetrics.GetEfficiencyMetrics(),
|
||||
"zero_copy_pool": gmc.zeroCopyMetrics.GetEfficiencyMetrics(),
|
||||
}
|
||||
}
|
||||
|
||||
// LogGranularMetrics logs comprehensive granular metrics
|
||||
func (gmc *GranularMetricsCollector) LogGranularMetrics() {
|
||||
bufferEfficiency := gmc.GetBufferPoolEfficiency()
|
||||
|
||||
// Log buffer pool efficiency
|
||||
for poolName, efficiency := range bufferEfficiency {
|
||||
gmc.logger.Info().
|
||||
Str("pool", poolName).
|
||||
Float64("hit_rate", efficiency.HitRate).
|
||||
Float64("miss_rate", efficiency.MissRate).
|
||||
Float64("utilization_rate", efficiency.UtilizationRate).
|
||||
Float64("memory_efficiency", efficiency.MemoryEfficiency).
|
||||
Dur("avg_get_latency", efficiency.AverageGetLatency).
|
||||
Dur("avg_put_latency", efficiency.AveragePutLatency).
|
||||
Float64("throughput", efficiency.Throughput).
|
||||
Msg("Buffer pool efficiency metrics")
|
||||
}
|
||||
}
|
||||
|
||||
// Global granular metrics collector instance
|
||||
var (
|
||||
granularMetricsCollector *GranularMetricsCollector
|
||||
granularMetricsOnce sync.Once
|
||||
)
|
||||
|
||||
// GetGranularMetricsCollector returns the global granular metrics collector
|
||||
func GetGranularMetricsCollector() *GranularMetricsCollector {
|
||||
granularMetricsOnce.Do(func() {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "granular-metrics").Logger()
|
||||
granularMetricsCollector = NewGranularMetricsCollector(logger)
|
||||
})
|
||||
return granularMetricsCollector
|
||||
}
|
||||
|
||||
// StartGranularMetricsLogging starts periodic granular metrics logging
|
||||
func StartGranularMetricsLogging(interval time.Duration) {
|
||||
collector := GetGranularMetricsCollector()
|
||||
logger := collector.logger
|
||||
|
||||
logger.Info().Dur("interval", interval).Msg("Starting granular metrics logging")
|
||||
|
||||
go func() {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
collector.LogGranularMetrics()
|
||||
}
|
||||
}()
|
||||
}
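For orientation, a hedged usage sketch of the collector API added above, assuming the calling code lives in the same audio package; the 30-second interval, the make([]byte, 1920) stand-in buffer, and the sketch function name are illustrative only:

func sketchCollectorWiring() {
	// Log aggregated per-pool efficiency every 30 seconds.
	StartGranularMetricsLogging(30 * time.Second)

	collector := GetGranularMetricsCollector()

	// Wrap a frame-pool get: time it and record whether a pooled buffer was reused.
	start := time.Now()
	buf := make([]byte, 1920) // stand-in for the buffer a real pool Get would return
	collector.RecordFramePoolGet(time.Since(start), false) // false: freshly allocated

	// Wrap the matching put, recording how many bytes went back to the pool.
	start = time.Now()
	collector.RecordFramePoolPut(time.Since(start), len(buf))

	// On demand, snapshot hit rate, latencies and throughput for each pool.
	_ = collector.GetBufferPoolEfficiency()["frame_pool"]
}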
@ -0,0 +1,100 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestGranularMetricsCollector tests the GranularMetricsCollector functionality
|
||||
func TestGranularMetricsCollector(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"GetGranularMetricsCollector", testGetGranularMetricsCollector},
|
||||
{"ConcurrentCollectorAccess", testConcurrentCollectorAccess},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testGetGranularMetricsCollector tests singleton behavior
|
||||
func testGetGranularMetricsCollector(t *testing.T) {
|
||||
collector1 := GetGranularMetricsCollector()
|
||||
collector2 := GetGranularMetricsCollector()
|
||||
|
||||
require.NotNil(t, collector1)
|
||||
require.NotNil(t, collector2)
|
||||
assert.Same(t, collector1, collector2, "Should return the same singleton instance")
|
||||
}
|
||||
|
||||
// testConcurrentCollectorAccess tests thread safety of the collector
|
||||
func testConcurrentCollectorAccess(t *testing.T) {
|
||||
collector := GetGranularMetricsCollector()
|
||||
require.NotNil(t, collector)
|
||||
|
||||
const numGoroutines = 10
|
||||
const operationsPerGoroutine = 50
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numGoroutines)
|
||||
|
||||
// Concurrent buffer pool operations
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < operationsPerGoroutine; j++ {
|
||||
// Test buffer pool operations
|
||||
latency := time.Duration(id*operationsPerGoroutine+j) * time.Microsecond
|
||||
collector.RecordFramePoolGet(latency, true)
|
||||
collector.RecordFramePoolPut(latency, 1024)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify collector is still functional
|
||||
efficiency := collector.GetBufferPoolEfficiency()
|
||||
assert.NotNil(t, efficiency)
|
||||
}
|
||||
|
||||
func BenchmarkGranularMetricsCollector(b *testing.B) {
|
||||
collector := GetGranularMetricsCollector()
|
||||
|
||||
b.Run("RecordFramePoolGet", func(b *testing.B) {
|
||||
latency := 5 * time.Millisecond
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
collector.RecordFramePoolGet(latency, true)
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("RecordFramePoolPut", func(b *testing.B) {
|
||||
latency := 5 * time.Millisecond
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
collector.RecordFramePoolPut(latency, 1024)
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("GetBufferPoolEfficiency", func(b *testing.B) {
|
||||
// Pre-populate with some data
|
||||
for i := 0; i < 100; i++ {
|
||||
collector.RecordFramePoolGet(time.Duration(i)*time.Microsecond, true)
|
||||
collector.RecordFramePoolPut(time.Duration(i)*time.Microsecond, 1024)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = collector.GetBufferPoolEfficiency()
|
||||
}
|
||||
})
|
||||
}
@ -65,9 +65,6 @@ func (aim *AudioInputManager) Stop() {
|
|||
|
||||
aim.logComponentStop(AudioInputManagerComponent)
|
||||
|
||||
// Flush any pending sampled metrics before stopping
|
||||
aim.flushPendingMetrics()
|
||||
|
||||
// Stop the IPC-based audio input
|
||||
aim.ipcManager.Stop()
@ -109,8 +106,12 @@ func (aim *AudioInputManager) WriteOpusFrame(frame []byte) error {
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
atomic.AddInt64(&aim.metrics.FramesDropped, 1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update metrics
|
||||
atomic.AddInt64(&aim.framesSent, 1)
|
||||
aim.recordFrameProcessed(len(frame))
|
||||
aim.updateLatency(processingTime)
@ -195,6 +196,26 @@ func (aim *AudioInputManager) GetComprehensiveMetrics() map[string]interface{} {
|
|||
return comprehensiveMetrics
|
||||
}
|
||||
|
||||
// LogPerformanceStats logs current performance statistics
|
||||
func (aim *AudioInputManager) LogPerformanceStats() {
|
||||
metrics := aim.GetComprehensiveMetrics()
|
||||
|
||||
managerStats := metrics["manager"].(map[string]interface{})
|
||||
ipcStats := metrics["ipc"].(map[string]interface{})
|
||||
detailedStats := metrics["detailed"].(map[string]interface{})
|
||||
|
||||
aim.logger.Info().
|
||||
Int64("manager_frames_sent", managerStats["frames_sent"].(int64)).
|
||||
Int64("manager_frames_dropped", managerStats["frames_dropped"].(int64)).
|
||||
Float64("manager_latency_ms", managerStats["average_latency_ms"].(float64)).
|
||||
Int64("ipc_frames_sent", ipcStats["frames_sent"].(int64)).
|
||||
Int64("ipc_frames_dropped", ipcStats["frames_dropped"].(int64)).
|
||||
Float64("ipc_latency_ms", ipcStats["average_latency_ms"].(float64)).
|
||||
Float64("client_drop_rate", detailedStats["client_drop_rate"].(float64)).
|
||||
Float64("frames_per_second", detailedStats["frames_per_second"].(float64)).
|
||||
Msg("Audio input performance metrics")
|
||||
}
|
||||
|
||||
// IsRunning returns whether the audio input manager is running
|
||||
// This checks both the internal state and existing system processes
|
||||
func (aim *AudioInputManager) IsRunning() bool {
|
||||
@ -292,6 +292,9 @@ func (ais *AudioInputServer) Start() error {
|
|||
// Submit the connection acceptor to the audio reader pool
|
||||
if !SubmitAudioReaderTask(ais.acceptConnections) {
|
||||
// If the pool is full or shutting down, fall back to direct goroutine creation
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
logger.Warn().Msg("Audio reader pool full or shutting down, falling back to direct goroutine creation")
|
||||
|
||||
go ais.acceptConnections()
|
||||
}
@ -366,6 +369,9 @@ func (ais *AudioInputServer) acceptConnections() {
|
|||
// Handle this connection using the goroutine pool
|
||||
if !SubmitAudioReaderTask(func() { ais.handleConnection(conn) }) {
|
||||
// If the pool is full or shutting down, fall back to direct goroutine creation
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
logger.Warn().Msg("Audio reader pool full or shutting down, falling back to direct goroutine creation")
|
||||
|
||||
go ais.handleConnection(conn)
|
||||
}
|
||||
}
@ -936,12 +942,9 @@ func (ais *AudioInputServer) startReaderGoroutine() {
|
|||
|
||||
// If too many consecutive errors, close connection to force reconnect
|
||||
if consecutiveErrors >= maxConsecutiveErrors {
|
||||
// Only log critical errors to reduce hotpath overhead
|
||||
if logger.GetLevel() <= zerolog.ErrorLevel {
|
||||
logger.Error().
|
||||
Int("consecutive_errors", consecutiveErrors).
|
||||
Msg("Too many consecutive read errors, closing connection")
|
||||
}
|
||||
logger.Error().
|
||||
Int("consecutive_errors", consecutiveErrors).
|
||||
Msg("Too many consecutive read errors, closing connection")
|
||||
|
||||
ais.mtx.Lock()
|
||||
if ais.conn != nil {
@ -958,10 +961,7 @@ func (ais *AudioInputServer) startReaderGoroutine() {
|
|||
// Reset error counter on successful read
|
||||
if consecutiveErrors > 0 {
|
||||
consecutiveErrors = 0
|
||||
// Only log recovery info if debug level enabled to reduce overhead
|
||||
if logger.GetLevel() <= zerolog.InfoLevel {
|
||||
logger.Info().Msg("Input connection recovered")
|
||||
}
|
||||
logger.Info().Msg("Input connection recovered")
|
||||
}
|
||||
|
||||
// Send to message channel with non-blocking write
@ -971,11 +971,7 @@ func (ais *AudioInputServer) startReaderGoroutine() {
|
|||
default:
|
||||
// Channel full, drop message
|
||||
atomic.AddInt64(&ais.droppedFrames, 1)
|
||||
// Avoid sampling logic in critical path - only log if warn level enabled
|
||||
if logger.GetLevel() <= zerolog.WarnLevel {
|
||||
droppedCount := atomic.LoadInt64(&ais.droppedFrames)
|
||||
logger.Warn().Int64("total_dropped", droppedCount).Msg("Message channel full, dropping frame")
|
||||
}
|
||||
logger.Warn().Msg("Message channel full, dropping frame")
|
||||
}
|
||||
} else {
|
||||
// No connection, wait briefly before checking again
@ -989,10 +985,7 @@ func (ais *AudioInputServer) startReaderGoroutine() {
|
|||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
if !SubmitAudioReaderTask(readerTask) {
|
||||
// If the pool is full or shutting down, fall back to direct goroutine creation
|
||||
// Only log if warn level enabled - avoid sampling logic in critical path
|
||||
if logger.GetLevel() <= zerolog.WarnLevel {
|
||||
logger.Warn().Msg("Audio reader pool full or shutting down, falling back to direct goroutine creation")
|
||||
}
|
||||
logger.Warn().Msg("Audio reader pool full or shutting down, falling back to direct goroutine creation")
|
||||
|
||||
go readerTask()
|
||||
}
@ -1004,17 +997,12 @@ func (ais *AudioInputServer) startProcessorGoroutine() {
|
|||
|
||||
// Create a processor task that will run in the goroutine pool
|
||||
processorTask := func() {
|
||||
// Only lock OS thread and set priority for high-load scenarios
|
||||
// This reduces interference with input processing threads
|
||||
config := GetConfig()
|
||||
useThreadOptimizations := config.MaxAudioProcessorWorkers > 8
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
if useThreadOptimizations {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Priority scheduler not implemented - using default thread priority
|
||||
}
|
||||
// Set high priority for audio processing - skip logging in hotpath
|
||||
_ = SetAudioThreadPriority()
|
||||
defer func() { _ = ResetThreadPriority() }()
|
||||
|
||||
// Create logger for this goroutine
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputServerComponent).Logger()
@ -1022,8 +1010,8 @@ func (ais *AudioInputServer) startProcessorGoroutine() {
|
|||
// Enhanced error tracking for processing
|
||||
var processingErrors int
|
||||
var lastProcessingError time.Time
|
||||
maxProcessingErrors := config.MaxConsecutiveErrors
|
||||
errorResetWindow := config.RestartWindow
|
||||
maxProcessingErrors := GetConfig().MaxConsecutiveErrors
|
||||
errorResetWindow := GetConfig().RestartWindow
|
||||
|
||||
defer ais.wg.Done()
|
||||
for {
@ -1126,17 +1114,19 @@ func (ais *AudioInputServer) startMonitorGoroutine() {
|
|||
|
||||
// Create a monitor task that will run in the goroutine pool
|
||||
monitorTask := func() {
|
||||
// Monitor goroutine doesn't need thread locking for most scenarios
|
||||
// Only use thread optimizations for high-throughput scenarios
|
||||
config := GetConfig()
|
||||
useThreadOptimizations := config.MaxAudioProcessorWorkers > 8
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
if useThreadOptimizations {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Priority scheduler not implemented - using default thread priority
|
||||
// Set I/O priority for monitoring
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
if err := SetAudioIOThreadPriority(); err != nil {
|
||||
logger.Warn().Err(err).Msg("Failed to set audio I/O priority")
|
||||
}
|
||||
defer func() {
|
||||
if err := ResetThreadPriority(); err != nil {
|
||||
logger.Warn().Err(err).Msg("Failed to reset thread priority")
|
||||
}
|
||||
}()
|
||||
|
||||
defer ais.wg.Done()
|
||||
ticker := time.NewTicker(GetConfig().DefaultTickerInterval)
@ -0,0 +1,277 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAudioInputIPCManager tests the AudioInputIPCManager component
|
||||
func TestAudioInputIPCManager(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"Start", testAudioInputIPCManagerStart},
|
||||
{"Stop", testAudioInputIPCManagerStop},
|
||||
{"StartStop", testAudioInputIPCManagerStartStop},
|
||||
{"IsRunning", testAudioInputIPCManagerIsRunning},
|
||||
{"IsReady", testAudioInputIPCManagerIsReady},
|
||||
{"GetMetrics", testAudioInputIPCManagerGetMetrics},
|
||||
{"ConcurrentOperations", testAudioInputIPCManagerConcurrent},
|
||||
{"MultipleStarts", testAudioInputIPCManagerMultipleStarts},
|
||||
{"MultipleStops", testAudioInputIPCManagerMultipleStops},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerStart(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test initial state
|
||||
assert.False(t, manager.IsRunning())
|
||||
assert.False(t, manager.IsReady())
|
||||
|
||||
// Test start
|
||||
err := manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerStop(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Start first
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Test stop
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
assert.False(t, manager.IsReady())
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerStartStop(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test multiple start/stop cycles
|
||||
for i := 0; i < 3; i++ {
|
||||
// Start
|
||||
err := manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Stop
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerIsRunning(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Initially not running
|
||||
assert.False(t, manager.IsRunning())
|
||||
|
||||
// Start and check
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Stop and check
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerIsReady(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Initially not ready
|
||||
assert.False(t, manager.IsReady())
|
||||
|
||||
// Start and check ready state
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Give some time for initialization
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Stop
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsReady())
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerGetMetrics(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test metrics when not running
|
||||
metrics := manager.GetMetrics()
|
||||
assert.NotNil(t, metrics)
|
||||
|
||||
// Start and test metrics
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
metrics = manager.GetMetrics()
|
||||
assert.NotNil(t, metrics)
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerConcurrent(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 10
|
||||
|
||||
// Test concurrent starts
|
||||
wg.Add(numGoroutines)
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
manager.Start()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Should be running
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Test concurrent stops
|
||||
wg.Add(numGoroutines)
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
manager.Stop()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Should be stopped
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerMultipleStarts(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// First start should succeed
|
||||
err := manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Subsequent starts should be no-op
|
||||
err = manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
err = manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func testAudioInputIPCManagerMultipleStops(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Start first
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// First stop should work
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
|
||||
// Subsequent stops should be no-op
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
// TestAudioInputIPCMetrics tests the AudioInputMetrics functionality
|
||||
func TestAudioInputIPCMetrics(t *testing.T) {
|
||||
metrics := &AudioInputMetrics{}
|
||||
|
||||
// Test initial state
|
||||
assert.Equal(t, int64(0), metrics.FramesSent)
|
||||
assert.Equal(t, int64(0), metrics.FramesDropped)
|
||||
assert.Equal(t, int64(0), metrics.BytesProcessed)
|
||||
assert.Equal(t, int64(0), metrics.ConnectionDrops)
|
||||
assert.Equal(t, time.Duration(0), metrics.AverageLatency)
|
||||
assert.True(t, metrics.LastFrameTime.IsZero())
|
||||
|
||||
// Test field assignment
|
||||
metrics.FramesSent = 50
|
||||
metrics.FramesDropped = 2
|
||||
metrics.BytesProcessed = 512
|
||||
metrics.ConnectionDrops = 1
|
||||
metrics.AverageLatency = 5 * time.Millisecond
|
||||
metrics.LastFrameTime = time.Now()
|
||||
|
||||
// Verify assignments
|
||||
assert.Equal(t, int64(50), metrics.FramesSent)
|
||||
assert.Equal(t, int64(2), metrics.FramesDropped)
|
||||
assert.Equal(t, int64(512), metrics.BytesProcessed)
|
||||
assert.Equal(t, int64(1), metrics.ConnectionDrops)
|
||||
assert.Equal(t, 5*time.Millisecond, metrics.AverageLatency)
|
||||
assert.False(t, metrics.LastFrameTime.IsZero())
|
||||
}
|
||||
|
||||
// BenchmarkAudioInputIPCManager benchmarks the AudioInputIPCManager operations
|
||||
func BenchmarkAudioInputIPCManager(b *testing.B) {
|
||||
b.Run("Start", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
manager := NewAudioInputIPCManager()
|
||||
manager.Start()
|
||||
manager.Stop()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("IsRunning", func(b *testing.B) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
manager.Start()
|
||||
defer manager.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
manager.IsRunning()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("GetMetrics", func(b *testing.B) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
manager.Start()
|
||||
defer manager.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
manager.GetMetrics()
|
||||
}
|
||||
})
|
||||
}
@ -21,10 +21,6 @@ type AudioInputSupervisor struct {
|
|||
|
||||
// Environment variables for OPUS configuration
|
||||
opusEnv []string
|
||||
|
||||
// Pre-warming state
|
||||
prewarmed bool
|
||||
prewarmTime time.Time
|
||||
}
|
||||
|
||||
// NewAudioInputSupervisor creates a new audio input supervisor
@ -52,73 +48,6 @@ func (ais *AudioInputSupervisor) SetOpusConfig(bitrate, complexity, vbr, signalT
|
|||
}
|
||||
}
|
||||
|
||||
// PrewarmSubprocess starts a subprocess in advance to reduce activation latency
|
||||
func (ais *AudioInputSupervisor) PrewarmSubprocess() error {
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
|
||||
// Don't prewarm if already running or prewarmed
|
||||
if ais.IsRunning() || ais.prewarmed {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check for existing audio input server process first
|
||||
if existingPID, err := ais.findExistingAudioInputProcess(); err == nil {
|
||||
ais.logger.Info().Int("existing_pid", existingPID).Msg("Found existing audio input server process for prewarming")
|
||||
ais.prewarmed = true
|
||||
ais.prewarmTime = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create context for subprocess management
|
||||
ais.createContext()
|
||||
|
||||
// Get current executable path
|
||||
execPath, err := os.Executable()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get executable path: %w", err)
|
||||
}
|
||||
|
||||
// Build command arguments (only subprocess flag)
|
||||
args := []string{"--audio-input-server"}
|
||||
|
||||
// Create command for audio input server subprocess
|
||||
cmd := exec.CommandContext(ais.ctx, execPath, args...)
|
||||
|
||||
// Set environment variables for IPC and OPUS configuration
|
||||
env := append(os.Environ(), "JETKVM_AUDIO_INPUT_IPC=true") // Enable IPC mode
|
||||
env = append(env, ais.opusEnv...) // Add OPUS configuration
|
||||
cmd.Env = env
|
||||
|
||||
// Set process group to allow clean termination
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
|
||||
ais.cmd = cmd
|
||||
|
||||
// Start the subprocess
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
ais.cancelContext()
|
||||
return fmt.Errorf("failed to prewarm audio input server process: %w", err)
|
||||
}
|
||||
|
||||
ais.logger.Info().Int("pid", cmd.Process.Pid).Strs("args", args).Strs("opus_env", ais.opusEnv).Msg("Audio input server subprocess prewarmed")
|
||||
|
||||
// Add process to monitoring
|
||||
ais.processMonitor.AddProcess(cmd.Process.Pid, "audio-input-server")
|
||||
|
||||
// Monitor the subprocess in a goroutine
|
||||
go ais.monitorSubprocess()
|
||||
|
||||
// Mark as prewarmed
|
||||
ais.prewarmed = true
|
||||
ais.prewarmTime = time.Now()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start starts the audio input server subprocess
|
||||
func (ais *AudioInputSupervisor) Start() error {
|
||||
ais.mutex.Lock()
@ -131,16 +60,6 @@ func (ais *AudioInputSupervisor) Start() error {
|
|||
return fmt.Errorf("audio input supervisor already running")
|
||||
}
|
||||
|
||||
// Use prewarmed subprocess if available
|
||||
if ais.prewarmed && ais.cmd != nil && ais.cmd.Process != nil {
|
||||
ais.logger.Info().Int("pid", ais.cmd.Process.Pid).Dur("prewarm_age", time.Since(ais.prewarmTime)).Msg("Using prewarmed audio input server subprocess")
|
||||
ais.setRunning(true)
|
||||
ais.prewarmed = false // Reset prewarmed state
|
||||
// Connect client to the server
|
||||
go ais.connectClient()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check for existing audio input server process
|
||||
if existingPID, err := ais.findExistingAudioInputProcess(); err == nil {
|
||||
ais.logger.Info().Int("existing_pid", existingPID).Msg("Found existing audio input server process, connecting to it")
@ -201,31 +120,11 @@ func (ais *AudioInputSupervisor) Start() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// IsPrewarmed returns whether a subprocess is prewarmed and ready
|
||||
func (ais *AudioInputSupervisor) IsPrewarmed() bool {
|
||||
ais.mutex.RLock()
|
||||
defer ais.mutex.RUnlock()
|
||||
return ais.prewarmed
|
||||
}
|
||||
|
||||
// GetPrewarmAge returns how long ago the subprocess was prewarmed
|
||||
func (ais *AudioInputSupervisor) GetPrewarmAge() time.Duration {
|
||||
ais.mutex.RLock()
|
||||
defer ais.mutex.RUnlock()
|
||||
if !ais.prewarmed {
|
||||
return 0
|
||||
}
|
||||
return time.Since(ais.prewarmTime)
|
||||
}
|
||||
|
||||
// Stop stops the audio input server subprocess
|
||||
func (ais *AudioInputSupervisor) Stop() {
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
|
||||
// Reset prewarmed state
|
||||
ais.prewarmed = false
|
||||
|
||||
if !ais.IsRunning() {
|
||||
return
|
||||
}
|
@ -319,6 +218,13 @@ func (ais *AudioInputSupervisor) GetClient() *AudioInputClient {
|
|||
return ais.client
|
||||
}
|
||||
|
||||
// GetProcessMetrics returns current process metrics with audio-input-server name
|
||||
func (ais *AudioInputSupervisor) GetProcessMetrics() *ProcessMetrics {
|
||||
metrics := ais.BaseSupervisor.GetProcessMetrics()
|
||||
metrics.ProcessName = "audio-input-server"
|
||||
return metrics
|
||||
}
|
||||
|
||||
// monitorSubprocess monitors the subprocess and handles unexpected exits
|
||||
func (ais *AudioInputSupervisor) monitorSubprocess() {
|
||||
if ais.cmd == nil || ais.cmd.Process == nil {
@ -0,0 +1,244 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewAudioInputManager(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
assert.NotNil(t, manager)
|
||||
assert.False(t, manager.IsRunning())
|
||||
assert.False(t, manager.IsReady())
|
||||
}
|
||||
|
||||
func TestAudioInputManagerStart(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test successful start
|
||||
err := manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Test starting already running manager
|
||||
err = manager.Start()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already running")
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func TestAudioInputManagerStop(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test stopping non-running manager
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
|
||||
// Start and then stop
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
func TestAudioInputManagerIsRunning(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test initial state
|
||||
assert.False(t, manager.IsRunning())
|
||||
|
||||
// Test after start
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
// Test after stop
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
func TestAudioInputManagerIsReady(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test initial state
|
||||
assert.False(t, manager.IsReady())
|
||||
|
||||
// Start manager
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Give some time for initialization
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Test ready state (may vary based on implementation)
|
||||
// Just ensure the method doesn't panic
|
||||
_ = manager.IsReady()
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func TestAudioInputManagerGetMetrics(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test metrics when not running
|
||||
metrics := manager.GetMetrics()
|
||||
assert.NotNil(t, metrics)
|
||||
assert.Equal(t, int64(0), metrics.FramesSent)
|
||||
assert.Equal(t, int64(0), metrics.FramesDropped)
|
||||
assert.Equal(t, int64(0), metrics.BytesProcessed)
|
||||
assert.Equal(t, int64(0), metrics.ConnectionDrops)
|
||||
|
||||
// Start and test metrics
|
||||
err := manager.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
metrics = manager.GetMetrics()
|
||||
assert.NotNil(t, metrics)
|
||||
assert.GreaterOrEqual(t, metrics.FramesSent, int64(0))
|
||||
assert.GreaterOrEqual(t, metrics.FramesDropped, int64(0))
|
||||
assert.GreaterOrEqual(t, metrics.BytesProcessed, int64(0))
|
||||
assert.GreaterOrEqual(t, metrics.ConnectionDrops, int64(0))
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func TestAudioInputManagerConcurrentOperations(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Test concurrent start/stop operations
|
||||
for i := 0; i < 10; i++ {
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = manager.Start()
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
manager.Stop()
|
||||
}()
|
||||
}
|
||||
|
||||
// Test concurrent metric access
|
||||
for i := 0; i < 5; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = manager.GetMetrics()
|
||||
}()
|
||||
}
|
||||
|
||||
// Test concurrent status checks
|
||||
for i := 0; i < 5; i++ {
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = manager.IsRunning()
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = manager.IsReady()
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
func TestAudioInputManagerMultipleStartStop(t *testing.T) {
|
||||
manager := NewAudioInputManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test multiple start/stop cycles
|
||||
for i := 0; i < 5; i++ {
|
||||
err := manager.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, manager.IsRunning())
|
||||
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
}
|
||||
|
||||
func TestAudioInputMetrics(t *testing.T) {
|
||||
metrics := &AudioInputMetrics{
|
||||
BaseAudioMetrics: BaseAudioMetrics{
|
||||
FramesProcessed: 100,
|
||||
FramesDropped: 5,
|
||||
BytesProcessed: 1024,
|
||||
ConnectionDrops: 2,
|
||||
AverageLatency: time.Millisecond * 10,
|
||||
LastFrameTime: time.Now(),
|
||||
},
|
||||
FramesSent: 100,
|
||||
}
|
||||
|
||||
assert.Equal(t, int64(100), metrics.FramesSent)
|
||||
assert.Equal(t, int64(5), metrics.FramesDropped)
|
||||
assert.Equal(t, int64(1024), metrics.BytesProcessed)
|
||||
assert.Equal(t, int64(2), metrics.ConnectionDrops)
|
||||
assert.Equal(t, time.Millisecond*10, metrics.AverageLatency)
|
||||
assert.False(t, metrics.LastFrameTime.IsZero())
|
||||
}
|
||||
|
||||
// Benchmark tests
|
||||
func BenchmarkAudioInputManager(b *testing.B) {
|
||||
manager := NewAudioInputManager()
|
||||
|
||||
b.Run("Start", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = manager.Start()
|
||||
manager.Stop()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("GetMetrics", func(b *testing.B) {
|
||||
_ = manager.Start()
|
||||
defer manager.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = manager.GetMetrics()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("IsRunning", func(b *testing.B) {
|
||||
_ = manager.Start()
|
||||
defer manager.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = manager.IsRunning()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("IsReady", func(b *testing.B) {
|
||||
_ = manager.Start()
|
||||
defer manager.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = manager.IsReady()
|
||||
}
|
||||
})
|
||||
}
@ -0,0 +1,320 @@
|
|||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package audio
|
||||
|
||||
import (
	"context"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
|
||||
|
||||
// TestIPCCommunication tests the IPC communication between audio components
|
||||
func TestIPCCommunication(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "AudioOutputIPC",
|
||||
testFunc: testAudioOutputIPC,
|
||||
description: "Test audio output IPC server and client communication",
|
||||
},
|
||||
{
|
||||
name: "AudioInputIPC",
|
||||
testFunc: testAudioInputIPC,
|
||||
description: "Test audio input IPC server and client communication",
|
||||
},
|
||||
{
|
||||
name: "IPCReconnection",
|
||||
testFunc: testIPCReconnection,
|
||||
description: "Test IPC reconnection after connection loss",
|
||||
},
|
||||
{
|
||||
name: "IPCConcurrency",
|
||||
testFunc: testIPCConcurrency,
|
||||
description: "Test concurrent IPC operations",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Logf("Running test: %s - %s", tt.name, tt.description)
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAudioOutputIPC tests the audio output IPC communication
|
||||
func testAudioOutputIPC(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
socketPath := filepath.Join(tempDir, "test_audio_output.sock")
|
||||
|
||||
// Create a test IPC server
|
||||
server := &AudioIPCServer{
|
||||
socketPath: socketPath,
|
||||
logger: getTestLogger(),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Start server in goroutine
|
||||
var serverErr error
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serverErr = server.Start(ctx)
|
||||
}()
|
||||
|
||||
// Wait for server to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Test client connection
|
||||
conn, err := net.Dial("unix", socketPath)
|
||||
require.NoError(t, err, "Failed to connect to IPC server")
|
||||
defer conn.Close()
|
||||
|
||||
// Test sending a frame message
|
||||
testFrame := []byte("test audio frame data")
|
||||
msg := &OutputMessage{
|
||||
Type: OutputMessageTypeOpusFrame,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: testFrame,
|
||||
}
|
||||
|
||||
err = writeOutputMessage(conn, msg)
|
||||
require.NoError(t, err, "Failed to write message to IPC")
|
||||
|
||||
// Test heartbeat
|
||||
heartbeatMsg := &OutputMessage{
|
||||
Type: OutputMessageTypeHeartbeat,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
err = writeOutputMessage(conn, heartbeatMsg)
|
||||
require.NoError(t, err, "Failed to send heartbeat")
|
||||
|
||||
// Clean shutdown
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
if serverErr != nil && serverErr != context.Canceled {
|
||||
t.Errorf("Server error: %v", serverErr)
|
||||
}
|
||||
}
|
||||
|
||||
// testAudioInputIPC tests the audio input IPC communication
|
||||
func testAudioInputIPC(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
socketPath := filepath.Join(tempDir, "test_audio_input.sock")
|
||||
|
||||
// Create a test input IPC server
|
||||
server := &AudioInputIPCServer{
|
||||
socketPath: socketPath,
|
||||
logger: getTestLogger(),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Start server
|
||||
var serverErr error
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serverErr = server.Start(ctx)
|
||||
}()
|
||||
|
||||
// Wait for server to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Test client connection
|
||||
conn, err := net.Dial("unix", socketPath)
|
||||
require.NoError(t, err, "Failed to connect to input IPC server")
|
||||
defer conn.Close()
|
||||
|
||||
// Test sending input frame
|
||||
testInputFrame := []byte("test microphone data")
|
||||
inputMsg := &InputMessage{
|
||||
Type: InputMessageTypeOpusFrame,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: testInputFrame,
|
||||
}
|
||||
|
||||
err = writeInputMessage(conn, inputMsg)
|
||||
require.NoError(t, err, "Failed to write input message")
|
||||
|
||||
// Test configuration message
|
||||
configMsg := &InputMessage{
|
||||
Type: InputMessageTypeConfig,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: []byte("quality=medium"),
|
||||
}
|
||||
|
||||
err = writeInputMessage(conn, configMsg)
|
||||
require.NoError(t, err, "Failed to send config message")
|
||||
|
||||
// Clean shutdown
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
if serverErr != nil && serverErr != context.Canceled {
|
||||
t.Errorf("Input server error: %v", serverErr)
|
||||
}
|
||||
}
|
||||
|
||||
// testIPCReconnection tests IPC reconnection scenarios
|
||||
func testIPCReconnection(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
socketPath := filepath.Join(tempDir, "test_reconnect.sock")
|
||||
|
||||
// Create server
|
||||
server := &AudioIPCServer{
|
||||
socketPath: socketPath,
|
||||
logger: getTestLogger(),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Start server
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
server.Start(ctx)
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// First connection
|
||||
conn1, err := net.Dial("unix", socketPath)
|
||||
require.NoError(t, err, "Failed initial connection")
|
||||
|
||||
// Send a message
|
||||
msg := &OutputMessage{
|
||||
Type: OutputMessageTypeOpusFrame,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: []byte("test data 1"),
|
||||
}
|
||||
err = writeOutputMessage(conn1, msg)
|
||||
require.NoError(t, err, "Failed to send first message")
|
||||
|
||||
// Close connection to simulate disconnect
|
||||
conn1.Close()
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
// Reconnect
|
||||
conn2, err := net.Dial("unix", socketPath)
|
||||
require.NoError(t, err, "Failed to reconnect")
|
||||
defer conn2.Close()
|
||||
|
||||
// Send another message after reconnection
|
||||
msg2 := &OutputMessage{
|
||||
Type: OutputMessageTypeOpusFrame,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: []byte("test data 2"),
|
||||
}
|
||||
err = writeOutputMessage(conn2, msg2)
|
||||
require.NoError(t, err, "Failed to send message after reconnection")
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// testIPCConcurrency tests concurrent IPC operations
|
||||
func testIPCConcurrency(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
socketPath := filepath.Join(tempDir, "test_concurrent.sock")
|
||||
|
||||
server := &AudioIPCServer{
|
||||
socketPath: socketPath,
|
||||
logger: getTestLogger(),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Start server
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
server.Start(ctx)
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Create multiple concurrent connections
|
||||
numClients := 5
|
||||
messagesPerClient := 10
|
||||
|
||||
var clientWg sync.WaitGroup
|
||||
for i := 0; i < numClients; i++ {
|
||||
clientWg.Add(1)
|
||||
go func(clientID int) {
|
||||
defer clientWg.Done()
|
||||
|
||||
conn, err := net.Dial("unix", socketPath)
|
||||
if err != nil {
|
||||
t.Errorf("Client %d failed to connect: %v", clientID, err)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Send multiple messages
|
||||
for j := 0; j < messagesPerClient; j++ {
|
||||
msg := &OutputMessage{
|
||||
Type: OutputMessageTypeOpusFrame,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: []byte(fmt.Sprintf("client_%d_msg_%d", clientID, j)),
|
||||
}
|
||||
|
||||
if err := writeOutputMessage(conn, msg); err != nil {
|
||||
t.Errorf("Client %d failed to send message %d: %v", clientID, j, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Small delay between messages
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
clientWg.Wait()
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Helper function to get a test logger
func getTestLogger() zerolog.Logger {
	return zerolog.New(os.Stdout).With().Timestamp().Logger()
}

// Helper functions for message writing (simplified versions)
func writeOutputMessage(conn net.Conn, msg *OutputMessage) error {
	// This is a simplified version for testing
	// In real implementation, this would use the actual protocol
	data := fmt.Sprintf("%d:%d:%s", msg.Type, msg.Timestamp, string(msg.Data))
	_, err := conn.Write([]byte(data))
	return err
}

func writeInputMessage(conn net.Conn, msg *InputMessage) error {
	// This is a simplified version for testing
	data := fmt.Sprintf("%d:%d:%s", msg.Type, msg.Timestamp, string(msg.Data))
	_, err := conn.Write([]byte(data))
	return err
}
@ -0,0 +1,535 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// LatencyProfiler provides comprehensive end-to-end audio latency profiling
|
||||
// with nanosecond precision across the entire WebRTC->IPC->CGO->ALSA pipeline
|
||||
type LatencyProfiler struct {
|
||||
// Atomic counters for thread-safe access (MUST be first for ARM32 alignment)
|
||||
totalMeasurements int64 // Total number of measurements taken
|
||||
webrtcLatencySum int64 // Sum of WebRTC processing latencies (nanoseconds)
|
||||
ipcLatencySum int64 // Sum of IPC communication latencies (nanoseconds)
|
||||
cgoLatencySum int64 // Sum of CGO call latencies (nanoseconds)
|
||||
alsaLatencySum int64 // Sum of ALSA device latencies (nanoseconds)
|
||||
endToEndLatencySum int64 // Sum of complete end-to-end latencies (nanoseconds)
|
||||
validationLatencySum int64 // Sum of validation overhead (nanoseconds)
|
||||
serializationLatencySum int64 // Sum of serialization overhead (nanoseconds)
|
||||
|
||||
// Peak latency tracking
|
||||
maxWebrtcLatency int64 // Maximum WebRTC latency observed (nanoseconds)
|
||||
maxIpcLatency int64 // Maximum IPC latency observed (nanoseconds)
|
||||
maxCgoLatency int64 // Maximum CGO latency observed (nanoseconds)
|
||||
maxAlsaLatency int64 // Maximum ALSA latency observed (nanoseconds)
|
||||
	maxEndToEndLatency int64 // Maximum end-to-end latency observed (nanoseconds)

	// Configuration and control
	config  LatencyProfilerConfig
	logger  zerolog.Logger
	ctx     context.Context
	cancel  context.CancelFunc
	running int32 // Atomic flag for profiler state
	enabled int32 // Atomic flag for measurement collection

	// Detailed measurement storage
	measurements     []DetailedLatencyMeasurement
	measurementMutex sync.RWMutex
	measurementIndex int

	// High-resolution timing
	timeSource func() int64 // Nanosecond precision time source
}

// LatencyProfilerConfig defines profiler configuration
type LatencyProfilerConfig struct {
	MaxMeasurements     int           // Maximum measurements to store in memory
	SamplingRate        float64       // Sampling rate (0.0-1.0, 1.0 = profile every frame)
	ReportingInterval   time.Duration // How often to log profiling reports
	ThresholdWarning    time.Duration // Latency threshold for warnings
	ThresholdCritical   time.Duration // Latency threshold for critical alerts
	EnableDetailedTrace bool          // Enable detailed per-component tracing
	EnableHistogram     bool          // Enable latency histogram collection
}

// DetailedLatencyMeasurement captures comprehensive latency breakdown
type DetailedLatencyMeasurement struct {
	Timestamp            time.Time     // When the measurement was taken
	FrameID              uint64        // Unique frame identifier for tracing
	WebRTCLatency        time.Duration // WebRTC processing time
	IPCLatency           time.Duration // IPC communication time
	CGOLatency           time.Duration // CGO call overhead
	ALSALatency          time.Duration // ALSA device processing time
	ValidationLatency    time.Duration // Frame validation overhead
	SerializationLatency time.Duration // Data serialization overhead
	EndToEndLatency      time.Duration // Complete pipeline latency
	Source               string        // Source component (input/output)
	FrameSize            int           // Size of the audio frame in bytes
	CPUUsage             float64       // CPU usage at time of measurement
	MemoryUsage          uint64        // Memory usage at time of measurement
}

// LatencyProfileReport contains aggregated profiling results
type LatencyProfileReport struct {
	TotalMeasurements int64         // Total measurements taken
	TimeRange         time.Duration // Time span of measurements

	// Average latencies
	AvgWebRTCLatency        time.Duration
	AvgIPCLatency           time.Duration
	AvgCGOLatency           time.Duration
	AvgALSALatency          time.Duration
	AvgEndToEndLatency      time.Duration
	AvgValidationLatency    time.Duration
	AvgSerializationLatency time.Duration

	// Peak latencies
	MaxWebRTCLatency   time.Duration
	MaxIPCLatency      time.Duration
	MaxCGOLatency      time.Duration
	MaxALSALatency     time.Duration
	MaxEndToEndLatency time.Duration

	// Performance analysis
	BottleneckComponent string         // Component with highest average latency
	LatencyDistribution map[string]int // Histogram of latency ranges
	Throughput          float64        // Frames per second processed
}

// FrameLatencyTracker tracks latency for a single audio frame through the pipeline
type FrameLatencyTracker struct {
	frameID                uint64
	startTime              int64 // Nanosecond timestamp
	webrtcStartTime        int64
	ipcStartTime           int64
	cgoStartTime           int64
	alsaStartTime          int64
	validationStartTime    int64
	serializationStartTime int64
	frameSize              int
	source                 string
}

// Global profiler instance
var (
	globalLatencyProfiler unsafe.Pointer // *LatencyProfiler
	profilerInitialized   int32
)

// DefaultLatencyProfilerConfig returns default profiler configuration
func DefaultLatencyProfilerConfig() LatencyProfilerConfig {
	return LatencyProfilerConfig{
		MaxMeasurements:     10000,
		SamplingRate:        0.1, // Profile 10% of frames to minimize overhead
		ReportingInterval:   30 * time.Second,
		ThresholdWarning:    50 * time.Millisecond,
		ThresholdCritical:   100 * time.Millisecond,
		EnableDetailedTrace: false, // Disabled by default for performance
		EnableHistogram:     true,
	}
}

// NewLatencyProfiler creates a new latency profiler
func NewLatencyProfiler(config LatencyProfilerConfig) *LatencyProfiler {
	ctx, cancel := context.WithCancel(context.Background())
	logger := logging.GetDefaultLogger().With().Str("component", "latency-profiler").Logger()

	// Validate configuration
	if config.MaxMeasurements <= 0 {
		config.MaxMeasurements = 10000
	}
	if config.SamplingRate < 0.0 || config.SamplingRate > 1.0 {
		config.SamplingRate = 0.1
	}
	if config.ReportingInterval <= 0 {
		config.ReportingInterval = 30 * time.Second
	}

	profiler := &LatencyProfiler{
		config:       config,
		logger:       logger,
		ctx:          ctx,
		cancel:       cancel,
		measurements: make([]DetailedLatencyMeasurement, config.MaxMeasurements),
		timeSource:   func() int64 { return time.Now().UnixNano() },
	}

	// Initialize peak latencies to zero
	atomic.StoreInt64(&profiler.maxWebrtcLatency, 0)
	atomic.StoreInt64(&profiler.maxIpcLatency, 0)
	atomic.StoreInt64(&profiler.maxCgoLatency, 0)
	atomic.StoreInt64(&profiler.maxAlsaLatency, 0)
	atomic.StoreInt64(&profiler.maxEndToEndLatency, 0)

	return profiler
}

// Start begins latency profiling
func (lp *LatencyProfiler) Start() error {
	if !atomic.CompareAndSwapInt32(&lp.running, 0, 1) {
		return fmt.Errorf("latency profiler already running")
	}

	// Enable measurement collection
	atomic.StoreInt32(&lp.enabled, 1)

	// Start reporting goroutine
	go lp.reportingLoop()

	lp.logger.Info().Float64("sampling_rate", lp.config.SamplingRate).Msg("latency profiler started")
	return nil
}

// Stop stops latency profiling
func (lp *LatencyProfiler) Stop() {
	if !atomic.CompareAndSwapInt32(&lp.running, 1, 0) {
		return
	}

	// Disable measurement collection
	atomic.StoreInt32(&lp.enabled, 0)

	// Cancel context to stop reporting
	lp.cancel()

	lp.logger.Info().Msg("latency profiler stopped")
}

// IsEnabled returns whether profiling is currently enabled
func (lp *LatencyProfiler) IsEnabled() bool {
	return atomic.LoadInt32(&lp.enabled) == 1
}

// StartFrameTracking begins tracking latency for a new audio frame
func (lp *LatencyProfiler) StartFrameTracking(frameID uint64, frameSize int, source string) *FrameLatencyTracker {
	if !lp.IsEnabled() {
		return nil
	}

	// Apply sampling rate to reduce profiling overhead
	if lp.config.SamplingRate < 1.0 {
		// Simple sampling based on frame ID
		if float64(frameID%100)/100.0 > lp.config.SamplingRate {
			return nil
		}
	}

	now := lp.timeSource()
	return &FrameLatencyTracker{
		frameID:   frameID,
		startTime: now,
		frameSize: frameSize,
		source:    source,
	}
}

// TrackWebRTCStart marks the start of WebRTC processing
func (tracker *FrameLatencyTracker) TrackWebRTCStart() {
	if tracker != nil {
		tracker.webrtcStartTime = time.Now().UnixNano()
	}
}

// TrackIPCStart marks the start of IPC communication
func (tracker *FrameLatencyTracker) TrackIPCStart() {
	if tracker != nil {
		tracker.ipcStartTime = time.Now().UnixNano()
	}
}

// TrackCGOStart marks the start of CGO processing
func (tracker *FrameLatencyTracker) TrackCGOStart() {
	if tracker != nil {
		tracker.cgoStartTime = time.Now().UnixNano()
	}
}

// TrackALSAStart marks the start of ALSA device processing
func (tracker *FrameLatencyTracker) TrackALSAStart() {
	if tracker != nil {
		tracker.alsaStartTime = time.Now().UnixNano()
	}
}

// TrackValidationStart marks the start of frame validation
func (tracker *FrameLatencyTracker) TrackValidationStart() {
	if tracker != nil {
		tracker.validationStartTime = time.Now().UnixNano()
	}
}

// TrackSerializationStart marks the start of data serialization
func (tracker *FrameLatencyTracker) TrackSerializationStart() {
	if tracker != nil {
		tracker.serializationStartTime = time.Now().UnixNano()
	}
}

// FinishTracking completes frame tracking and records the measurement
func (lp *LatencyProfiler) FinishTracking(tracker *FrameLatencyTracker) {
	if tracker == nil || !lp.IsEnabled() {
		return
	}

	endTime := lp.timeSource()

	// Calculate component latencies
	var webrtcLatency, ipcLatency, cgoLatency, alsaLatency, validationLatency, serializationLatency time.Duration

	if tracker.webrtcStartTime > 0 {
		webrtcLatency = time.Duration(tracker.ipcStartTime - tracker.webrtcStartTime)
	}
	if tracker.ipcStartTime > 0 {
		ipcLatency = time.Duration(tracker.cgoStartTime - tracker.ipcStartTime)
	}
	if tracker.cgoStartTime > 0 {
		cgoLatency = time.Duration(tracker.alsaStartTime - tracker.cgoStartTime)
	}
	if tracker.alsaStartTime > 0 {
		alsaLatency = time.Duration(endTime - tracker.alsaStartTime)
	}
	if tracker.validationStartTime > 0 {
		validationLatency = time.Duration(tracker.ipcStartTime - tracker.validationStartTime)
	}
	if tracker.serializationStartTime > 0 {
		serializationLatency = time.Duration(tracker.cgoStartTime - tracker.serializationStartTime)
	}

	endToEndLatency := time.Duration(endTime - tracker.startTime)

	// Update atomic counters
	atomic.AddInt64(&lp.totalMeasurements, 1)
	atomic.AddInt64(&lp.webrtcLatencySum, webrtcLatency.Nanoseconds())
	atomic.AddInt64(&lp.ipcLatencySum, ipcLatency.Nanoseconds())
	atomic.AddInt64(&lp.cgoLatencySum, cgoLatency.Nanoseconds())
	atomic.AddInt64(&lp.alsaLatencySum, alsaLatency.Nanoseconds())
	atomic.AddInt64(&lp.endToEndLatencySum, endToEndLatency.Nanoseconds())
	atomic.AddInt64(&lp.validationLatencySum, validationLatency.Nanoseconds())
	atomic.AddInt64(&lp.serializationLatencySum, serializationLatency.Nanoseconds())

	// Update peak latencies
	lp.updatePeakLatency(&lp.maxWebrtcLatency, webrtcLatency.Nanoseconds())
	lp.updatePeakLatency(&lp.maxIpcLatency, ipcLatency.Nanoseconds())
	lp.updatePeakLatency(&lp.maxCgoLatency, cgoLatency.Nanoseconds())
	lp.updatePeakLatency(&lp.maxAlsaLatency, alsaLatency.Nanoseconds())
	lp.updatePeakLatency(&lp.maxEndToEndLatency, endToEndLatency.Nanoseconds())

	// Store detailed measurement if enabled
	if lp.config.EnableDetailedTrace {
		lp.storeMeasurement(DetailedLatencyMeasurement{
			Timestamp:            time.Now(),
			FrameID:              tracker.frameID,
			WebRTCLatency:        webrtcLatency,
			IPCLatency:           ipcLatency,
			CGOLatency:           cgoLatency,
			ALSALatency:          alsaLatency,
			ValidationLatency:    validationLatency,
			SerializationLatency: serializationLatency,
			EndToEndLatency:      endToEndLatency,
			Source:               tracker.source,
			FrameSize:            tracker.frameSize,
			CPUUsage:             lp.getCurrentCPUUsage(),
			MemoryUsage:          lp.getCurrentMemoryUsage(),
		})
	}

	// Check for threshold violations
	if endToEndLatency > lp.config.ThresholdCritical {
		lp.logger.Error().Dur("latency", endToEndLatency).Uint64("frame_id", tracker.frameID).
			Str("source", tracker.source).Msg("critical latency threshold exceeded")
	} else if endToEndLatency > lp.config.ThresholdWarning {
		lp.logger.Warn().Dur("latency", endToEndLatency).Uint64("frame_id", tracker.frameID).
			Str("source", tracker.source).Msg("warning latency threshold exceeded")
	}
}

// updatePeakLatency atomically updates peak latency if new value is higher
func (lp *LatencyProfiler) updatePeakLatency(peakPtr *int64, newLatency int64) {
	for {
		current := atomic.LoadInt64(peakPtr)
		if newLatency <= current || atomic.CompareAndSwapInt64(peakPtr, current, newLatency) {
			break
		}
	}
}

// storeMeasurement stores a detailed measurement in the circular buffer
func (lp *LatencyProfiler) storeMeasurement(measurement DetailedLatencyMeasurement) {
	lp.measurementMutex.Lock()
	defer lp.measurementMutex.Unlock()

	lp.measurements[lp.measurementIndex] = measurement
	lp.measurementIndex = (lp.measurementIndex + 1) % len(lp.measurements)
}

// GetReport generates a comprehensive latency profiling report
func (lp *LatencyProfiler) GetReport() LatencyProfileReport {
	totalMeasurements := atomic.LoadInt64(&lp.totalMeasurements)
	if totalMeasurements == 0 {
		return LatencyProfileReport{}
	}

	// Calculate averages
	avgWebRTC := time.Duration(atomic.LoadInt64(&lp.webrtcLatencySum) / totalMeasurements)
	avgIPC := time.Duration(atomic.LoadInt64(&lp.ipcLatencySum) / totalMeasurements)
	avgCGO := time.Duration(atomic.LoadInt64(&lp.cgoLatencySum) / totalMeasurements)
	avgALSA := time.Duration(atomic.LoadInt64(&lp.alsaLatencySum) / totalMeasurements)
	avgEndToEnd := time.Duration(atomic.LoadInt64(&lp.endToEndLatencySum) / totalMeasurements)
	avgValidation := time.Duration(atomic.LoadInt64(&lp.validationLatencySum) / totalMeasurements)
	avgSerialization := time.Duration(atomic.LoadInt64(&lp.serializationLatencySum) / totalMeasurements)

	// Get peak latencies
	maxWebRTC := time.Duration(atomic.LoadInt64(&lp.maxWebrtcLatency))
	maxIPC := time.Duration(atomic.LoadInt64(&lp.maxIpcLatency))
	maxCGO := time.Duration(atomic.LoadInt64(&lp.maxCgoLatency))
	maxALSA := time.Duration(atomic.LoadInt64(&lp.maxAlsaLatency))
	maxEndToEnd := time.Duration(atomic.LoadInt64(&lp.maxEndToEndLatency))

	// Determine bottleneck component
	bottleneck := "WebRTC"
	maxAvg := avgWebRTC
	if avgIPC > maxAvg {
		bottleneck = "IPC"
		maxAvg = avgIPC
	}
	if avgCGO > maxAvg {
		bottleneck = "CGO"
		maxAvg = avgCGO
	}
	if avgALSA > maxAvg {
		bottleneck = "ALSA"
	}

	return LatencyProfileReport{
		TotalMeasurements:       totalMeasurements,
		AvgWebRTCLatency:        avgWebRTC,
		AvgIPCLatency:           avgIPC,
		AvgCGOLatency:           avgCGO,
		AvgALSALatency:          avgALSA,
		AvgEndToEndLatency:      avgEndToEnd,
		AvgValidationLatency:    avgValidation,
		AvgSerializationLatency: avgSerialization,
		MaxWebRTCLatency:        maxWebRTC,
		MaxIPCLatency:           maxIPC,
		MaxCGOLatency:           maxCGO,
		MaxALSALatency:          maxALSA,
		MaxEndToEndLatency:      maxEndToEnd,
		BottleneckComponent:     bottleneck,
	}
}

// reportingLoop periodically logs profiling reports
func (lp *LatencyProfiler) reportingLoop() {
	ticker := time.NewTicker(lp.config.ReportingInterval)
	defer ticker.Stop()

	for {
		select {
		case <-lp.ctx.Done():
			return
		case <-ticker.C:
			report := lp.GetReport()
			if report.TotalMeasurements > 0 {
				lp.logReport(report)
			}
		}
	}
}

// logReport logs a comprehensive profiling report
func (lp *LatencyProfiler) logReport(report LatencyProfileReport) {
	lp.logger.Info().
		Int64("total_measurements", report.TotalMeasurements).
		Dur("avg_webrtc_latency", report.AvgWebRTCLatency).
		Dur("avg_ipc_latency", report.AvgIPCLatency).
		Dur("avg_cgo_latency", report.AvgCGOLatency).
		Dur("avg_alsa_latency", report.AvgALSALatency).
		Dur("avg_end_to_end_latency", report.AvgEndToEndLatency).
		Dur("avg_validation_latency", report.AvgValidationLatency).
		Dur("avg_serialization_latency", report.AvgSerializationLatency).
		Dur("max_webrtc_latency", report.MaxWebRTCLatency).
		Dur("max_ipc_latency", report.MaxIPCLatency).
		Dur("max_cgo_latency", report.MaxCGOLatency).
		Dur("max_alsa_latency", report.MaxALSALatency).
		Dur("max_end_to_end_latency", report.MaxEndToEndLatency).
		Str("bottleneck_component", report.BottleneckComponent).
		Msg("latency profiling report")
}

// getCurrentCPUUsage returns current CPU usage percentage
func (lp *LatencyProfiler) getCurrentCPUUsage() float64 {
	// Simplified CPU usage - in production, this would use more sophisticated monitoring
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return float64(runtime.NumGoroutine()) / 100.0 // Rough approximation
}

// getCurrentMemoryUsage returns current memory usage in bytes
func (lp *LatencyProfiler) getCurrentMemoryUsage() uint64 {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return m.Alloc
}

// GetGlobalLatencyProfiler returns the global latency profiler instance
func GetGlobalLatencyProfiler() *LatencyProfiler {
	ptr := atomic.LoadPointer(&globalLatencyProfiler)
	if ptr != nil {
		return (*LatencyProfiler)(ptr)
	}

	// Initialize on first use
	if atomic.CompareAndSwapInt32(&profilerInitialized, 0, 1) {
		config := DefaultLatencyProfilerConfig()
		profiler := NewLatencyProfiler(config)
		atomic.StorePointer(&globalLatencyProfiler, unsafe.Pointer(profiler))
		return profiler
	}

	// Another goroutine initialized it, try again
	ptr = atomic.LoadPointer(&globalLatencyProfiler)
	if ptr != nil {
		return (*LatencyProfiler)(ptr)
	}

	// Fallback: create a new profiler
	config := DefaultLatencyProfilerConfig()
	return NewLatencyProfiler(config)
}

// EnableLatencyProfiling enables the global latency profiler
func EnableLatencyProfiling() error {
	profiler := GetGlobalLatencyProfiler()
	return profiler.Start()
}

// DisableLatencyProfiling disables the global latency profiler
func DisableLatencyProfiling() {
	ptr := atomic.LoadPointer(&globalLatencyProfiler)
	if ptr != nil {
		profiler := (*LatencyProfiler)(ptr)
		profiler.Stop()
	}
}

// ProfileFrameLatency is a convenience function to profile a single frame's latency
func ProfileFrameLatency(frameID uint64, frameSize int, source string, fn func(*FrameLatencyTracker)) {
	profiler := GetGlobalLatencyProfiler()
	if !profiler.IsEnabled() {
		fn(nil)
		return
	}

	tracker := profiler.StartFrameTracking(frameID, frameSize, source)
	defer profiler.FinishTracking(tracker)
	fn(tracker)
}
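// Usage sketch (editorial aside, not part of the diff above): one way the profiler API shown
// here could be driven from a frame path. `encodeAndSend` is a hypothetical placeholder for
// whatever actually processes the frame; only the profiler calls come from this diff.
//
//	_ = EnableLatencyProfiling()
//	ProfileFrameLatency(frameID, len(frame), "output", func(t *FrameLatencyTracker) {
//		t.TrackWebRTCStart()
//		encodeAndSend(frame) // hypothetical helper, not defined in this diff
//	})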
@ -0,0 +1,323 @@
package audio

import (
	"time"

	"github.com/rs/zerolog"
)

// AudioLoggerStandards provides standardized logging patterns for audio components
type AudioLoggerStandards struct {
	logger    zerolog.Logger
	component string
}

// NewAudioLogger creates a new standardized logger for an audio component
func NewAudioLogger(logger zerolog.Logger, component string) *AudioLoggerStandards {
	return &AudioLoggerStandards{
		logger:    logger.With().Str("component", component).Logger(),
		component: component,
	}
}

// Component Lifecycle Logging

// LogComponentStarting logs component initialization start
func (als *AudioLoggerStandards) LogComponentStarting() {
	als.logger.Debug().Msg("starting component")
}

// LogComponentStarted logs successful component start
func (als *AudioLoggerStandards) LogComponentStarted() {
	als.logger.Debug().Msg("component started successfully")
}

// LogComponentStopping logs component shutdown start
func (als *AudioLoggerStandards) LogComponentStopping() {
	als.logger.Debug().Msg("stopping component")
}

// LogComponentStopped logs successful component stop
func (als *AudioLoggerStandards) LogComponentStopped() {
	als.logger.Debug().Msg("component stopped")
}

// LogComponentReady logs component ready state
func (als *AudioLoggerStandards) LogComponentReady() {
	als.logger.Info().Msg("component ready")
}

// Error Logging with Context

// LogError logs a general error with context
func (als *AudioLoggerStandards) LogError(err error, msg string) {
	als.logger.Error().Err(err).Msg(msg)
}

// LogErrorWithContext logs an error with additional context fields
func (als *AudioLoggerStandards) LogErrorWithContext(err error, msg string, fields map[string]interface{}) {
	event := als.logger.Error().Err(err)
	for key, value := range fields {
		event = event.Interface(key, value)
	}
	event.Msg(msg)
}

// LogValidationError logs validation failures with specific context
func (als *AudioLoggerStandards) LogValidationError(err error, validationType string, value interface{}) {
	als.logger.Error().Err(err).
		Str("validation_type", validationType).
		Interface("invalid_value", value).
		Msg("validation failed")
}

// LogConnectionError logs connection-related errors
func (als *AudioLoggerStandards) LogConnectionError(err error, endpoint string, retryCount int) {
	als.logger.Error().Err(err).
		Str("endpoint", endpoint).
		Int("retry_count", retryCount).
		Msg("connection failed")
}

// LogProcessError logs process-related errors with PID context
func (als *AudioLoggerStandards) LogProcessError(err error, pid int, msg string) {
	als.logger.Error().Err(err).
		Int("pid", pid).
		Msg(msg)
}

// Performance and Metrics Logging

// LogPerformanceMetrics logs standardized performance metrics
func (als *AudioLoggerStandards) LogPerformanceMetrics(metrics map[string]interface{}) {
	event := als.logger.Info()
	for key, value := range metrics {
		event = event.Interface(key, value)
	}
	event.Msg("performance metrics")
}

// LogLatencyMetrics logs latency-specific metrics
func (als *AudioLoggerStandards) LogLatencyMetrics(current, average, max time.Duration, jitter time.Duration) {
	als.logger.Info().
		Dur("current_latency", current).
		Dur("average_latency", average).
		Dur("max_latency", max).
		Dur("jitter", jitter).
		Msg("latency metrics")
}

// LogFrameMetrics logs frame processing metrics
func (als *AudioLoggerStandards) LogFrameMetrics(processed, dropped int64, rate float64) {
	als.logger.Info().
		Int64("frames_processed", processed).
		Int64("frames_dropped", dropped).
		Float64("processing_rate", rate).
		Msg("frame processing metrics")
}

// LogBufferMetrics logs buffer utilization metrics
func (als *AudioLoggerStandards) LogBufferMetrics(size, used, peak int, utilizationPercent float64) {
	als.logger.Info().
		Int("buffer_size", size).
		Int("buffer_used", used).
		Int("buffer_peak", peak).
		Float64("utilization_percent", utilizationPercent).
		Msg("buffer metrics")
}

// Warning Logging

// LogWarning logs a general warning
func (als *AudioLoggerStandards) LogWarning(msg string) {
	als.logger.Warn().Msg(msg)
}

// LogWarningWithError logs a warning with error context
func (als *AudioLoggerStandards) LogWarningWithError(err error, msg string) {
	als.logger.Warn().Err(err).Msg(msg)
}

// LogThresholdWarning logs warnings when thresholds are exceeded
func (als *AudioLoggerStandards) LogThresholdWarning(metric string, current, threshold interface{}, msg string) {
	als.logger.Warn().
		Str("metric", metric).
		Interface("current_value", current).
		Interface("threshold", threshold).
		Msg(msg)
}

// LogRetryWarning logs retry attempts with context
func (als *AudioLoggerStandards) LogRetryWarning(operation string, attempt, maxAttempts int, delay time.Duration) {
	als.logger.Warn().
		Str("operation", operation).
		Int("attempt", attempt).
		Int("max_attempts", maxAttempts).
		Dur("retry_delay", delay).
		Msg("retrying operation")
}

// LogRecoveryWarning logs recovery from error conditions
func (als *AudioLoggerStandards) LogRecoveryWarning(condition string, duration time.Duration) {
	als.logger.Warn().
		Str("condition", condition).
		Dur("recovery_time", duration).
		Msg("recovered from error condition")
}

// Debug and Trace Logging

// LogDebug logs debug information
func (als *AudioLoggerStandards) LogDebug(msg string) {
	als.logger.Debug().Msg(msg)
}

// LogDebugWithFields logs debug information with structured fields
func (als *AudioLoggerStandards) LogDebugWithFields(msg string, fields map[string]interface{}) {
	event := als.logger.Debug()
	for key, value := range fields {
		event = event.Interface(key, value)
	}
	event.Msg(msg)
}

// LogOperationTrace logs operation tracing for debugging
func (als *AudioLoggerStandards) LogOperationTrace(operation string, duration time.Duration, success bool) {
	als.logger.Debug().
		Str("operation", operation).
		Dur("duration", duration).
		Bool("success", success).
		Msg("operation trace")
}

// LogDataFlow logs data flow for debugging
func (als *AudioLoggerStandards) LogDataFlow(source, destination string, bytes int, frameCount int) {
	als.logger.Debug().
		Str("source", source).
		Str("destination", destination).
		Int("bytes", bytes).
		Int("frame_count", frameCount).
		Msg("data flow")
}

// Configuration and State Logging

// LogConfigurationChange logs configuration updates
func (als *AudioLoggerStandards) LogConfigurationChange(configType string, oldValue, newValue interface{}) {
	als.logger.Info().
		Str("config_type", configType).
		Interface("old_value", oldValue).
		Interface("new_value", newValue).
		Msg("configuration changed")
}

// LogStateTransition logs component state changes
func (als *AudioLoggerStandards) LogStateTransition(fromState, toState string, reason string) {
	als.logger.Info().
		Str("from_state", fromState).
		Str("to_state", toState).
		Str("reason", reason).
		Msg("state transition")
}

// LogResourceAllocation logs resource allocation/deallocation
func (als *AudioLoggerStandards) LogResourceAllocation(resourceType string, allocated bool, amount interface{}) {
	level := als.logger.Debug()
	if allocated {
		level.Str("action", "allocated")
	} else {
		level.Str("action", "deallocated")
	}
	level.Str("resource_type", resourceType).
		Interface("amount", amount).
		Msg("resource allocation")
}

// Network and IPC Logging

// LogConnectionEvent logs connection lifecycle events
func (als *AudioLoggerStandards) LogConnectionEvent(event, endpoint string, connectionID string) {
	als.logger.Info().
		Str("event", event).
		Str("endpoint", endpoint).
		Str("connection_id", connectionID).
		Msg("connection event")
}

// LogIPCEvent logs IPC communication events
func (als *AudioLoggerStandards) LogIPCEvent(event, socketPath string, bytes int) {
	als.logger.Debug().
		Str("event", event).
		Str("socket_path", socketPath).
		Int("bytes", bytes).
		Msg("IPC event")
}

// LogNetworkStats logs network statistics
func (als *AudioLoggerStandards) LogNetworkStats(sent, received int64, latency time.Duration, packetLoss float64) {
	als.logger.Info().
		Int64("bytes_sent", sent).
		Int64("bytes_received", received).
		Dur("network_latency", latency).
		Float64("packet_loss_percent", packetLoss).
		Msg("network statistics")
}

// Process and System Logging

// LogProcessEvent logs process lifecycle events
func (als *AudioLoggerStandards) LogProcessEvent(event string, pid int, exitCode *int) {
	event_log := als.logger.Info().
		Str("event", event).
		Int("pid", pid)
	if exitCode != nil {
		event_log = event_log.Int("exit_code", *exitCode)
	}
	event_log.Msg("process event")
}

// LogSystemResource logs system resource usage
func (als *AudioLoggerStandards) LogSystemResource(cpuPercent, memoryMB float64, goroutines int) {
	als.logger.Info().
		Float64("cpu_percent", cpuPercent).
		Float64("memory_mb", memoryMB).
		Int("goroutines", goroutines).
		Msg("system resources")
}

// LogPriorityChange logs thread priority changes
func (als *AudioLoggerStandards) LogPriorityChange(tid, oldPriority, newPriority int, policy string) {
	als.logger.Debug().
		Int("tid", tid).
		Int("old_priority", oldPriority).
		Int("new_priority", newPriority).
		Str("policy", policy).
		Msg("thread priority changed")
}

// Utility Functions

// GetLogger returns the underlying zerolog.Logger for advanced usage
func (als *AudioLoggerStandards) GetLogger() zerolog.Logger {
	return als.logger
}

// WithFields returns a new logger with additional persistent fields
func (als *AudioLoggerStandards) WithFields(fields map[string]interface{}) *AudioLoggerStandards {
	event := als.logger.With()
	for key, value := range fields {
		event = event.Interface(key, value)
	}
	return &AudioLoggerStandards{
		logger:    event.Logger(),
		component: als.component,
	}
}

// WithSubComponent creates a logger for a sub-component
func (als *AudioLoggerStandards) WithSubComponent(subComponent string) *AudioLoggerStandards {
	return &AudioLoggerStandards{
		logger:    als.logger.With().Str("sub_component", subComponent).Logger(),
		component: als.component + "." + subComponent,
	}
}
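// Usage sketch (editorial aside, not part of the diff above): constructing a standardized
// logger for a hypothetical "ipc-server" component. zerolog.New(os.Stdout) stands in for the
// project's real base logger; only the AudioLoggerStandards calls come from this diff.
//
//	alog := NewAudioLogger(zerolog.New(os.Stdout), "ipc-server").WithSubComponent("writer")
//	alog.LogComponentStarting()
//	alog.LogRetryWarning("connect", 2, 5, 250*time.Millisecond)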
@ -0,0 +1,201 @@
package audio

import (
	"encoding/json"
	"net/http"
	"runtime"
	"time"

	"github.com/jetkvm/kvm/internal/logging"
	"github.com/rs/zerolog"
)

// MemoryMetrics provides comprehensive memory allocation statistics
type MemoryMetrics struct {
	// Runtime memory statistics
	RuntimeStats RuntimeMemoryStats `json:"runtime_stats"`
	// Audio buffer pool statistics
	BufferPools AudioBufferPoolStats `json:"buffer_pools"`
	// Zero-copy frame pool statistics
	ZeroCopyPool ZeroCopyFramePoolStats `json:"zero_copy_pool"`
	// Message pool statistics
	MessagePool MessagePoolStats `json:"message_pool"`
	// Batch processor statistics
	BatchProcessor BatchProcessorMemoryStats `json:"batch_processor,omitempty"`
	// Collection timestamp
	Timestamp time.Time `json:"timestamp"`
}

// RuntimeMemoryStats provides Go runtime memory statistics
type RuntimeMemoryStats struct {
	Alloc         uint64  `json:"alloc"`           // Bytes allocated and not yet freed
	TotalAlloc    uint64  `json:"total_alloc"`     // Total bytes allocated (cumulative)
	Sys           uint64  `json:"sys"`             // Total bytes obtained from OS
	Lookups       uint64  `json:"lookups"`         // Number of pointer lookups
	Mallocs       uint64  `json:"mallocs"`         // Number of mallocs
	Frees         uint64  `json:"frees"`           // Number of frees
	HeapAlloc     uint64  `json:"heap_alloc"`      // Bytes allocated and not yet freed (heap)
	HeapSys       uint64  `json:"heap_sys"`        // Bytes obtained from OS for heap
	HeapIdle      uint64  `json:"heap_idle"`       // Bytes in idle spans
	HeapInuse     uint64  `json:"heap_inuse"`      // Bytes in non-idle spans
	HeapReleased  uint64  `json:"heap_released"`   // Bytes released to OS
	HeapObjects   uint64  `json:"heap_objects"`    // Total number of allocated objects
	StackInuse    uint64  `json:"stack_inuse"`     // Bytes used by stack spans
	StackSys      uint64  `json:"stack_sys"`       // Bytes obtained from OS for stack
	MSpanInuse    uint64  `json:"mspan_inuse"`     // Bytes used by mspan structures
	MSpanSys      uint64  `json:"mspan_sys"`       // Bytes obtained from OS for mspan
	MCacheInuse   uint64  `json:"mcache_inuse"`    // Bytes used by mcache structures
	MCacheSys     uint64  `json:"mcache_sys"`      // Bytes obtained from OS for mcache
	BuckHashSys   uint64  `json:"buck_hash_sys"`   // Bytes used by profiling bucket hash table
	GCSys         uint64  `json:"gc_sys"`          // Bytes used for garbage collection metadata
	OtherSys      uint64  `json:"other_sys"`       // Bytes used for other system allocations
	NextGC        uint64  `json:"next_gc"`         // Target heap size for next GC
	LastGC        uint64  `json:"last_gc"`         // Time of last GC (nanoseconds since epoch)
	PauseTotalNs  uint64  `json:"pause_total_ns"`  // Total GC pause time
	NumGC         uint32  `json:"num_gc"`          // Number of completed GC cycles
	NumForcedGC   uint32  `json:"num_forced_gc"`   // Number of forced GC cycles
	GCCPUFraction float64 `json:"gc_cpu_fraction"` // Fraction of CPU time used by GC
}

// BatchProcessorMemoryStats provides batch processor memory statistics
type BatchProcessorMemoryStats struct {
	Initialized bool                         `json:"initialized"`
	Running     bool                         `json:"running"`
	Stats       BatchAudioStats              `json:"stats"`
	BufferPool  AudioBufferPoolDetailedStats `json:"buffer_pool,omitempty"`
}

// GetBatchAudioProcessor is defined in batch_audio.go
// BatchAudioStats is defined in batch_audio.go

var memoryMetricsLogger *zerolog.Logger

func getMemoryMetricsLogger() *zerolog.Logger {
	if memoryMetricsLogger == nil {
		logger := logging.GetDefaultLogger().With().Str("component", "memory-metrics").Logger()
		memoryMetricsLogger = &logger
	}
	return memoryMetricsLogger
}

// CollectMemoryMetrics gathers comprehensive memory allocation statistics
func CollectMemoryMetrics() MemoryMetrics {
	// Collect runtime memory statistics
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	runtimeStats := RuntimeMemoryStats{
		Alloc:         m.Alloc,
		TotalAlloc:    m.TotalAlloc,
		Sys:           m.Sys,
		Lookups:       m.Lookups,
		Mallocs:       m.Mallocs,
		Frees:         m.Frees,
		HeapAlloc:     m.HeapAlloc,
		HeapSys:       m.HeapSys,
		HeapIdle:      m.HeapIdle,
		HeapInuse:     m.HeapInuse,
		HeapReleased:  m.HeapReleased,
		HeapObjects:   m.HeapObjects,
		StackInuse:    m.StackInuse,
		StackSys:      m.StackSys,
		MSpanInuse:    m.MSpanInuse,
		MSpanSys:      m.MSpanSys,
		MCacheInuse:   m.MCacheInuse,
		MCacheSys:     m.MCacheSys,
		BuckHashSys:   m.BuckHashSys,
		GCSys:         m.GCSys,
		OtherSys:      m.OtherSys,
		NextGC:        m.NextGC,
		LastGC:        m.LastGC,
		PauseTotalNs:  m.PauseTotalNs,
		NumGC:         m.NumGC,
		NumForcedGC:   m.NumForcedGC,
		GCCPUFraction: m.GCCPUFraction,
	}

	// Collect audio buffer pool statistics
	bufferPoolStats := GetAudioBufferPoolStats()

	// Collect zero-copy frame pool statistics
	zeroCopyStats := GetGlobalZeroCopyPoolStats()

	// Collect message pool statistics
	messagePoolStats := GetGlobalMessagePoolStats()

	// Collect batch processor statistics if available
	var batchStats BatchProcessorMemoryStats
	if processor := GetBatchAudioProcessor(); processor != nil {
		batchStats.Initialized = true
		batchStats.Running = processor.IsRunning()
		batchStats.Stats = processor.GetStats()
		// Note: BatchAudioProcessor uses sync.Pool, detailed stats not available
	}

	return MemoryMetrics{
		RuntimeStats:   runtimeStats,
		BufferPools:    bufferPoolStats,
		ZeroCopyPool:   zeroCopyStats,
		MessagePool:    messagePoolStats,
		BatchProcessor: batchStats,
		Timestamp:      time.Now(),
	}
}

// HandleMemoryMetrics provides an HTTP handler for memory metrics
func HandleMemoryMetrics(w http.ResponseWriter, r *http.Request) {
	logger := getMemoryMetricsLogger()

	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	metrics := CollectMemoryMetrics()

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-cache")

	encoder := json.NewEncoder(w)
	encoder.SetIndent("", "  ")

	if err := encoder.Encode(metrics); err != nil {
		logger.Error().Err(err).Msg("failed to encode memory metrics")
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}

	logger.Debug().Msg("memory metrics served")
}

// LogMemoryMetrics logs current memory metrics for debugging
func LogMemoryMetrics() {
	logger := getMemoryMetricsLogger()
	metrics := CollectMemoryMetrics()

	logger.Info().
		Uint64("heap_alloc_mb", metrics.RuntimeStats.HeapAlloc/uint64(GetConfig().BytesToMBDivisor)).
		Uint64("heap_sys_mb", metrics.RuntimeStats.HeapSys/uint64(GetConfig().BytesToMBDivisor)).
		Uint64("heap_objects", metrics.RuntimeStats.HeapObjects).
		Uint32("num_gc", metrics.RuntimeStats.NumGC).
		Float64("gc_cpu_fraction", metrics.RuntimeStats.GCCPUFraction).
		Float64("buffer_pool_hit_rate", metrics.BufferPools.FramePoolHitRate).
		Float64("zero_copy_hit_rate", metrics.ZeroCopyPool.HitRate).
		Float64("message_pool_hit_rate", metrics.MessagePool.HitRate).
		Msg("memory metrics snapshot")
}

// StartMemoryMetricsLogging starts periodic memory metrics logging
func StartMemoryMetricsLogging(interval time.Duration) {
	logger := getMemoryMetricsLogger()
	logger.Debug().Dur("interval", interval).Msg("memory metrics logging started")

	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()

		for range ticker.C {
			LogMemoryMetrics()
		}
	}()
}
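// Usage sketch (editorial aside, not part of the diff above): wiring the handler and the
// periodic logger into startup code. The route path is an assumption for illustration only.
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/debug/audio/memory", HandleMemoryMetrics)
//	StartMemoryMetricsLogging(60 * time.Second)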
@ -288,7 +288,45 @@ var (
	)

	// Device health metrics
	// Removed device health metrics - functionality not used
	deviceHealthStatus = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_device_health_status",
			Help: "Current device health status (0=Healthy, 1=Degraded, 2=Failing, 3=Critical)",
		},
		[]string{"device_type"}, // device_type: capture, playback
	)

	deviceHealthScore = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_device_health_score",
			Help: "Device health score (0.0-1.0, higher is better)",
		},
		[]string{"device_type"}, // device_type: capture, playback
	)

	deviceConsecutiveErrors = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_device_consecutive_errors",
			Help: "Number of consecutive errors for device",
		},
		[]string{"device_type"}, // device_type: capture, playback
	)

	deviceTotalErrors = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_device_total_errors",
			Help: "Total number of errors for device",
		},
		[]string{"device_type"}, // device_type: capture, playback
	)

	deviceLatencySpikes = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_device_latency_spikes_total",
			Help: "Total number of latency spikes for device",
		},
		[]string{"device_type"}, // device_type: capture, playback
	)

	// Memory metrics
	memoryHeapAllocBytes = promauto.NewGauge(
@ -398,7 +436,11 @@ var (
	micBytesProcessedValue int64
	micConnectionDropsValue int64

	// Atomic counters for device health metrics - functionality removed, no longer used
	// Atomic counters for device health metrics
	deviceCaptureErrorsValue int64
	devicePlaybackErrorsValue int64
	deviceCaptureSpikesValue int64
	devicePlaybackSpikesValue int64

	// Atomic counter for memory GC
	memoryGCCountValue uint32
@ -597,8 +639,34 @@ func UpdateSocketBufferMetrics(component, bufferType string, size, utilization f
	atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
}

// UpdateDeviceHealthMetrics - Device health monitoring functionality has been removed
// This function is no longer used as device health monitoring is not implemented
// UpdateDeviceHealthMetrics updates device health metrics
func UpdateDeviceHealthMetrics(deviceType string, status int, healthScore float64, consecutiveErrors, totalErrors, latencySpikes int64) {
	metricsUpdateMutex.Lock()
	defer metricsUpdateMutex.Unlock()

	deviceHealthStatus.WithLabelValues(deviceType).Set(float64(status))
	deviceHealthScore.WithLabelValues(deviceType).Set(healthScore)
	deviceConsecutiveErrors.WithLabelValues(deviceType).Set(float64(consecutiveErrors))

	// Update error counters with delta calculation
	var prevErrors, prevSpikes int64
	if deviceType == "capture" {
		prevErrors = atomic.SwapInt64(&deviceCaptureErrorsValue, totalErrors)
		prevSpikes = atomic.SwapInt64(&deviceCaptureSpikesValue, latencySpikes)
	} else {
		prevErrors = atomic.SwapInt64(&devicePlaybackErrorsValue, totalErrors)
		prevSpikes = atomic.SwapInt64(&devicePlaybackSpikesValue, latencySpikes)
	}

	if prevErrors > 0 && totalErrors > prevErrors {
		deviceTotalErrors.WithLabelValues(deviceType).Add(float64(totalErrors - prevErrors))
	}
	if prevSpikes > 0 && latencySpikes > prevSpikes {
		deviceLatencySpikes.WithLabelValues(deviceType).Add(float64(latencySpikes - prevSpikes))
	}

	atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
}

// UpdateMemoryMetrics updates memory metrics
func UpdateMemoryMetrics() {
@ -97,6 +97,7 @@ type AudioSupervisorInterface interface {
	Stop() error
	IsRunning() bool
	GetProcessPID() int
	GetProcessMetrics() *ProcessMetrics
}

type AudioServerInterface interface {
@ -145,6 +145,20 @@ func (aom *AudioOutputManager) GetComprehensiveMetrics() map[string]interface{}
	return comprehensiveMetrics
}

// LogPerformanceStats logs current performance statistics
func (aom *AudioOutputManager) LogPerformanceStats() {
	metrics := aom.GetMetrics()
	aom.logger.Info().
		Int64("frames_received", metrics.FramesReceived).
		Int64("frames_dropped", metrics.FramesDropped).
		Int64("bytes_processed", metrics.BytesProcessed).
		Int64("connection_drops", metrics.ConnectionDrops).
		Float64("average_latency_ms", float64(metrics.AverageLatency.Nanoseconds())/1e6).
		Bool("running", aom.IsRunning()).
		Bool("ready", aom.IsReady()).
		Msg("Audio output manager performance stats")
}

// GetStreamer returns the streamer for advanced operations
func (aom *AudioOutputManager) GetStreamer() *AudioOutputStreamer {
	return aom.streamer
@ -0,0 +1,277 @@
package audio

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestAudioOutputManager tests the AudioOutputManager component
func TestAudioOutputManager(t *testing.T) {
	tests := []struct {
		name     string
		testFunc func(t *testing.T)
	}{
		{"Start", testAudioOutputManagerStart},
		{"Stop", testAudioOutputManagerStop},
		{"StartStop", testAudioOutputManagerStartStop},
		{"IsRunning", testAudioOutputManagerIsRunning},
		{"IsReady", testAudioOutputManagerIsReady},
		{"GetMetrics", testAudioOutputManagerGetMetrics},
		{"ConcurrentOperations", testAudioOutputManagerConcurrent},
		{"MultipleStarts", testAudioOutputManagerMultipleStarts},
		{"MultipleStops", testAudioOutputManagerMultipleStops},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.testFunc(t)
		})
	}
}

func testAudioOutputManagerStart(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Test initial state
	assert.False(t, manager.IsRunning())
	assert.False(t, manager.IsReady())

	// Test start
	err := manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())

	// Cleanup
	manager.Stop()
}

func testAudioOutputManagerStop(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Start first
	err := manager.Start()
	require.NoError(t, err)
	assert.True(t, manager.IsRunning())

	// Test stop
	manager.Stop()
	assert.False(t, manager.IsRunning())
	assert.False(t, manager.IsReady())
}

func testAudioOutputManagerStartStop(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Test multiple start/stop cycles
	for i := 0; i < 3; i++ {
		// Start
		err := manager.Start()
		assert.NoError(t, err)
		assert.True(t, manager.IsRunning())

		// Stop
		manager.Stop()
		assert.False(t, manager.IsRunning())
	}
}

func testAudioOutputManagerIsRunning(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Initially not running
	assert.False(t, manager.IsRunning())

	// Start and check
	err := manager.Start()
	require.NoError(t, err)
	assert.True(t, manager.IsRunning())

	// Stop and check
	manager.Stop()
	assert.False(t, manager.IsRunning())
}

func testAudioOutputManagerIsReady(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Initially not ready
	assert.False(t, manager.IsReady())

	// Start and check ready state
	err := manager.Start()
	require.NoError(t, err)

	// Give some time for initialization
	time.Sleep(100 * time.Millisecond)

	// Stop
	manager.Stop()
	assert.False(t, manager.IsReady())
}

func testAudioOutputManagerGetMetrics(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Test metrics when not running
	metrics := manager.GetMetrics()
	assert.NotNil(t, metrics)

	// Start and test metrics
	err := manager.Start()
	require.NoError(t, err)

	metrics = manager.GetMetrics()
	assert.NotNil(t, metrics)

	// Cleanup
	manager.Stop()
}

func testAudioOutputManagerConcurrent(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	var wg sync.WaitGroup
	const numGoroutines = 10

	// Test concurrent starts
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			manager.Start()
		}()
	}
	wg.Wait()

	// Should be running
	assert.True(t, manager.IsRunning())

	// Test concurrent stops
	wg.Add(numGoroutines)
	for i := 0; i < numGoroutines; i++ {
		go func() {
			defer wg.Done()
			manager.Stop()
		}()
	}
	wg.Wait()

	// Should be stopped
	assert.False(t, manager.IsRunning())
}

func testAudioOutputManagerMultipleStarts(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// First start should succeed
	err := manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())

	// Subsequent starts should be no-op
	err = manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())

	err = manager.Start()
	assert.NoError(t, err)
	assert.True(t, manager.IsRunning())

	// Cleanup
	manager.Stop()
}

func testAudioOutputManagerMultipleStops(t *testing.T) {
	manager := NewAudioOutputManager()
	require.NotNil(t, manager)

	// Start first
	err := manager.Start()
	require.NoError(t, err)
	assert.True(t, manager.IsRunning())

	// First stop should work
	manager.Stop()
	assert.False(t, manager.IsRunning())

	// Subsequent stops should be no-op
	manager.Stop()
	assert.False(t, manager.IsRunning())

	manager.Stop()
	assert.False(t, manager.IsRunning())
}

// TestAudioOutputMetrics tests the AudioOutputMetrics functionality
func TestAudioOutputMetrics(t *testing.T) {
	metrics := &AudioOutputMetrics{}

	// Test initial state
	assert.Equal(t, int64(0), metrics.FramesReceived)
	assert.Equal(t, int64(0), metrics.FramesDropped)
	assert.Equal(t, int64(0), metrics.BytesProcessed)
	assert.Equal(t, int64(0), metrics.ConnectionDrops)
	assert.Equal(t, time.Duration(0), metrics.AverageLatency)
	assert.True(t, metrics.LastFrameTime.IsZero())

	// Test field assignment
	metrics.FramesReceived = 100
	metrics.FramesDropped = 5
	metrics.BytesProcessed = 1024
	metrics.ConnectionDrops = 2
	metrics.AverageLatency = 10 * time.Millisecond
	metrics.LastFrameTime = time.Now()

	// Verify assignments
	assert.Equal(t, int64(100), metrics.FramesReceived)
	assert.Equal(t, int64(5), metrics.FramesDropped)
	assert.Equal(t, int64(1024), metrics.BytesProcessed)
	assert.Equal(t, int64(2), metrics.ConnectionDrops)
	assert.Equal(t, 10*time.Millisecond, metrics.AverageLatency)
	assert.False(t, metrics.LastFrameTime.IsZero())
}

// BenchmarkAudioOutputManager benchmarks the AudioOutputManager operations
func BenchmarkAudioOutputManager(b *testing.B) {
	b.Run("Start", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			manager := NewAudioOutputManager()
			manager.Start()
			manager.Stop()
		}
	})

	b.Run("IsRunning", func(b *testing.B) {
		manager := NewAudioOutputManager()
		manager.Start()
		defer manager.Stop()

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			manager.IsRunning()
		}
	})

	b.Run("GetMetrics", func(b *testing.B) {
		manager := NewAudioOutputManager()
		manager.Start()
		defer manager.Stop()

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			manager.GetMetrics()
		}
	})
}
@ -18,15 +18,12 @@ import (
// AudioOutputStreamer manages high-performance audio output streaming
type AudioOutputStreamer struct {
	// Atomic int64 fields MUST be first for ARM32 alignment (8-byte alignment required)
	// Performance metrics (atomic operations for thread safety)
	processedFrames int64 // Total processed frames counter (atomic)
	droppedFrames   int64 // Dropped frames counter (atomic)
	processingTime  int64 // Average processing time in nanoseconds (atomic)
	lastStatsTime   int64 // Last statistics update time (atomic)

	// Other fields after atomic int64 fields
	sampleRate int32 // Sample every N frames (default: 10)

	client     *AudioOutputClient
	bufferPool *AudioBufferPool
	ctx        context.Context
@ -73,7 +70,6 @@ func NewAudioOutputStreamer() (*AudioOutputStreamer, error) {
		processingChan: make(chan []byte, GetConfig().ChannelBufferSize), // Large buffer for smooth processing
		statsInterval:  GetConfig().StatsUpdateInterval,                  // Statistics interval from config
		lastStatsTime:  time.Now().UnixNano(),
		sampleRate:     10, // Update metrics every 10 frames to reduce atomic ops
	}, nil
}
@ -112,9 +108,6 @@ func (s *AudioOutputStreamer) Stop() {
	s.running = false
	s.cancel()

	// Flush any pending sampled metrics before stopping
	s.flushPendingMetrics()

	// Close processing channel to signal goroutines (only if not already closed)
	if !s.chanClosed {
		close(s.processingChan)
@ -132,14 +125,9 @@ func (s *AudioOutputStreamer) Stop() {
func (s *AudioOutputStreamer) streamLoop() {
	defer s.wg.Done()

	// Only pin to OS thread for high-throughput scenarios to reduce scheduler interference
	config := GetConfig()
	useThreadOptimizations := config.MaxAudioProcessorWorkers > 8

	if useThreadOptimizations {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
	}
	// Pin goroutine to OS thread for consistent performance
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Adaptive timing for frame reading
	frameInterval := time.Duration(GetConfig().OutputStreamingFrameIntervalMS) * time.Millisecond // 50 FPS base rate
@ -200,15 +188,19 @@ func (s *AudioOutputStreamer) streamLoop() {
func (s *AudioOutputStreamer) processingLoop() {
	defer s.wg.Done()

	// Only use thread optimizations for high-throughput scenarios
	config := GetConfig()
	useThreadOptimizations := config.MaxAudioProcessorWorkers > 8
	// Pin goroutine to OS thread for consistent performance
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if useThreadOptimizations {
		// Pin goroutine to OS thread for consistent performance
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
	// Set high priority for audio output processing
	if err := SetAudioThreadPriority(); err != nil {
		getOutputStreamingLogger().Warn().Err(err).Msg("Failed to set audio output processing priority")
	}
	defer func() {
		if err := ResetThreadPriority(); err != nil {
			getOutputStreamingLogger().Warn().Err(err).Msg("Failed to reset thread priority")
		}
	}()

	for frameData := range s.processingChan {
		// Process frame and return buffer to pool after processing
@ -217,23 +209,15 @@ func (s *AudioOutputStreamer) processingLoop() {

			if _, err := s.client.ReceiveFrame(); err != nil {
				if s.client.IsConnected() {
					// Sample logging to reduce overhead - log every 50th error
					if atomic.LoadInt64(&s.droppedFrames)%50 == 0 && getOutputStreamingLogger().GetLevel() <= zerolog.WarnLevel {
						getOutputStreamingLogger().Warn().Err(err).Msg("Error reading audio frame from output server")
					}
					s.recordFrameDropped()
					getOutputStreamingLogger().Warn().Err(err).Msg("Error reading audio frame from output server")
					atomic.AddInt64(&s.droppedFrames, 1)
				}
				// Try to reconnect if disconnected
				if !s.client.IsConnected() {
					if err := s.client.Connect(); err != nil {
						// Only log reconnection failures if warn level enabled
						if getOutputStreamingLogger().GetLevel() <= zerolog.WarnLevel {
							getOutputStreamingLogger().Warn().Err(err).Msg("Failed to reconnect")
						}
						getOutputStreamingLogger().Warn().Err(err).Msg("Failed to reconnect")
					}
				}
			} else {
				s.recordFrameProcessed()
			}
		}()
	}
@ -274,23 +258,8 @@ func (s *AudioOutputStreamer) reportStatistics() {
	}
}

// recordFrameProcessed records a processed frame with sampling optimization
func (s *AudioOutputStreamer) recordFrameProcessed() {
}

// recordFrameDropped records a dropped frame with sampling optimization
func (s *AudioOutputStreamer) recordFrameDropped() {
}

// flushPendingMetrics flushes any pending sampled metrics to atomic counters
func (s *AudioOutputStreamer) flushPendingMetrics() {
}

// GetStats returns streaming statistics with pending metrics flushed
// GetStats returns streaming statistics
func (s *AudioOutputStreamer) GetStats() (processed, dropped int64, avgProcessingTime time.Duration) {
	// Flush pending metrics for accurate reading
	s.flushPendingMetrics()

	processed = atomic.LoadInt64(&s.processedFrames)
	dropped = atomic.LoadInt64(&s.droppedFrames)
	processingTimeNs := atomic.LoadInt64(&s.processingTime)
@ -300,9 +269,6 @@ func (s *AudioOutputStreamer) GetStats() (processed, dropped int64, avgProcessin

// GetDetailedStats returns comprehensive streaming statistics
func (s *AudioOutputStreamer) GetDetailedStats() map[string]interface{} {
	// Flush pending metrics for accurate reading
	s.flushPendingMetrics()

	processed := atomic.LoadInt64(&s.processedFrames)
	dropped := atomic.LoadInt64(&s.droppedFrames)
	processingTime := atomic.LoadInt64(&s.processingTime)
@ -0,0 +1,341 @@
package audio

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestAudioOutputStreamer tests the AudioOutputStreamer component
func TestAudioOutputStreamer(t *testing.T) {
	tests := []struct {
		name     string
		testFunc func(t *testing.T)
	}{
		{"NewAudioOutputStreamer", testNewAudioOutputStreamer},
		{"Start", testAudioOutputStreamerStart},
		{"Stop", testAudioOutputStreamerStop},
		{"StartStop", testAudioOutputStreamerStartStop},
		{"GetStats", testAudioOutputStreamerGetStats},
		{"GetDetailedStats", testAudioOutputStreamerGetDetailedStats},
		{"UpdateBatchSize", testAudioOutputStreamerUpdateBatchSize},
		{"ReportLatency", testAudioOutputStreamerReportLatency},
		{"ConcurrentOperations", testAudioOutputStreamerConcurrent},
		{"MultipleStarts", testAudioOutputStreamerMultipleStarts},
		{"MultipleStops", testAudioOutputStreamerMultipleStops},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.testFunc(t)
		})
	}
}

func testNewAudioOutputStreamer(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		// If creation fails due to missing dependencies, skip the test
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)

	// Test initial state
	processed, dropped, avgTime := streamer.GetStats()
	assert.GreaterOrEqual(t, processed, int64(0))
	assert.GreaterOrEqual(t, dropped, int64(0))
	assert.GreaterOrEqual(t, avgTime, time.Duration(0))

	// Cleanup
	streamer.Stop()
}

func testAudioOutputStreamerStart(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)

	// Test start
	err = streamer.Start()
	assert.NoError(t, err)

	// Cleanup
	streamer.Stop()
}

func testAudioOutputStreamerStop(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)

	// Start first
	err = streamer.Start()
	require.NoError(t, err)

	// Test stop
	streamer.Stop()

	// Multiple stops should be safe
	streamer.Stop()
	streamer.Stop()
}

func testAudioOutputStreamerStartStop(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)

	// Test multiple start/stop cycles
	for i := 0; i < 3; i++ {
		// Start
		err = streamer.Start()
		assert.NoError(t, err)

		// Stop
		streamer.Stop()
	}
}

func testAudioOutputStreamerGetStats(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)

	// Test stats when not running
	processed, dropped, avgTime := streamer.GetStats()
	assert.Equal(t, int64(0), processed)
	assert.Equal(t, int64(0), dropped)
	assert.GreaterOrEqual(t, avgTime, time.Duration(0))

	// Start and test stats
	err = streamer.Start()
	require.NoError(t, err)

	processed, dropped, avgTime = streamer.GetStats()
	assert.GreaterOrEqual(t, processed, int64(0))
	assert.GreaterOrEqual(t, dropped, int64(0))
	assert.GreaterOrEqual(t, avgTime, time.Duration(0))

	// Cleanup
	streamer.Stop()
}

func testAudioOutputStreamerGetDetailedStats(t *testing.T) {
	streamer, err := NewAudioOutputStreamer()
	if err != nil {
		t.Skipf("Skipping test due to missing dependencies: %v", err)
		return
	}
	require.NotNil(t, streamer)

	// Test detailed stats
	stats := streamer.GetDetailedStats()
|
||||
assert.NotNil(t, stats)
|
||||
assert.Contains(t, stats, "processed_frames")
|
||||
assert.Contains(t, stats, "dropped_frames")
|
||||
assert.Contains(t, stats, "batch_size")
|
||||
assert.Contains(t, stats, "connected")
|
||||
assert.Equal(t, int64(0), stats["processed_frames"])
|
||||
assert.Equal(t, int64(0), stats["dropped_frames"])
|
||||
|
||||
// Start and test detailed stats
|
||||
err = streamer.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
stats = streamer.GetDetailedStats()
|
||||
assert.NotNil(t, stats)
|
||||
assert.Contains(t, stats, "processed_frames")
|
||||
assert.Contains(t, stats, "dropped_frames")
|
||||
|
||||
// Cleanup
|
||||
streamer.Stop()
|
||||
}
|
||||
|
||||
func testAudioOutputStreamerUpdateBatchSize(t *testing.T) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
require.NotNil(t, streamer)
|
||||
|
||||
// Test updating batch size (no parameters, uses adaptive manager)
|
||||
streamer.UpdateBatchSize()
|
||||
streamer.UpdateBatchSize()
|
||||
streamer.UpdateBatchSize()
|
||||
|
||||
// Cleanup
|
||||
streamer.Stop()
|
||||
}
|
||||
|
||||
func testAudioOutputStreamerReportLatency(t *testing.T) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
require.NotNil(t, streamer)
|
||||
|
||||
// Test reporting latency
|
||||
streamer.ReportLatency(10 * time.Millisecond)
|
||||
streamer.ReportLatency(5 * time.Millisecond)
|
||||
streamer.ReportLatency(15 * time.Millisecond)
|
||||
|
||||
// Cleanup
|
||||
streamer.Stop()
|
||||
}
|
||||
|
||||
func testAudioOutputStreamerConcurrent(t *testing.T) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
require.NotNil(t, streamer)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 10
|
||||
|
||||
// Test concurrent starts
|
||||
wg.Add(numGoroutines)
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
streamer.Start()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Test concurrent operations
|
||||
wg.Add(numGoroutines * 3)
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
streamer.GetStats()
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
streamer.UpdateBatchSize()
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
streamer.ReportLatency(10 * time.Millisecond)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Test concurrent stops
|
||||
wg.Add(numGoroutines)
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
streamer.Stop()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func testAudioOutputStreamerMultipleStarts(t *testing.T) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
require.NotNil(t, streamer)
|
||||
|
||||
// First start should succeed
|
||||
err = streamer.Start()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Subsequent starts should return error
|
||||
err = streamer.Start()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already running")
|
||||
|
||||
err = streamer.Start()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already running")
|
||||
|
||||
// Cleanup
|
||||
streamer.Stop()
|
||||
}
|
||||
|
||||
func testAudioOutputStreamerMultipleStops(t *testing.T) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
require.NotNil(t, streamer)
|
||||
|
||||
// Start first
|
||||
err = streamer.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Multiple stops should be safe
|
||||
streamer.Stop()
|
||||
streamer.Stop()
|
||||
streamer.Stop()
|
||||
}
|
||||
|
||||
// BenchmarkAudioOutputStreamer benchmarks the AudioOutputStreamer operations
|
||||
func BenchmarkAudioOutputStreamer(b *testing.B) {
|
||||
b.Run("GetStats", func(b *testing.B) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
b.Skipf("Skipping benchmark due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
defer streamer.Stop()
|
||||
|
||||
streamer.Start()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
streamer.GetStats()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("UpdateBatchSize", func(b *testing.B) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
b.Skipf("Skipping benchmark due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
defer streamer.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
streamer.UpdateBatchSize()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("ReportLatency", func(b *testing.B) {
|
||||
streamer, err := NewAudioOutputStreamer()
|
||||
if err != nil {
|
||||
b.Skipf("Skipping benchmark due to missing dependencies: %v", err)
|
||||
return
|
||||
}
|
||||
defer streamer.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
streamer.ReportLatency(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,393 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestPerformanceCriticalPaths tests the most frequently executed code paths
|
||||
// to ensure they remain efficient and don't interfere with KVM functionality
|
||||
func TestPerformanceCriticalPaths(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping performance tests in short mode")
|
||||
}
|
||||
|
||||
// Initialize validation cache for performance testing
|
||||
InitValidationCache()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"AudioFrameProcessingLatency", testAudioFrameProcessingLatency},
|
||||
{"MetricsUpdateOverhead", testMetricsUpdateOverhead},
|
||||
{"ConfigurationAccessSpeed", testConfigurationAccessSpeed},
|
||||
{"ValidationFunctionSpeed", testValidationFunctionSpeed},
|
||||
{"MemoryAllocationPatterns", testMemoryAllocationPatterns},
|
||||
{"ConcurrentAccessPerformance", testConcurrentAccessPerformance},
|
||||
{"BufferPoolEfficiency", testBufferPoolEfficiency},
|
||||
{"AtomicOperationOverhead", testAtomicOperationOverhead},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAudioFrameProcessingLatency tests the latency of audio frame processing
|
||||
// This is the most critical path that must not interfere with KVM
|
||||
func testAudioFrameProcessingLatency(t *testing.T) {
|
||||
const (
|
||||
frameCount = 1000
|
||||
maxLatencyPerFrame = 100 * time.Microsecond // Very strict requirement
|
||||
)
|
||||
|
||||
// Create test frame data
|
||||
frameData := make([]byte, 1920) // Typical frame size
|
||||
for i := range frameData {
|
||||
frameData[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Measure frame processing latency
|
||||
start := time.Now()
|
||||
for i := 0; i < frameCount; i++ {
|
||||
// Simulate the critical path: validation + metrics update
|
||||
err := ValidateAudioFrame(frameData)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Record frame received (atomic operation)
|
||||
RecordFrameReceived(len(frameData))
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
|
||||
avgLatencyPerFrame := elapsed / frameCount
|
||||
t.Logf("Average frame processing latency: %v", avgLatencyPerFrame)
|
||||
|
||||
// Ensure frame processing is fast enough to not interfere with KVM
|
||||
assert.Less(t, avgLatencyPerFrame, maxLatencyPerFrame,
|
||||
"Frame processing latency %v exceeds maximum %v - may interfere with KVM",
|
||||
avgLatencyPerFrame, maxLatencyPerFrame)
|
||||
|
||||
// Ensure total processing time is reasonable
|
||||
maxTotalTime := 50 * time.Millisecond
|
||||
assert.Less(t, elapsed, maxTotalTime,
|
||||
"Total processing time %v exceeds maximum %v", elapsed, maxTotalTime)
|
||||
}
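// Companion benchmark sketch (not in this changeset): the same critical path can
// be tracked with the standard benchmark harness, which also reports allocations
// per frame. It reuses ValidateAudioFrame and RecordFrameReceived exactly as the
// latency test above does.
func BenchmarkAudioFrameCriticalPath(b *testing.B) {
	frameData := make([]byte, 1920)
	for i := range frameData {
		frameData[i] = byte(i % 256)
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := ValidateAudioFrame(frameData); err != nil {
			b.Fatal(err)
		}
		RecordFrameReceived(len(frameData))
	}
}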
|
||||
|
||||
// testMetricsUpdateOverhead tests the overhead of metrics updates
|
||||
func testMetricsUpdateOverhead(t *testing.T) {
|
||||
const iterations = 10000
|
||||
|
||||
// Test RecordFrameReceived performance
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
RecordFrameReceived(1024)
|
||||
}
|
||||
recordLatency := time.Since(start) / iterations
|
||||
|
||||
// Test GetAudioMetrics performance
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetAudioMetrics()
|
||||
}
|
||||
getLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("RecordFrameReceived latency: %v", recordLatency)
|
||||
t.Logf("GetAudioMetrics latency: %v", getLatency)
|
||||
|
||||
// Metrics operations should be optimized for JetKVM's ARM Cortex-A7 @ 1GHz
|
||||
// With 256MB RAM, we need to be conservative with performance expectations
|
||||
assert.Less(t, recordLatency, 50*time.Microsecond, "RecordFrameReceived too slow")
|
||||
assert.Less(t, getLatency, 20*time.Microsecond, "GetAudioMetrics too slow")
|
||||
}
|
||||
|
||||
// testConfigurationAccessSpeed tests configuration access performance
|
||||
func testConfigurationAccessSpeed(t *testing.T) {
|
||||
const iterations = 10000
|
||||
|
||||
// Test GetAudioConfig performance
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
configLatency := time.Since(start) / iterations
|
||||
|
||||
// Test GetConfig performance
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = GetConfig()
|
||||
}
|
||||
constantsLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("GetAudioConfig latency: %v", configLatency)
|
||||
t.Logf("GetConfig latency: %v", constantsLatency)
|
||||
|
||||
// Configuration access should be very fast
|
||||
assert.Less(t, configLatency, 100*time.Nanosecond, "GetAudioConfig too slow")
|
||||
assert.Less(t, constantsLatency, 100*time.Nanosecond, "GetConfig too slow")
|
||||
}
|
||||
|
||||
// testValidationFunctionSpeed tests validation function performance
|
||||
func testValidationFunctionSpeed(t *testing.T) {
|
||||
const iterations = 10000
|
||||
frameData := make([]byte, 1920)
|
||||
|
||||
// Test ValidateAudioFrame (most critical)
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
err := ValidateAudioFrame(frameData)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
fastValidationLatency := time.Since(start) / iterations
|
||||
|
||||
// Test ValidateAudioQuality
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
err := ValidateAudioQuality(AudioQualityMedium)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
qualityValidationLatency := time.Since(start) / iterations
|
||||
|
||||
// Test ValidateBufferSize
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
err := ValidateBufferSize(1024)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
bufferValidationLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("ValidateAudioFrame latency: %v", fastValidationLatency)
|
||||
t.Logf("ValidateAudioQuality latency: %v", qualityValidationLatency)
|
||||
t.Logf("ValidateBufferSize latency: %v", bufferValidationLatency)
|
||||
|
||||
// Validation functions optimized for ARM Cortex-A7 single core @ 1GHz
|
||||
// Conservative thresholds to ensure KVM functionality isn't impacted
|
||||
assert.Less(t, fastValidationLatency, 100*time.Microsecond, "ValidateAudioFrame too slow")
|
||||
assert.Less(t, qualityValidationLatency, 50*time.Microsecond, "ValidateAudioQuality too slow")
|
||||
assert.Less(t, bufferValidationLatency, 50*time.Microsecond, "ValidateBufferSize too slow")
|
||||
}
|
||||
|
||||
// testMemoryAllocationPatterns tests memory allocation efficiency
|
||||
func testMemoryAllocationPatterns(t *testing.T) {
|
||||
// Test that frequent operations don't cause excessive allocations
|
||||
var m1, m2 runtime.MemStats
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m1)
|
||||
|
||||
// Perform operations that should minimize allocations
|
||||
for i := 0; i < 1000; i++ {
|
||||
_ = GetAudioConfig()
|
||||
_ = GetAudioMetrics()
|
||||
RecordFrameReceived(1024)
|
||||
_ = ValidateAudioQuality(AudioQualityMedium)
|
||||
}
|
||||
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m2)
|
||||
|
||||
allocations := m2.Mallocs - m1.Mallocs
|
||||
t.Logf("Memory allocations for 1000 operations: %d", allocations)
|
||||
|
||||
// Should have minimal allocations for these hot path operations
|
||||
assert.Less(t, allocations, uint64(100), "Too many memory allocations in hot path")
|
||||
}
|
||||
|
||||
// testConcurrentAccessPerformance tests performance under concurrent access
|
||||
func testConcurrentAccessPerformance(t *testing.T) {
|
||||
const (
|
||||
numGoroutines = 10
|
||||
operationsPerGoroutine = 1000
|
||||
)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
start := time.Now()
|
||||
|
||||
// Launch concurrent goroutines performing audio operations
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
frameData := make([]byte, 1920)
|
||||
|
||||
for j := 0; j < operationsPerGoroutine; j++ {
|
||||
// Simulate concurrent audio processing
|
||||
_ = ValidateAudioFrame(frameData)
|
||||
RecordFrameReceived(len(frameData))
|
||||
_ = GetAudioMetrics()
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
elapsed := time.Since(start)
|
||||
|
||||
totalOperations := numGoroutines * operationsPerGoroutine * 4 // 4 operations per iteration
|
||||
avgLatency := elapsed / time.Duration(totalOperations)
|
||||
|
||||
t.Logf("Concurrent access: %d operations in %v (avg: %v per operation)",
|
||||
totalOperations, elapsed, avgLatency)
|
||||
|
||||
// Concurrent access should not significantly degrade performance
|
||||
assert.Less(t, avgLatency, 1*time.Microsecond, "Concurrent access too slow")
|
||||
}
|
||||
|
||||
// testBufferPoolEfficiency tests buffer pool performance
|
||||
func testBufferPoolEfficiency(t *testing.T) {
|
||||
// Test buffer acquisition and release performance
|
||||
const iterations = 1000
|
||||
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
// Simulate buffer pool usage (if available)
|
||||
buffer := make([]byte, 1920) // Fallback to allocation
|
||||
_ = buffer
|
||||
// In real implementation, this would be pool.Get() and pool.Put()
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
|
||||
avgLatency := elapsed / iterations
|
||||
t.Logf("Buffer allocation latency: %v per buffer", avgLatency)
|
||||
|
||||
// Buffer operations should be fast
|
||||
assert.Less(t, avgLatency, 1*time.Microsecond, "Buffer allocation too slow")
|
||||
}
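// Illustrative sketch (not from this changeset): the "pool.Get() and pool.Put()"
// path mentioned in the comment above could be a thin sync.Pool wrapper for
// fixed-size frames. The type is hypothetical and only shown for comparison with
// the plain-allocation fallback measured above; storing a *[]byte instead of the
// slice header would avoid the boxing allocation on Put.
type frameBufferPool struct {
	pool sync.Pool
}

func newFrameBufferPool(size int) *frameBufferPool {
	return &frameBufferPool{pool: sync.Pool{
		New: func() interface{} { return make([]byte, size) },
	}}
}

func (p *frameBufferPool) Get() []byte  { return p.pool.Get().([]byte) }
func (p *frameBufferPool) Put(b []byte) { p.pool.Put(b) }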
|
||||
|
||||
// testAtomicOperationOverhead tests atomic operation performance
|
||||
func testAtomicOperationOverhead(t *testing.T) {
|
||||
const iterations = 10000
|
||||
var counter int64
|
||||
|
||||
// Test atomic increment performance
|
||||
start := time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
atomic.AddInt64(&counter, 1)
|
||||
}
|
||||
atomicLatency := time.Since(start) / iterations
|
||||
|
||||
// Test atomic load performance
|
||||
start = time.Now()
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = atomic.LoadInt64(&counter)
|
||||
}
|
||||
loadLatency := time.Since(start) / iterations
|
||||
|
||||
t.Logf("Atomic add latency: %v", atomicLatency)
|
||||
t.Logf("Atomic load latency: %v", loadLatency)
|
||||
|
||||
// Atomic operations on ARM Cortex-A7 - realistic expectations
|
||||
assert.Less(t, atomicLatency, 1*time.Microsecond, "Atomic add too slow")
|
||||
assert.Less(t, loadLatency, 500*time.Nanosecond, "Atomic load too slow")
|
||||
}
|
||||
|
||||
// TestRegressionDetection tests for performance regressions
|
||||
func TestRegressionDetection(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping regression test in short mode")
|
||||
}
|
||||
|
||||
// Baseline performance expectations
|
||||
baselines := map[string]time.Duration{
|
||||
"frame_processing": 100 * time.Microsecond,
|
||||
"metrics_update": 500 * time.Nanosecond,
|
||||
"config_access": 100 * time.Nanosecond,
|
||||
"validation": 200 * time.Nanosecond,
|
||||
}
|
||||
|
||||
// Test frame processing
|
||||
frameData := make([]byte, 1920)
|
||||
start := time.Now()
|
||||
for i := 0; i < 100; i++ {
|
||||
_ = ValidateAudioFrame(frameData)
|
||||
RecordFrameReceived(len(frameData))
|
||||
}
|
||||
frameProcessingTime := time.Since(start) / 100
|
||||
|
||||
// Test metrics update
|
||||
start = time.Now()
|
||||
for i := 0; i < 1000; i++ {
|
||||
RecordFrameReceived(1024)
|
||||
}
|
||||
metricsUpdateTime := time.Since(start) / 1000
|
||||
|
||||
// Test config access
|
||||
start = time.Now()
|
||||
for i := 0; i < 1000; i++ {
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
configAccessTime := time.Since(start) / 1000
|
||||
|
||||
// Test validation
|
||||
start = time.Now()
|
||||
for i := 0; i < 1000; i++ {
|
||||
_ = ValidateAudioQuality(AudioQualityMedium)
|
||||
}
|
||||
validationTime := time.Since(start) / 1000
|
||||
|
||||
// Performance regression thresholds for JetKVM hardware:
|
||||
// - ARM Cortex-A7 @ 1GHz single core
|
||||
// - 256MB DDR3L RAM
|
||||
// - Must not interfere with primary KVM functionality
|
||||
assert.Less(t, frameProcessingTime, baselines["frame_processing"],
|
||||
"Frame processing regression: %v > %v", frameProcessingTime, baselines["frame_processing"])
|
||||
assert.Less(t, metricsUpdateTime, 100*time.Microsecond,
|
||||
"Metrics update regression: %v > 100μs", metricsUpdateTime)
|
||||
assert.Less(t, configAccessTime, 10*time.Microsecond,
|
||||
"Config access regression: %v > 10μs", configAccessTime)
|
||||
assert.Less(t, validationTime, 10*time.Microsecond,
|
||||
"Validation regression: %v > 10μs", validationTime)
|
||||
|
||||
t.Logf("Performance results:")
|
||||
t.Logf(" Frame processing: %v (baseline: %v)", frameProcessingTime, baselines["frame_processing"])
|
||||
t.Logf(" Metrics update: %v (baseline: %v)", metricsUpdateTime, baselines["metrics_update"])
|
||||
t.Logf(" Config access: %v (baseline: %v)", configAccessTime, baselines["config_access"])
|
||||
t.Logf(" Validation: %v (baseline: %v)", validationTime, baselines["validation"])
|
||||
}
|
||||
|
||||
// TestMemoryLeakDetection tests for memory leaks in critical paths
|
||||
func TestMemoryLeakDetection(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping memory leak test in short mode")
|
||||
}
|
||||
|
||||
var m1, m2 runtime.MemStats
|
||||
|
||||
// Baseline measurement
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m1)
|
||||
|
||||
// Perform many operations that should not leak memory
|
||||
for cycle := 0; cycle < 10; cycle++ {
|
||||
for i := 0; i < 1000; i++ {
|
||||
frameData := make([]byte, 1920)
|
||||
_ = ValidateAudioFrame(frameData)
|
||||
RecordFrameReceived(len(frameData))
|
||||
_ = GetAudioMetrics()
|
||||
_ = GetAudioConfig()
|
||||
}
|
||||
// Force garbage collection between cycles
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
// Final measurement
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m2)
|
||||
|
||||
memoryGrowth := int64(m2.Alloc) - int64(m1.Alloc)
|
||||
t.Logf("Memory growth after 10,000 operations: %d bytes", memoryGrowth)
|
||||
|
||||
// Memory growth should be minimal (less than 1MB)
|
||||
assert.Less(t, memoryGrowth, int64(1024*1024),
|
||||
"Excessive memory growth detected: %d bytes", memoryGrowth)
|
||||
}
|
||||
|
|
@ -0,0 +1,168 @@
|
|||
//go:build linux
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// SchedParam represents scheduling parameters for Linux
|
||||
type SchedParam struct {
|
||||
Priority int32
|
||||
}
|
||||
|
||||
// getPriorityConstants returns priority levels from centralized config
|
||||
func getPriorityConstants() (audioHigh, audioMedium, audioLow, normal int) {
|
||||
config := GetConfig()
|
||||
return config.AudioHighPriority, config.AudioMediumPriority, config.AudioLowPriority, config.NormalPriority
|
||||
}
|
||||
|
||||
// getSchedulingPolicies returns scheduling policies from centralized config
|
||||
func getSchedulingPolicies() (schedNormal, schedFIFO, schedRR int) {
|
||||
config := GetConfig()
|
||||
return config.SchedNormal, config.SchedFIFO, config.SchedRR
|
||||
}
|
||||
|
||||
// PriorityScheduler manages thread priorities for audio processing
|
||||
type PriorityScheduler struct {
|
||||
logger zerolog.Logger
|
||||
enabled bool
|
||||
}
|
||||
|
||||
// NewPriorityScheduler creates a new priority scheduler
|
||||
func NewPriorityScheduler() *PriorityScheduler {
|
||||
return &PriorityScheduler{
|
||||
logger: logging.GetDefaultLogger().With().Str("component", "priority-scheduler").Logger(),
|
||||
enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
// SetThreadPriority sets the priority of the current thread
|
||||
func (ps *PriorityScheduler) SetThreadPriority(priority int, policy int) error {
|
||||
if !ps.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock to OS thread to ensure we're setting priority for the right thread
|
||||
runtime.LockOSThread()
|
||||
|
||||
// Get current thread ID
|
||||
tid := syscall.Gettid()
|
||||
|
||||
// Set scheduling parameters
|
||||
param := &SchedParam{
|
||||
Priority: int32(priority),
|
||||
}
|
||||
|
||||
// Use syscall to set scheduler
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_SCHED_SETSCHEDULER,
|
||||
uintptr(tid),
|
||||
uintptr(policy),
|
||||
uintptr(unsafe.Pointer(param)))
|
||||
|
||||
if errno != 0 {
|
||||
// If we can't set real-time priority, try nice value instead
|
||||
schedNormal, _, _ := getSchedulingPolicies()
|
||||
if policy != schedNormal {
|
||||
ps.logger.Warn().Int("errno", int(errno)).Msg("failed to set real-time priority, falling back to nice")
|
||||
return ps.setNicePriority(priority)
|
||||
}
|
||||
return errno
|
||||
}
|
||||
|
||||
ps.logger.Debug().Int("tid", tid).Int("priority", priority).Int("policy", policy).Msg("thread priority set")
|
||||
return nil
|
||||
}
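// Note: the raw syscall above mirrors sched_setscheduler(2); the kernel expects a
// pointer to struct sched_param, whose only field is an int priority, which is why
// SchedParam is a single int32 passed via unsafe.Pointer.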
|
||||
|
||||
// setNicePriority sets nice value as fallback when real-time scheduling is not available
func (ps *PriorityScheduler) setNicePriority(rtPriority int) error {
	// Convert real-time priority to nice value (inverse relationship)
	// RT priority 80 -> nice -10, RT priority 40 -> nice 0
	niceValue := (40 - rtPriority) / 4
	if niceValue < GetConfig().MinNiceValue {
		niceValue = GetConfig().MinNiceValue
	}
	if niceValue > GetConfig().MaxNiceValue {
		niceValue = GetConfig().MaxNiceValue
	}

	err := syscall.Setpriority(syscall.PRIO_PROCESS, 0, niceValue)
	if err != nil {
		ps.logger.Warn().Err(err).Int("nice", niceValue).Msg("failed to set nice priority")
		return err
	}

	ps.logger.Debug().Int("nice", niceValue).Msg("nice priority set as fallback")
	return nil
}
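// Worked examples of the conversion above, before clamping to
// [MinNiceValue, MaxNiceValue]:
//
//	rtPriority 80 -> (40-80)/4 = -10  (matches the comment in setNicePriority)
//	rtPriority 40 -> (40-40)/4 =   0  (default nice)
//	rtPriority  0 -> (40-0)/4  =  10  (lowest urgency, most "nice")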
|
||||
|
||||
// SetAudioProcessingPriority sets high priority for audio processing threads
|
||||
func (ps *PriorityScheduler) SetAudioProcessingPriority() error {
|
||||
audioHigh, _, _, _ := getPriorityConstants()
|
||||
_, schedFIFO, _ := getSchedulingPolicies()
|
||||
return ps.SetThreadPriority(audioHigh, schedFIFO)
|
||||
}
|
||||
|
||||
// SetAudioIOPriority sets medium priority for audio I/O threads
|
||||
func (ps *PriorityScheduler) SetAudioIOPriority() error {
|
||||
_, audioMedium, _, _ := getPriorityConstants()
|
||||
_, schedFIFO, _ := getSchedulingPolicies()
|
||||
return ps.SetThreadPriority(audioMedium, schedFIFO)
|
||||
}
|
||||
|
||||
// SetAudioBackgroundPriority sets low priority for background audio tasks
|
||||
func (ps *PriorityScheduler) SetAudioBackgroundPriority() error {
|
||||
_, _, audioLow, _ := getPriorityConstants()
|
||||
_, schedFIFO, _ := getSchedulingPolicies()
|
||||
return ps.SetThreadPriority(audioLow, schedFIFO)
|
||||
}
|
||||
|
||||
// ResetPriority resets thread to normal scheduling
|
||||
func (ps *PriorityScheduler) ResetPriority() error {
|
||||
_, _, _, normal := getPriorityConstants()
|
||||
schedNormal, _, _ := getSchedulingPolicies()
|
||||
return ps.SetThreadPriority(normal, schedNormal)
|
||||
}
|
||||
|
||||
// Disable disables priority scheduling (useful for testing or fallback)
|
||||
func (ps *PriorityScheduler) Disable() {
|
||||
ps.enabled = false
|
||||
ps.logger.Debug().Msg("priority scheduling disabled")
|
||||
}
|
||||
|
||||
// Enable enables priority scheduling
|
||||
func (ps *PriorityScheduler) Enable() {
|
||||
ps.enabled = true
|
||||
ps.logger.Debug().Msg("priority scheduling enabled")
|
||||
}
|
||||
|
||||
// Global priority scheduler instance
|
||||
var globalPriorityScheduler *PriorityScheduler
|
||||
|
||||
// GetPriorityScheduler returns the global priority scheduler instance
|
||||
func GetPriorityScheduler() *PriorityScheduler {
|
||||
if globalPriorityScheduler == nil {
|
||||
globalPriorityScheduler = NewPriorityScheduler()
|
||||
}
|
||||
return globalPriorityScheduler
|
||||
}
|
||||
|
||||
// SetAudioThreadPriority is a convenience function to set audio processing priority
func SetAudioThreadPriority() error {
	return GetPriorityScheduler().SetAudioProcessingPriority()
}

// SetAudioIOThreadPriority is a convenience function to set audio I/O priority
func SetAudioIOThreadPriority() error {
	return GetPriorityScheduler().SetAudioIOPriority()
}

// ResetThreadPriority is a convenience function to reset thread priority
func ResetThreadPriority() error {
	return GetPriorityScheduler().ResetPriority()
}
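// Usage sketch (illustrative; not a caller that exists in this changeset): an
// audio processing goroutine would typically pin itself to an OS thread, raise
// its priority for the lifetime of its loop, and restore normal scheduling on
// exit. Elevation is best-effort, so failures are only logged.
func runWithAudioPriority(loop func()) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := SetAudioThreadPriority(); err != nil {
		logging.GetDefaultLogger().Warn().Err(err).Msg("audio priority not applied")
	}
	defer func() { _ = ResetThreadPriority() }()

	loop()
}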
|
||||
|
|
@ -0,0 +1,362 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestRegressionScenarios tests critical edge cases and error conditions
|
||||
// that could cause system instability in production
|
||||
func TestRegressionScenarios(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "IPCConnectionFailure",
|
||||
testFunc: testIPCConnectionFailureRecovery,
|
||||
description: "Test IPC connection failure and recovery scenarios",
|
||||
},
|
||||
{
|
||||
name: "BufferOverflow",
|
||||
testFunc: testBufferOverflowHandling,
|
||||
description: "Test buffer overflow protection and recovery",
|
||||
},
|
||||
{
|
||||
name: "SupervisorRapidRestart",
|
||||
testFunc: testSupervisorRapidRestartScenario,
|
||||
description: "Test supervisor behavior under rapid restart conditions",
|
||||
},
|
||||
{
|
||||
name: "ConcurrentStartStop",
|
||||
testFunc: testConcurrentStartStopOperations,
|
||||
description: "Test concurrent start/stop operations for race conditions",
|
||||
},
|
||||
{
|
||||
name: "MemoryLeakPrevention",
|
||||
testFunc: testMemoryLeakPrevention,
|
||||
description: "Test memory leak prevention in long-running scenarios",
|
||||
},
|
||||
{
|
||||
name: "ConfigValidationEdgeCases",
|
||||
testFunc: testConfigValidationEdgeCases,
|
||||
description: "Test configuration validation with edge case values",
|
||||
},
|
||||
{
|
||||
name: "AtomicOperationConsistency",
|
||||
testFunc: testAtomicOperationConsistency,
|
||||
description: "Test atomic operations consistency under high concurrency",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Logf("Running regression test: %s - %s", tt.name, tt.description)
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testIPCConnectionFailureRecovery tests IPC connection failure scenarios
|
||||
func testIPCConnectionFailureRecovery(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Test start with no IPC server available (should handle gracefully)
|
||||
err := manager.Start()
|
||||
// Should not panic or crash, may return error depending on implementation
|
||||
if err != nil {
|
||||
t.Logf("Expected error when no IPC server available: %v", err)
|
||||
}
|
||||
|
||||
// Test that manager can recover after IPC becomes available
|
||||
if manager.IsRunning() {
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
// Verify clean state after failure
|
||||
assert.False(t, manager.IsRunning())
|
||||
assert.False(t, manager.IsReady())
|
||||
}
|
||||
|
||||
// testBufferOverflowHandling tests buffer overflow protection
|
||||
func testBufferOverflowHandling(t *testing.T) {
|
||||
// Test with extremely large buffer sizes
|
||||
extremelyLargeSize := 1024 * 1024 * 100 // 100MB
|
||||
err := ValidateBufferSize(extremelyLargeSize)
|
||||
assert.Error(t, err, "Should reject extremely large buffer sizes")
|
||||
|
||||
// Test with negative buffer sizes
|
||||
err = ValidateBufferSize(-1)
|
||||
assert.Error(t, err, "Should reject negative buffer sizes")
|
||||
|
||||
// Test with zero buffer size
|
||||
err = ValidateBufferSize(0)
|
||||
assert.Error(t, err, "Should reject zero buffer size")
|
||||
|
||||
// Test with maximum valid buffer size
|
||||
maxValidSize := GetConfig().SocketMaxBuffer
|
||||
err = ValidateBufferSize(int(maxValidSize))
|
||||
assert.NoError(t, err, "Should accept maximum valid buffer size")
|
||||
}
|
||||
|
||||
// testSupervisorRapidRestartScenario tests supervisor under rapid restart conditions
|
||||
func testSupervisorRapidRestartScenario(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping rapid restart test in short mode")
|
||||
}
|
||||
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Perform rapid start/stop cycles to test for race conditions
|
||||
for i := 0; i < 10; i++ {
|
||||
err := supervisor.Start()
|
||||
if err != nil {
|
||||
t.Logf("Start attempt %d failed (expected in test environment): %v", i, err)
|
||||
}
|
||||
|
||||
// Very short delay to stress test
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
supervisor.Stop()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Verify supervisor is in clean state after rapid cycling
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
// testConcurrentStartStopOperations tests concurrent operations for race conditions
|
||||
func testConcurrentStartStopOperations(t *testing.T) {
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 10
|
||||
|
||||
// Launch multiple goroutines trying to start/stop concurrently
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(2)
|
||||
|
||||
// Start goroutine
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Concurrent start %d: %v", id, err)
|
||||
}
|
||||
}(i)
|
||||
|
||||
// Stop goroutine
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
time.Sleep(5 * time.Millisecond) // Small delay
|
||||
manager.Stop()
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Ensure final state is consistent
|
||||
manager.Stop() // Final cleanup
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
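// Illustrative sketch (not the manager's actual implementation): the property the
// test above relies on - concurrent Start/Stop calls must leave a single,
// consistent final state - is commonly obtained with an atomic flag guarding both
// transitions.
type startStopGuard struct {
	running int32
}

// tryStart returns true only for the caller that flips stopped -> running.
func (g *startStopGuard) tryStart() bool {
	return atomic.CompareAndSwapInt32(&g.running, 0, 1)
}

// tryStop returns true only for the caller that flips running -> stopped.
func (g *startStopGuard) tryStop() bool {
	return atomic.CompareAndSwapInt32(&g.running, 1, 0)
}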
|
||||
|
||||
// testMemoryLeakPrevention tests for memory leaks in long-running scenarios
|
||||
func testMemoryLeakPrevention(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping memory leak test in short mode")
|
||||
}
|
||||
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Simulate long-running operation with periodic restarts
|
||||
for cycle := 0; cycle < 5; cycle++ {
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Start cycle %d failed (expected): %v", cycle, err)
|
||||
}
|
||||
|
||||
// Simulate some activity
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Get metrics to ensure they're not accumulating indefinitely
|
||||
metrics := manager.GetMetrics()
|
||||
assert.NotNil(t, metrics, "Metrics should be available")
|
||||
|
||||
manager.Stop()
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Final verification
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
||||
// testConfigValidationEdgeCases tests configuration validation with edge cases
|
||||
func testConfigValidationEdgeCases(t *testing.T) {
|
||||
// Test sample rate edge cases
|
||||
testCases := []struct {
|
||||
sampleRate int
|
||||
channels int
|
||||
frameSize int
|
||||
shouldPass bool
|
||||
description string
|
||||
}{
|
||||
{0, 2, 960, false, "zero sample rate"},
|
||||
{-1, 2, 960, false, "negative sample rate"},
|
||||
{1, 2, 960, false, "extremely low sample rate"},
|
||||
{999999, 2, 960, false, "extremely high sample rate"},
|
||||
{48000, 0, 960, false, "zero channels"},
|
||||
{48000, -1, 960, false, "negative channels"},
|
||||
{48000, 100, 960, false, "too many channels"},
|
||||
{48000, 2, 0, false, "zero frame size"},
|
||||
{48000, 2, -1, false, "negative frame size"},
|
||||
{48000, 2, 999999, true, "extremely large frame size"},
|
||||
{48000, 2, 960, true, "valid configuration"},
|
||||
{44100, 1, 441, true, "valid mono configuration"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateInputIPCConfig(tc.sampleRate, tc.channels, tc.frameSize)
|
||||
if tc.shouldPass {
|
||||
assert.NoError(t, err, "Should accept valid config: %s", tc.description)
|
||||
} else {
|
||||
assert.Error(t, err, "Should reject invalid config: %s", tc.description)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAtomicOperationConsistency tests atomic operations under high concurrency
|
||||
func testAtomicOperationConsistency(t *testing.T) {
|
||||
var counter int64
|
||||
var wg sync.WaitGroup
|
||||
const numGoroutines = 100
|
||||
const incrementsPerGoroutine = 1000
|
||||
|
||||
// Launch multiple goroutines performing atomic operations
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for j := 0; j < incrementsPerGoroutine; j++ {
|
||||
atomic.AddInt64(&counter, 1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify final count is correct
|
||||
expected := int64(numGoroutines * incrementsPerGoroutine)
|
||||
actual := atomic.LoadInt64(&counter)
|
||||
assert.Equal(t, expected, actual, "Atomic operations should be consistent")
|
||||
}
|
||||
|
||||
// TestErrorRecoveryScenarios tests various error recovery scenarios
|
||||
func TestErrorRecoveryScenarios(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"NetworkConnectionLoss", testNetworkConnectionLossRecovery},
|
||||
{"ProcessCrashRecovery", testProcessCrashRecovery},
|
||||
{"ResourceExhaustionRecovery", testResourceExhaustionRecovery},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testNetworkConnectionLossRecovery tests recovery from network connection loss
|
||||
func testNetworkConnectionLossRecovery(t *testing.T) {
|
||||
// Create a temporary socket that we can close to simulate connection loss
|
||||
tempDir := t.TempDir()
|
||||
socketPath := fmt.Sprintf("%s/test_recovery.sock", tempDir)
|
||||
|
||||
// Create and immediately close a socket to test connection failure
|
||||
listener, err := net.Listen("unix", socketPath)
|
||||
if err != nil {
|
||||
t.Skipf("Cannot create test socket: %v", err)
|
||||
}
|
||||
listener.Close() // Close immediately to simulate connection loss
|
||||
|
||||
// Remove socket file to ensure connection will fail
|
||||
os.Remove(socketPath)
|
||||
|
||||
// Test that components handle connection loss gracefully
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// This should handle the connection failure gracefully
|
||||
err = manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Expected connection failure handled: %v", err)
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
// testProcessCrashRecovery tests recovery from process crashes
|
||||
func testProcessCrashRecovery(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping process crash test in short mode")
|
||||
}
|
||||
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Start supervisor (will likely fail in test environment, but should handle gracefully)
|
||||
err := supervisor.Start()
|
||||
if err != nil {
|
||||
t.Logf("Supervisor start failed as expected in test environment: %v", err)
|
||||
}
|
||||
|
||||
// Verify supervisor can be stopped cleanly even after start failure
|
||||
supervisor.Stop()
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
// testResourceExhaustionRecovery tests recovery from resource exhaustion
|
||||
func testResourceExhaustionRecovery(t *testing.T) {
|
||||
// Test with resource constraints
|
||||
manager := NewAudioInputIPCManager()
|
||||
require.NotNil(t, manager)
|
||||
|
||||
// Simulate resource exhaustion by rapid start/stop cycles
|
||||
for i := 0; i < 20; i++ {
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Resource exhaustion cycle %d: %v", i, err)
|
||||
}
|
||||
manager.Stop()
|
||||
// No delay to stress test resource management
|
||||
}
|
||||
|
||||
// Verify system can still function after resource stress
|
||||
err := manager.Start()
|
||||
if err != nil {
|
||||
t.Logf("Final start after resource stress: %v", err)
|
||||
}
|
||||
manager.Stop()
|
||||
assert.False(t, manager.IsRunning())
|
||||
}
|
||||
|
|
@ -11,8 +11,6 @@ import (
|
|||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Restart configuration is now retrieved from centralized config
|
||||
|
|
@ -150,6 +148,13 @@ func (s *AudioOutputSupervisor) Stop() {
|
|||
s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component stopped")
|
||||
}
|
||||
|
||||
// GetProcessMetrics returns current process metrics with audio-output-server name
|
||||
func (s *AudioOutputSupervisor) GetProcessMetrics() *ProcessMetrics {
|
||||
metrics := s.BaseSupervisor.GetProcessMetrics()
|
||||
metrics.ProcessName = "audio-output-server"
|
||||
return metrics
|
||||
}
|
||||
|
||||
// supervisionLoop is the main supervision loop
|
||||
func (s *AudioOutputSupervisor) supervisionLoop() {
|
||||
defer func() {
|
||||
|
|
@ -175,25 +180,16 @@ func (s *AudioOutputSupervisor) supervisionLoop() {
|
|||
default:
|
||||
// Start or restart the process
|
||||
if err := s.startProcess(); err != nil {
|
||||
// Only log start errors if error level enabled to reduce overhead
|
||||
if s.logger.GetLevel() <= zerolog.ErrorLevel {
|
||||
s.logger.Error().Err(err).Msg("failed to start audio server process")
|
||||
}
|
||||
s.logger.Error().Err(err).Msg("failed to start audio server process")
|
||||
|
||||
// Check if we should attempt restart
|
||||
if !s.shouldRestart() {
|
||||
// Only log critical errors to reduce overhead
|
||||
if s.logger.GetLevel() <= zerolog.ErrorLevel {
|
||||
s.logger.Error().Msg("maximum restart attempts exceeded, stopping supervisor")
|
||||
}
|
||||
s.logger.Error().Msg("maximum restart attempts exceeded, stopping supervisor")
|
||||
return
|
||||
}
|
||||
|
||||
delay := s.calculateRestartDelay()
|
||||
// Sample logging to reduce overhead - log every 5th restart attempt
|
||||
if len(s.restartAttempts)%5 == 0 && s.logger.GetLevel() <= zerolog.WarnLevel {
|
||||
s.logger.Warn().Dur("delay", delay).Int("attempt", len(s.restartAttempts)).Msg("retrying process start after delay")
|
||||
}
|
||||
s.logger.Warn().Dur("delay", delay).Msg("retrying process start after delay")
|
||||
|
||||
if s.onRestart != nil {
|
||||
s.onRestart(len(s.restartAttempts), delay)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,393 @@
|
|||
//go:build integration && cgo
|
||||
// +build integration,cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestSupervisorRestart tests various supervisor restart scenarios
|
||||
func TestSupervisorRestart(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "BasicRestart",
|
||||
testFunc: testBasicSupervisorRestart,
|
||||
description: "Test basic supervisor restart functionality",
|
||||
},
|
||||
{
|
||||
name: "ProcessCrashRestart",
|
||||
testFunc: testProcessCrashRestart,
|
||||
description: "Test supervisor restart after process crash",
|
||||
},
|
||||
{
|
||||
name: "MaxRestartAttempts",
|
||||
testFunc: testMaxRestartAttempts,
|
||||
description: "Test supervisor respects max restart attempts",
|
||||
},
|
||||
{
|
||||
name: "ExponentialBackoff",
|
||||
testFunc: testExponentialBackoff,
|
||||
description: "Test supervisor exponential backoff behavior",
|
||||
},
|
||||
{
|
||||
name: "HealthMonitoring",
|
||||
testFunc: testHealthMonitoring,
|
||||
description: "Test supervisor health monitoring",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Logf("Running supervisor test: %s - %s", tt.name, tt.description)
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testBasicSupervisorRestart tests basic restart functionality
|
||||
func testBasicSupervisorRestart(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create a mock supervisor with a simple test command
|
||||
supervisor := &AudioInputSupervisor{
|
||||
logger: getTestLogger(),
|
||||
maxRestarts: 3,
|
||||
restartDelay: 100 * time.Millisecond,
|
||||
healthCheckInterval: 200 * time.Millisecond,
|
||||
}
|
||||
|
||||
// Use a simple command that will exit quickly for testing
|
||||
testCmd := exec.CommandContext(ctx, "sleep", "0.5")
|
||||
supervisor.cmd = testCmd
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
// Start supervisor
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
supervisor.Start(ctx)
|
||||
}()
|
||||
|
||||
// Wait for initial process to start and exit
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Verify that supervisor attempted restart
|
||||
assert.True(t, supervisor.GetRestartCount() > 0, "Supervisor should have attempted restart")
|
||||
|
||||
// Stop supervisor
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// testProcessCrashRestart tests restart after process crash
|
||||
func testProcessCrashRestart(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
|
||||
defer cancel()
|
||||
|
||||
supervisor := &AudioInputSupervisor{
|
||||
logger: getTestLogger(),
|
||||
maxRestarts: 2,
|
||||
restartDelay: 200 * time.Millisecond,
|
||||
healthCheckInterval: 100 * time.Millisecond,
|
||||
}
|
||||
|
||||
// Create a command that will crash (exit with non-zero code)
|
||||
testCmd := exec.CommandContext(ctx, "sh", "-c", "sleep 0.2 && exit 1")
|
||||
supervisor.cmd = testCmd
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
supervisor.Start(ctx)
|
||||
}()
|
||||
|
||||
// Wait for process to crash and restart attempts
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Verify restart attempts were made
|
||||
restartCount := supervisor.GetRestartCount()
|
||||
assert.True(t, restartCount > 0, "Supervisor should have attempted restart after crash")
|
||||
assert.True(t, restartCount <= 2, "Supervisor should not exceed max restart attempts")
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// testMaxRestartAttempts tests that supervisor respects max restart limit
|
||||
func testMaxRestartAttempts(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
maxRestarts := 3
|
||||
supervisor := &AudioInputSupervisor{
|
||||
logger: getTestLogger(),
|
||||
maxRestarts: maxRestarts,
|
||||
restartDelay: 50 * time.Millisecond,
|
||||
healthCheckInterval: 50 * time.Millisecond,
|
||||
}
|
||||
|
||||
// Command that immediately fails
|
||||
testCmd := exec.CommandContext(ctx, "false") // 'false' command always exits with code 1
|
||||
supervisor.cmd = testCmd
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
supervisor.Start(ctx)
|
||||
}()
|
||||
|
||||
// Wait for all restart attempts to complete
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Verify that supervisor stopped after max attempts
|
||||
restartCount := supervisor.GetRestartCount()
|
||||
assert.Equal(t, maxRestarts, restartCount, "Supervisor should stop after max restart attempts")
|
||||
assert.False(t, supervisor.IsRunning(), "Supervisor should not be running after max attempts")
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// testExponentialBackoff tests the exponential backoff behavior
|
||||
func testExponentialBackoff(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
|
||||
defer cancel()
|
||||
|
||||
supervisor := &AudioInputSupervisor{
|
||||
logger: getTestLogger(),
|
||||
maxRestarts: 3,
|
||||
restartDelay: 100 * time.Millisecond, // Base delay
|
||||
healthCheckInterval: 50 * time.Millisecond,
|
||||
}
|
||||
|
||||
// Command that fails immediately
|
||||
testCmd := exec.CommandContext(ctx, "false")
|
||||
supervisor.cmd = testCmd
|
||||
|
||||
var restartTimes []time.Time
|
||||
var mu sync.Mutex
|
||||
|
||||
// Hook into restart events to measure timing
|
||||
originalRestart := supervisor.restart
|
||||
supervisor.restart = func() {
|
||||
mu.Lock()
|
||||
restartTimes = append(restartTimes, time.Now())
|
||||
mu.Unlock()
|
||||
if originalRestart != nil {
|
||||
originalRestart()
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
supervisor.Start(ctx)
|
||||
}()
|
||||
|
||||
// Wait for restart attempts
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
// Verify backoff growth: each observed delay must be at least i*base, a conservative linear floor under the exponential schedule
|
||||
if len(restartTimes) >= 2 {
|
||||
for i := 1; i < len(restartTimes); i++ {
|
||||
delay := restartTimes[i].Sub(restartTimes[i-1])
|
||||
expectedMinDelay := time.Duration(i) * 100 * time.Millisecond
|
||||
assert.True(t, delay >= expectedMinDelay,
|
||||
"Restart delay should increase exponentially: attempt %d delay %v should be >= %v",
|
||||
i, delay, expectedMinDelay)
|
||||
}
|
||||
}
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// testHealthMonitoring tests the health monitoring functionality
|
||||
func testHealthMonitoring(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
supervisor := &AudioInputSupervisor{
|
||||
logger: getTestLogger(),
|
||||
maxRestarts: 2,
|
||||
restartDelay: 100 * time.Millisecond,
|
||||
healthCheckInterval: 50 * time.Millisecond,
|
||||
}
|
||||
|
||||
// Command that runs for a while then exits
|
||||
testCmd := exec.CommandContext(ctx, "sleep", "1")
|
||||
supervisor.cmd = testCmd
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
supervisor.Start(ctx)
|
||||
}()
|
||||
|
||||
// Initially should be running
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
assert.True(t, supervisor.IsRunning(), "Supervisor should be running initially")
|
||||
|
||||
// Wait for process to exit and health check to detect it
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
|
||||
// Should have detected process exit and attempted restart
|
||||
assert.True(t, supervisor.GetRestartCount() > 0, "Health monitoring should detect process exit")
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// TestAudioInputSupervisorIntegration tests the actual AudioInputSupervisor
|
||||
func TestAudioInputSupervisorIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create a real supervisor instance
|
||||
supervisor := NewAudioInputSupervisor()
|
||||
require.NotNil(t, supervisor, "Supervisor should be created")
|
||||
|
||||
// Test that supervisor can be started and stopped cleanly
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// This will likely fail due to missing audio hardware in test environment,
|
||||
// but we're testing the supervisor logic, not the audio functionality
|
||||
supervisor.Start(ctx)
|
||||
}()
|
||||
|
||||
// Let it run briefly
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// Stop the supervisor
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
// Verify clean shutdown
|
||||
assert.False(t, supervisor.IsRunning(), "Supervisor should not be running after context cancellation")
|
||||
}
|
||||
|
||||
// Mock supervisor for testing (simplified version)
|
||||
type AudioInputSupervisor struct {
|
||||
logger zerolog.Logger
|
||||
cmd *exec.Cmd
|
||||
maxRestarts int
|
||||
restartDelay time.Duration
|
||||
healthCheckInterval time.Duration
|
||||
restartCount int
|
||||
running bool
|
||||
mu sync.RWMutex
|
||||
restart func() // Hook for testing
|
||||
}
|
||||
|
||||
func (s *AudioInputSupervisor) Start(ctx context.Context) error {
|
||||
s.mu.Lock()
|
||||
s.running = true
|
||||
s.mu.Unlock()
|
||||
|
||||
for s.restartCount < s.maxRestarts {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
s.mu.Lock()
|
||||
s.running = false
|
||||
s.mu.Unlock()
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
// Start process
|
||||
if s.cmd != nil {
|
||||
err := s.cmd.Start()
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("Failed to start process")
|
||||
s.restartCount++
|
||||
time.Sleep(s.getBackoffDelay())
|
||||
continue
|
||||
}
|
||||
|
||||
// Wait for process to exit
|
||||
err = s.cmd.Wait()
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("Process exited with error")
|
||||
}
|
||||
}
|
||||
|
||||
s.restartCount++
|
||||
if s.restart != nil {
|
||||
s.restart()
|
||||
}
|
||||
|
||||
if s.restartCount < s.maxRestarts {
|
||||
time.Sleep(s.getBackoffDelay())
|
||||
}
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.running = false
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AudioInputSupervisor) IsRunning() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.running
|
||||
}
|
||||
|
||||
func (s *AudioInputSupervisor) GetRestartCount() int {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.restartCount
|
||||
}
|
||||
|
||||
func (s *AudioInputSupervisor) getBackoffDelay() time.Duration {
	// Simple exponential backoff
	multiplier := 1 << uint(s.restartCount)
	if multiplier > 8 {
		multiplier = 8 // Cap the multiplier
	}
	return s.restartDelay * time.Duration(multiplier)
}
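// Worked example of getBackoffDelay with restartDelay = 100ms (the value used in
// testBasicSupervisorRestart above):
//
//	restartCount 0  -> 1<<0 = 1 -> 100ms
//	restartCount 1  -> 1<<1 = 2 -> 200ms
//	restartCount 2  -> 1<<2 = 4 -> 400ms
//	restartCount 3  -> 1<<3 = 8 -> 800ms
//	restartCount 4+ -> capped at 8 -> 800ms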
|
||||
|
||||
// NewAudioInputSupervisor creates a new supervisor for testing
|
||||
func NewAudioInputSupervisor() *AudioInputSupervisor {
|
||||
return &AudioInputSupervisor{
|
||||
logger: getTestLogger(),
|
||||
maxRestarts: getMaxRestartAttempts(),
|
||||
restartDelay: getInitialRestartDelay(),
|
||||
healthCheckInterval: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,217 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewAudioOutputSupervisor(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
assert.NotNil(t, supervisor)
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorStart(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Test successful start
|
||||
err := supervisor.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, supervisor.IsRunning())
|
||||
|
||||
// Test starting already running supervisor
|
||||
err = supervisor.Start()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already running")
|
||||
|
||||
// Cleanup
|
||||
supervisor.Stop()
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorStop(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Test stopping non-running supervisor
|
||||
supervisor.Stop()
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
|
||||
// Start and then stop
|
||||
err := supervisor.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, supervisor.IsRunning())
|
||||
|
||||
supervisor.Stop()
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorIsRunning(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Test initial state
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
|
||||
// Test after start
|
||||
err := supervisor.Start()
|
||||
require.NoError(t, err)
|
||||
assert.True(t, supervisor.IsRunning())
|
||||
|
||||
// Test after stop
|
||||
supervisor.Stop()
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorGetProcessMetrics(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Test metrics when not running
|
||||
metrics := supervisor.GetProcessMetrics()
|
||||
assert.NotNil(t, metrics)
|
||||
|
||||
// Start and test metrics
|
||||
err := supervisor.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
metrics = supervisor.GetProcessMetrics()
|
||||
assert.NotNil(t, metrics)
|
||||
|
||||
// Cleanup
|
||||
supervisor.Stop()
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorConcurrentOperations(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Test concurrent start/stop operations
|
||||
for i := 0; i < 10; i++ {
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = supervisor.Start()
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
supervisor.Stop()
|
||||
}()
|
||||
}
|
||||
|
||||
// Test concurrent metric access
|
||||
for i := 0; i < 5; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = supervisor.GetProcessMetrics()
|
||||
}()
|
||||
}
|
||||
|
||||
// Test concurrent status checks
|
||||
for i := 0; i < 5; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = supervisor.IsRunning()
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Cleanup
|
||||
supervisor.Stop()
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorMultipleStartStop(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Test multiple start/stop cycles
|
||||
for i := 0; i < 5; i++ {
|
||||
err := supervisor.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, supervisor.IsRunning())
|
||||
|
||||
supervisor.Stop()
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorHealthCheck(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Start supervisor
|
||||
err := supervisor.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Give some time for health monitoring to initialize
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Test that supervisor is still running
|
||||
assert.True(t, supervisor.IsRunning())
|
||||
|
||||
// Cleanup
|
||||
supervisor.Stop()
|
||||
}
|
||||
|
||||
func TestAudioOutputSupervisorProcessManagement(t *testing.T) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
require.NotNil(t, supervisor)
|
||||
|
||||
// Start supervisor
|
||||
err := supervisor.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Give some time for process management to initialize
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
// Test that supervisor is managing processes
|
||||
assert.True(t, supervisor.IsRunning())
|
||||
|
||||
// Cleanup
|
||||
supervisor.Stop()
|
||||
|
||||
// Ensure supervisor stopped cleanly
|
||||
assert.False(t, supervisor.IsRunning())
|
||||
}
|
||||
|
||||
// Benchmark tests
|
||||
func BenchmarkAudioOutputSupervisor(b *testing.B) {
|
||||
supervisor := NewAudioOutputSupervisor()
|
||||
|
||||
b.Run("Start", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = supervisor.Start()
|
||||
supervisor.Stop()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("GetProcessMetrics", func(b *testing.B) {
|
||||
_ = supervisor.Start()
|
||||
defer supervisor.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = supervisor.GetProcessMetrics()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("IsRunning", func(b *testing.B) {
|
||||
_ = supervisor.Start()
|
||||
defer supervisor.Stop()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = supervisor.IsRunning()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,319 @@
|
|||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Test utilities and mock implementations for integration tests
|
||||
|
||||
// AudioIPCServer provides a mock IPC server for testing
|
||||
type AudioIPCServer struct {
|
||||
socketPath string
|
||||
logger zerolog.Logger
|
||||
listener net.Listener
|
||||
connections map[net.Conn]bool
|
||||
mu sync.RWMutex
|
||||
running bool
|
||||
}
|
||||
|
||||
// Start starts the mock IPC server
|
||||
func (s *AudioIPCServer) Start(ctx context.Context) error {
|
||||
// Remove existing socket file
|
||||
os.Remove(s.socketPath)
|
||||
|
||||
listener, err := net.Listen("unix", s.socketPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.listener = listener
|
||||
s.connections = make(map[net.Conn]bool)
|
||||
|
||||
s.mu.Lock()
|
||||
s.running = true
|
||||
s.mu.Unlock()
|
||||
|
||||
go s.acceptConnections(ctx)
|
||||
|
||||
<-ctx.Done()
|
||||
s.Stop()
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// Stop stops the mock IPC server
|
||||
func (s *AudioIPCServer) Stop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if !s.running {
|
||||
return
|
||||
}
|
||||
|
||||
s.running = false
|
||||
|
||||
if s.listener != nil {
|
||||
s.listener.Close()
|
||||
}
|
||||
|
||||
// Close all connections
|
||||
for conn := range s.connections {
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
// Clean up socket file
|
||||
os.Remove(s.socketPath)
|
||||
}
|
||||
|
||||
// acceptConnections handles incoming connections
|
||||
func (s *AudioIPCServer) acceptConnections(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
conn, err := s.listener.Accept()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
s.logger.Error().Err(err).Msg("Failed to accept connection")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.connections[conn] = true
|
||||
s.mu.Unlock()
|
||||
|
||||
go s.handleConnection(ctx, conn)
|
||||
}
|
||||
}
|
||||
|
||||
// handleConnection handles a single connection
|
||||
func (s *AudioIPCServer) handleConnection(ctx context.Context, conn net.Conn) {
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
delete(s.connections, conn)
|
||||
s.mu.Unlock()
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
buffer := make([]byte, 4096)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Set read timeout
|
||||
conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
|
||||
n, err := conn.Read(buffer)
|
||||
if err != nil {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Process received data (for testing, we just log it)
|
||||
s.logger.Debug().Int("bytes", n).Msg("Received data from client")
|
||||
}
|
||||
}
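A minimal, hypothetical sketch of driving the mock server from a test; it assumes the "testing" package is also imported, and reuses waitForCondition and createTestLogger defined later in this file:

// Hypothetical usage sketch for the mock IPC server above.
func startMockIPCServer(t *testing.T) (*AudioIPCServer, context.CancelFunc) {
	sock := filepath.Join(t.TempDir(), "audio-output.sock")
	srv := &AudioIPCServer{socketPath: sock, logger: createTestLogger("mock-ipc")}
	ctx, cancel := context.WithCancel(context.Background())
	go func() { _ = srv.Start(ctx) }()

	// Wait until the unix socket accepts connections before handing it to the test.
	ready := waitForCondition(func() bool {
		conn, err := net.Dial("unix", sock)
		if err != nil {
			return false
		}
		conn.Close()
		return true
	}, time.Second, 10*time.Millisecond)
	if !ready {
		cancel()
		t.Fatal("mock IPC server did not start listening")
	}
	return srv, cancel
}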
|
||||
|
||||
// AudioInputIPCServer provides a mock input IPC server
|
||||
type AudioInputIPCServer struct {
|
||||
*AudioIPCServer
|
||||
}
|
||||
|
||||
// Test message structures
|
||||
type OutputMessage struct {
|
||||
Type OutputMessageType
|
||||
Timestamp int64
|
||||
Data []byte
|
||||
}
|
||||
|
||||
type InputMessage struct {
|
||||
Type InputMessageType
|
||||
Timestamp int64
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Test configuration helpers
|
||||
func getTestConfig() *AudioConfigConstants {
|
||||
return &AudioConfigConstants{
|
||||
// Basic audio settings
|
||||
SampleRate: 48000,
|
||||
Channels: 2,
|
||||
MaxAudioFrameSize: 4096,
|
||||
|
||||
// IPC settings
|
||||
OutputMagicNumber: 0x4A4B4F55, // "JKOU"
|
||||
InputMagicNumber: 0x4A4B4D49, // "JKMI"
|
||||
WriteTimeout: 5 * time.Second,
|
||||
HeaderSize: 17,
|
||||
MaxFrameSize: 4096,
|
||||
MessagePoolSize: 100,
|
||||
|
||||
// Supervisor settings
|
||||
MaxRestartAttempts: 3,
|
||||
InitialRestartDelay: 1 * time.Second,
|
||||
MaxRestartDelay: 30 * time.Second,
|
||||
HealthCheckInterval: 5 * time.Second,
|
||||
|
||||
// Quality presets
|
||||
AudioQualityLowOutputBitrate: 32000,
|
||||
AudioQualityMediumOutputBitrate: 96000,
|
||||
AudioQualityHighOutputBitrate: 192000,
|
||||
AudioQualityUltraOutputBitrate: 320000,
|
||||
|
||||
AudioQualityLowInputBitrate: 16000,
|
||||
AudioQualityMediumInputBitrate: 64000,
|
||||
AudioQualityHighInputBitrate: 128000,
|
||||
AudioQualityUltraInputBitrate: 256000,
|
||||
|
||||
AudioQualityLowSampleRate: 24000,
|
||||
AudioQualityMediumSampleRate: 48000,
|
||||
AudioQualityHighSampleRate: 48000,
|
||||
AudioQualityUltraSampleRate: 48000,
|
||||
|
||||
AudioQualityLowChannels: 1,
|
||||
AudioQualityMediumChannels: 2,
|
||||
AudioQualityHighChannels: 2,
|
||||
AudioQualityUltraChannels: 2,
|
||||
|
||||
AudioQualityLowFrameSize: 20 * time.Millisecond,
|
||||
AudioQualityMediumFrameSize: 20 * time.Millisecond,
|
||||
AudioQualityHighFrameSize: 20 * time.Millisecond,
|
||||
AudioQualityUltraFrameSize: 20 * time.Millisecond,
|
||||
|
||||
AudioQualityMicLowSampleRate: 16000,
|
||||
|
||||
// Metrics settings
|
||||
MetricsUpdateInterval: 1 * time.Second,
|
||||
|
||||
// Latency settings
|
||||
DefaultTargetLatencyMS: 50,
|
||||
DefaultOptimizationIntervalSeconds: 5,
|
||||
DefaultAdaptiveThreshold: 0.8,
|
||||
DefaultStatsIntervalSeconds: 5,
|
||||
|
||||
// Buffer settings
|
||||
DefaultBufferPoolSize: 100,
|
||||
DefaultControlPoolSize: 50,
|
||||
DefaultFramePoolSize: 200,
|
||||
DefaultMaxPooledFrames: 500,
|
||||
DefaultPoolCleanupInterval: 30 * time.Second,
|
||||
|
||||
// Process monitoring
|
||||
MaxCPUPercent: 100.0,
|
||||
MinCPUPercent: 0.0,
|
||||
DefaultClockTicks: 100,
|
||||
DefaultMemoryGB: 4.0,
|
||||
MaxWarmupSamples: 10,
|
||||
WarmupCPUSamples: 5,
|
||||
MetricsChannelBuffer: 100,
|
||||
MinValidClockTicks: 50,
|
||||
MaxValidClockTicks: 1000,
|
||||
PageSize: 4096,
|
||||
|
||||
// CGO settings (for cgo builds)
|
||||
CGOOpusBitrate: 96000,
|
||||
CGOOpusComplexity: 3,
|
||||
CGOOpusVBR: 1,
|
||||
CGOOpusVBRConstraint: 1,
|
||||
CGOOpusSignalType: 3,
|
||||
CGOOpusBandwidth: 1105,
|
||||
CGOOpusDTX: 0,
|
||||
CGOSampleRate: 48000,
|
||||
|
||||
// Batch processing
|
||||
BatchProcessorFramesPerBatch: 10,
|
||||
BatchProcessorTimeout: 100 * time.Millisecond,
|
||||
|
||||
// Granular metrics
|
||||
GranularMetricsMaxSamples: 1000,
|
||||
GranularMetricsLogInterval: 30 * time.Second,
|
||||
GranularMetricsCleanupInterval: 5 * time.Minute,
|
||||
}
|
||||
}
|
||||
|
||||
// setupTestEnvironment sets up the test environment
|
||||
func setupTestEnvironment() {
|
||||
// Use test configuration
|
||||
UpdateConfig(getTestConfig())
|
||||
|
||||
// Initialize logging for tests
|
||||
logging.SetLevel("debug")
|
||||
}
|
||||
|
||||
// cleanupTestEnvironment cleans up after tests
|
||||
func cleanupTestEnvironment() {
|
||||
// Reset to default configuration
|
||||
UpdateConfig(DefaultAudioConfig())
|
||||
}
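These two helpers are typically wired together through a package-level TestMain; the sketch below is hypothetical and assumes the "testing" package is imported in this file:

// Hypothetical sketch: run the package's tests against the test configuration.
func TestMain(m *testing.M) {
	setupTestEnvironment()
	code := m.Run()
	cleanupTestEnvironment()
	os.Exit(code)
}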
|
||||
|
||||
// createTestLogger creates a logger for testing
|
||||
func createTestLogger(name string) zerolog.Logger {
|
||||
return zerolog.New(os.Stdout).With().
|
||||
Timestamp().
|
||||
Str("component", name).
|
||||
Str("test", "true").
|
||||
Logger()
|
||||
}
|
||||
|
||||
// waitForCondition waits for a condition to be true with timeout
|
||||
func waitForCondition(condition func() bool, timeout time.Duration, checkInterval time.Duration) bool {
|
||||
timeoutTimer := time.NewTimer(timeout)
|
||||
defer timeoutTimer.Stop()
|
||||
|
||||
ticker := time.NewTicker(checkInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeoutTimer.C:
|
||||
return false
|
||||
case <-ticker.C:
|
||||
if condition() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
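For example, a hedged usage sketch (assuming a supervisor value and a *testing.T named t are in scope):

// Hypothetical usage: poll every 10ms, for up to 2s, until the supervisor reports stopped.
if !waitForCondition(func() bool { return !supervisor.IsRunning() }, 2*time.Second, 10*time.Millisecond) {
	t.Fatal("supervisor did not stop within the timeout")
}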
|
||||
|
||||
// TestHelper provides common test functionality
|
||||
type TestHelper struct {
|
||||
tempDir string
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
// NewTestHelper creates a new test helper
|
||||
func NewTestHelper(tempDir string) *TestHelper {
|
||||
return &TestHelper{
|
||||
tempDir: tempDir,
|
||||
logger: createTestLogger("test-helper"),
|
||||
}
|
||||
}
|
||||
|
||||
// CreateTempSocket creates a temporary socket path
|
||||
func (h *TestHelper) CreateTempSocket(name string) string {
|
||||
return filepath.Join(h.tempDir, name)
|
||||
}
|
||||
|
||||
// GetLogger returns the test logger
|
||||
func (h *TestHelper) GetLogger() zerolog.Logger {
|
||||
return h.logger
|
||||
}
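A small, hypothetical example of using the helper with a per-test temporary directory (again assuming "testing" is imported):

// Hypothetical usage of TestHelper in a test.
func newSocketHelper(t *testing.T) (*TestHelper, string) {
	helper := NewTestHelper(t.TempDir())
	sockPath := helper.CreateTempSocket("input.sock")
	helper.GetLogger().Debug().Str("socket", sockPath).Msg("temp socket created")
	return helper, sockPath
}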
|
||||
|
|
@ -41,7 +41,6 @@ func ValidateAudioQuality(quality AudioQuality) error {
|
|||
}
|
||||
|
||||
// ValidateZeroCopyFrame validates zero-copy audio frame
|
||||
// Optimized to use cached max frame size
|
||||
func ValidateZeroCopyFrame(frame *ZeroCopyAudioFrame) error {
|
||||
if frame == nil {
|
||||
return ErrInvalidFrameData
|
||||
|
|
@ -50,22 +49,8 @@ func ValidateZeroCopyFrame(frame *ZeroCopyAudioFrame) error {
|
|||
if len(data) == 0 {
|
||||
return ErrInvalidFrameData
|
||||
}
|
||||
|
||||
// Fast path: use cached max frame size
|
||||
maxFrameSize := cachedMaxFrameSize
|
||||
if maxFrameSize == 0 {
|
||||
// Fallback: get from cache
|
||||
cache := GetCachedConfig()
|
||||
maxFrameSize = int(cache.maxAudioFrameSize.Load())
|
||||
if maxFrameSize == 0 {
|
||||
// Last resort: update cache
|
||||
cache.Update()
|
||||
maxFrameSize = int(cache.maxAudioFrameSize.Load())
|
||||
}
|
||||
// Cache globally for next calls
|
||||
cachedMaxFrameSize = maxFrameSize
|
||||
}
|
||||
|
||||
// Use config value
|
||||
maxFrameSize := GetConfig().MaxAudioFrameSize
|
||||
if len(data) > maxFrameSize {
|
||||
return ErrInvalidFrameSize
|
||||
}
|
||||
|
|
@ -110,31 +95,10 @@ func ValidateThreadPriority(priority int) error {
|
|||
}
|
||||
|
||||
// ValidateLatency validates latency duration values with reasonable bounds
|
||||
// Optimized to use AudioConfigCache for frequently accessed values
|
||||
func ValidateLatency(latency time.Duration) error {
|
||||
if latency < 0 {
|
||||
return fmt.Errorf("%w: latency %v cannot be negative", ErrInvalidLatency, latency)
|
||||
}
|
||||
|
||||
// Fast path: check against cached max latency
|
||||
cache := GetCachedConfig()
|
||||
maxLatency := time.Duration(cache.maxLatency.Load())
|
||||
|
||||
// If we have a valid cached value, use it
|
||||
if maxLatency > 0 {
|
||||
minLatency := time.Millisecond // Minimum reasonable latency
|
||||
if latency > 0 && latency < minLatency {
|
||||
return fmt.Errorf("%w: latency %v below minimum %v",
|
||||
ErrInvalidLatency, latency, minLatency)
|
||||
}
|
||||
if latency > maxLatency {
|
||||
return fmt.Errorf("%w: latency %v exceeds maximum %v",
|
||||
ErrInvalidLatency, latency, maxLatency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Slower path: full validation with GetConfig()
|
||||
config := GetConfig()
|
||||
minLatency := time.Millisecond // Minimum reasonable latency
|
||||
if latency > 0 && latency < minLatency {
|
||||
|
|
@ -149,30 +113,11 @@ func ValidateLatency(latency time.Duration) error {
|
|||
}
|
||||
|
||||
// ValidateMetricsInterval validates metrics update interval
|
||||
// Optimized to use AudioConfigCache for frequently accessed values
|
||||
func ValidateMetricsInterval(interval time.Duration) error {
|
||||
// Fast path: check against cached values
|
||||
cache := GetCachedConfig()
|
||||
minInterval := time.Duration(cache.minMetricsUpdateInterval.Load())
|
||||
maxInterval := time.Duration(cache.maxMetricsUpdateInterval.Load())
|
||||
|
||||
// If we have valid cached values, use them
|
||||
if minInterval > 0 && maxInterval > 0 {
|
||||
if interval < minInterval {
|
||||
return fmt.Errorf("%w: interval %v below minimum %v",
|
||||
ErrInvalidMetricsInterval, interval, minInterval)
|
||||
}
|
||||
if interval > maxInterval {
|
||||
return fmt.Errorf("%w: interval %v exceeds maximum %v",
|
||||
ErrInvalidMetricsInterval, interval, maxInterval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Slower path: full validation with GetConfig()
|
||||
// Use config values
|
||||
config := GetConfig()
|
||||
minInterval = config.MinMetricsUpdateInterval
|
||||
maxInterval = config.MaxMetricsUpdateInterval
|
||||
minInterval := config.MinMetricsUpdateInterval
|
||||
maxInterval := config.MaxMetricsUpdateInterval
|
||||
if interval < minInterval {
|
||||
return ErrInvalidMetricsInterval
|
||||
}
|
||||
|
|
@ -309,18 +254,12 @@ func ValidateChannelCount(channels int) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Fast path: Check against cached max channels
|
||||
cachedMaxChannels := int(cache.maxChannels.Load())
|
||||
if cachedMaxChannels > 0 && channels <= cachedMaxChannels {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Slow path: Update cache and validate
|
||||
cache.Update()
|
||||
updatedMaxChannels := int(cache.maxChannels.Load())
|
||||
if channels > updatedMaxChannels {
|
||||
// Check against max channels - still using cache to avoid GetConfig()
|
||||
// Note: We don't have maxChannels in the cache yet, so we'll use GetConfig() for now
|
||||
config := GetConfig()
|
||||
if channels > config.MaxChannels {
|
||||
return fmt.Errorf("%w: channel count %d exceeds maximum %d",
|
||||
ErrInvalidChannels, channels, updatedMaxChannels)
|
||||
ErrInvalidChannels, channels, config.MaxChannels)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -392,34 +331,15 @@ func ValidateFrameDuration(duration time.Duration) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Fast path: Check against cached min/max frame duration
|
||||
cachedMinDuration := time.Duration(cache.minFrameDuration.Load())
|
||||
cachedMaxDuration := time.Duration(cache.maxFrameDuration.Load())
|
||||
|
||||
if cachedMinDuration > 0 && cachedMaxDuration > 0 {
|
||||
if duration < cachedMinDuration {
|
||||
return fmt.Errorf("%w: frame duration %v below minimum %v",
|
||||
ErrInvalidFrameDuration, duration, cachedMinDuration)
|
||||
}
|
||||
if duration > cachedMaxDuration {
|
||||
return fmt.Errorf("%w: frame duration %v exceeds maximum %v",
|
||||
ErrInvalidFrameDuration, duration, cachedMaxDuration)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Slow path: Update cache and validate
|
||||
cache.Update()
|
||||
updatedMinDuration := time.Duration(cache.minFrameDuration.Load())
|
||||
updatedMaxDuration := time.Duration(cache.maxFrameDuration.Load())
|
||||
|
||||
if duration < updatedMinDuration {
|
||||
// Slower path: full validation against min/max
|
||||
config := GetConfig()
|
||||
if duration < config.MinFrameDuration {
|
||||
return fmt.Errorf("%w: frame duration %v below minimum %v",
|
||||
ErrInvalidFrameDuration, duration, updatedMinDuration)
|
||||
ErrInvalidFrameDuration, duration, config.MinFrameDuration)
|
||||
}
|
||||
if duration > updatedMaxDuration {
|
||||
if duration > config.MaxFrameDuration {
|
||||
return fmt.Errorf("%w: frame duration %v exceeds maximum %v",
|
||||
ErrInvalidFrameDuration, duration, updatedMaxDuration)
|
||||
ErrInvalidFrameDuration, duration, config.MaxFrameDuration)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,541 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestValidationFunctions provides comprehensive testing of all validation functions
|
||||
// to ensure they catch breaking changes and regressions effectively
|
||||
func TestValidationFunctions(t *testing.T) {
|
||||
// Initialize validation cache for testing
|
||||
InitValidationCache()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{"AudioQualityValidation", testAudioQualityValidation},
|
||||
{"FrameDataValidation", testFrameDataValidation},
|
||||
{"BufferSizeValidation", testBufferSizeValidation},
|
||||
{"ThreadPriorityValidation", testThreadPriorityValidation},
|
||||
{"LatencyValidation", testLatencyValidation},
|
||||
{"MetricsIntervalValidation", testMetricsIntervalValidation},
|
||||
{"SampleRateValidation", testSampleRateValidation},
|
||||
{"ChannelCountValidation", testChannelCountValidation},
|
||||
{"BitrateValidation", testBitrateValidation},
|
||||
{"FrameDurationValidation", testFrameDurationValidation},
|
||||
{"IPCConfigValidation", testIPCConfigValidation},
|
||||
{"AdaptiveBufferConfigValidation", testAdaptiveBufferConfigValidation},
|
||||
{"AudioConfigCompleteValidation", testAudioConfigCompleteValidation},
|
||||
{"ZeroCopyFrameValidation", testZeroCopyFrameValidation},
|
||||
{"AudioFrameFastValidation", testAudioFrameFastValidation},
|
||||
{"ErrorWrappingValidation", testErrorWrappingValidation},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.testFunc(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testAudioQualityValidation tests audio quality validation with boundary conditions
|
||||
func testAudioQualityValidation(t *testing.T) {
|
||||
// Test valid quality levels
|
||||
validQualities := []AudioQuality{AudioQualityLow, AudioQualityMedium, AudioQualityHigh, AudioQualityUltra}
|
||||
for _, quality := range validQualities {
|
||||
err := ValidateAudioQuality(quality)
|
||||
assert.NoError(t, err, "Valid quality %d should pass validation", quality)
|
||||
}
|
||||
|
||||
// Test invalid quality levels
|
||||
invalidQualities := []AudioQuality{-1, 4, 100, -100}
|
||||
for _, quality := range invalidQualities {
|
||||
err := ValidateAudioQuality(quality)
|
||||
assert.Error(t, err, "Invalid quality %d should fail validation", quality)
|
||||
assert.Contains(t, err.Error(), "invalid audio quality level", "Error should mention audio quality")
|
||||
}
|
||||
}
|
||||
|
||||
// testFrameDataValidation tests frame data validation with various edge cases using modern validation
|
||||
func testFrameDataValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test empty data
|
||||
err := ValidateAudioFrame([]byte{})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "frame data is empty")
|
||||
|
||||
// Test data above maximum size
|
||||
largeData := make([]byte, config.MaxAudioFrameSize+1)
|
||||
err = ValidateAudioFrame(largeData)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid data
|
||||
validData := make([]byte, 1000) // Within bounds
|
||||
if len(validData) <= config.MaxAudioFrameSize {
|
||||
err = ValidateAudioFrame(validData)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// testBufferSizeValidation tests buffer size validation
|
||||
func testBufferSizeValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test negative and zero sizes
|
||||
invalidSizes := []int{-1, -100, 0}
|
||||
for _, size := range invalidSizes {
|
||||
err := ValidateBufferSize(size)
|
||||
assert.Error(t, err, "Buffer size %d should be invalid", size)
|
||||
assert.Contains(t, err.Error(), "must be positive")
|
||||
}
|
||||
|
||||
// Test size exceeding maximum
|
||||
err := ValidateBufferSize(config.SocketMaxBuffer + 1)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid sizes
|
||||
validSizes := []int{1, 1024, 4096, config.SocketMaxBuffer}
|
||||
for _, size := range validSizes {
|
||||
err := ValidateBufferSize(size)
|
||||
assert.NoError(t, err, "Buffer size %d should be valid", size)
|
||||
}
|
||||
}
|
||||
|
||||
// testThreadPriorityValidation tests thread priority validation
|
||||
func testThreadPriorityValidation(t *testing.T) {
|
||||
// Test valid priorities
|
||||
validPriorities := []int{-20, -10, 0, 10, 19}
|
||||
for _, priority := range validPriorities {
|
||||
err := ValidateThreadPriority(priority)
|
||||
assert.NoError(t, err, "Priority %d should be valid", priority)
|
||||
}
|
||||
|
||||
// Test invalid priorities
|
||||
invalidPriorities := []int{-21, -100, 20, 100}
|
||||
for _, priority := range invalidPriorities {
|
||||
err := ValidateThreadPriority(priority)
|
||||
assert.Error(t, err, "Priority %d should be invalid", priority)
|
||||
assert.Contains(t, err.Error(), "outside valid range")
|
||||
}
|
||||
}
|
||||
|
||||
// testLatencyValidation tests latency validation
|
||||
func testLatencyValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test negative latency
|
||||
err := ValidateLatency(-1 * time.Millisecond)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "cannot be negative")
|
||||
|
||||
// Test zero latency (should be valid)
|
||||
err = ValidateLatency(0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test very small positive latency
|
||||
err = ValidateLatency(500 * time.Microsecond)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "below minimum")
|
||||
|
||||
// Test latency exceeding maximum
|
||||
err = ValidateLatency(config.MaxLatency + time.Second)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid latencies
|
||||
validLatencies := []time.Duration{
|
||||
1 * time.Millisecond,
|
||||
10 * time.Millisecond,
|
||||
100 * time.Millisecond,
|
||||
config.MaxLatency,
|
||||
}
|
||||
for _, latency := range validLatencies {
|
||||
err := ValidateLatency(latency)
|
||||
assert.NoError(t, err, "Latency %v should be valid", latency)
|
||||
}
|
||||
}
|
||||
|
||||
// testMetricsIntervalValidation tests metrics interval validation
|
||||
func testMetricsIntervalValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test interval below minimum
|
||||
err := ValidateMetricsInterval(config.MinMetricsUpdateInterval - time.Millisecond)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Test interval above maximum
|
||||
err = ValidateMetricsInterval(config.MaxMetricsUpdateInterval + time.Second)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Test valid intervals
|
||||
validIntervals := []time.Duration{
|
||||
config.MinMetricsUpdateInterval,
|
||||
config.MaxMetricsUpdateInterval,
|
||||
(config.MinMetricsUpdateInterval + config.MaxMetricsUpdateInterval) / 2,
|
||||
}
|
||||
for _, interval := range validIntervals {
|
||||
err := ValidateMetricsInterval(interval)
|
||||
assert.NoError(t, err, "Interval %v should be valid", interval)
|
||||
}
|
||||
}
|
||||
|
||||
// testSampleRateValidation tests sample rate validation
|
||||
func testSampleRateValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test negative and zero sample rates
|
||||
invalidRates := []int{-1, -48000, 0}
|
||||
for _, rate := range invalidRates {
|
||||
err := ValidateSampleRate(rate)
|
||||
assert.Error(t, err, "Sample rate %d should be invalid", rate)
|
||||
assert.Contains(t, err.Error(), "must be positive")
|
||||
}
|
||||
|
||||
// Test unsupported sample rates
|
||||
unsupportedRates := []int{1000, 12345, 96001}
|
||||
for _, rate := range unsupportedRates {
|
||||
err := ValidateSampleRate(rate)
|
||||
assert.Error(t, err, "Sample rate %d should be unsupported", rate)
|
||||
assert.Contains(t, err.Error(), "not in supported rates")
|
||||
}
|
||||
|
||||
// Test valid sample rates
|
||||
for _, rate := range config.ValidSampleRates {
|
||||
err := ValidateSampleRate(rate)
|
||||
assert.NoError(t, err, "Sample rate %d should be valid", rate)
|
||||
}
|
||||
}
|
||||
|
||||
// testChannelCountValidation tests channel count validation
|
||||
func testChannelCountValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test invalid channel counts
|
||||
invalidCounts := []int{-1, -10, 0}
|
||||
for _, count := range invalidCounts {
|
||||
err := ValidateChannelCount(count)
|
||||
assert.Error(t, err, "Channel count %d should be invalid", count)
|
||||
assert.Contains(t, err.Error(), "must be positive")
|
||||
}
|
||||
|
||||
// Test channel count exceeding maximum
|
||||
err := ValidateChannelCount(config.MaxChannels + 1)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid channel counts
|
||||
validCounts := []int{1, 2, config.MaxChannels}
|
||||
for _, count := range validCounts {
|
||||
err := ValidateChannelCount(count)
|
||||
assert.NoError(t, err, "Channel count %d should be valid", count)
|
||||
}
|
||||
}
|
||||
|
||||
// testBitrateValidation tests bitrate validation
|
||||
func testBitrateValidation(t *testing.T) {
|
||||
// Test invalid bitrates
|
||||
invalidBitrates := []int{-1, -1000, 0}
|
||||
for _, bitrate := range invalidBitrates {
|
||||
err := ValidateBitrate(bitrate)
|
||||
assert.Error(t, err, "Bitrate %d should be invalid", bitrate)
|
||||
assert.Contains(t, err.Error(), "must be positive")
|
||||
}
|
||||
|
||||
// Test bitrate below minimum (in kbps)
|
||||
err := ValidateBitrate(5) // 5 kbps = 5000 bps < 6000 bps minimum
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "below minimum")
|
||||
|
||||
// Test bitrate above maximum (in kbps)
|
||||
err = ValidateBitrate(511) // 511 kbps = 511000 bps > 510000 bps maximum
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid bitrates (in kbps)
|
||||
validBitrates := []int{
|
||||
6, // 6 kbps = 6000 bps (minimum)
|
||||
64, // Medium quality preset
|
||||
128, // High quality preset
|
||||
192, // Ultra quality preset
|
||||
510, // 510 kbps = 510000 bps (maximum)
|
||||
}
|
||||
for _, bitrate := range validBitrates {
|
||||
err := ValidateBitrate(bitrate)
|
||||
assert.NoError(t, err, "Bitrate %d kbps should be valid", bitrate)
|
||||
}
|
||||
}
|
||||
|
||||
// testFrameDurationValidation tests frame duration validation
|
||||
func testFrameDurationValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test invalid durations
|
||||
invalidDurations := []time.Duration{-1 * time.Millisecond, -1 * time.Second, 0}
|
||||
for _, duration := range invalidDurations {
|
||||
err := ValidateFrameDuration(duration)
|
||||
assert.Error(t, err, "Duration %v should be invalid", duration)
|
||||
assert.Contains(t, err.Error(), "must be positive")
|
||||
}
|
||||
|
||||
// Test duration below minimum
|
||||
err := ValidateFrameDuration(config.MinFrameDuration - time.Microsecond)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "below minimum")
|
||||
|
||||
// Test duration above maximum
|
||||
err = ValidateFrameDuration(config.MaxFrameDuration + time.Second)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid durations
|
||||
validDurations := []time.Duration{
|
||||
config.MinFrameDuration,
|
||||
config.MaxFrameDuration,
|
||||
20 * time.Millisecond, // Common frame duration
|
||||
}
|
||||
for _, duration := range validDurations {
|
||||
err := ValidateFrameDuration(duration)
|
||||
assert.NoError(t, err, "Duration %v should be valid", duration)
|
||||
}
|
||||
}
|
||||
|
||||
// testIPCConfigValidation tests IPC configuration validation
|
||||
func testIPCConfigValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test invalid configurations for input IPC
|
||||
invalidConfigs := []struct {
|
||||
sampleRate, channels, frameSize int
|
||||
description string
|
||||
}{
|
||||
{0, 2, 960, "zero sample rate"},
|
||||
{48000, 0, 960, "zero channels"},
|
||||
{48000, 2, 0, "zero frame size"},
|
||||
{config.MinSampleRate - 1, 2, 960, "sample rate below minimum"},
|
||||
{config.MaxSampleRate + 1, 2, 960, "sample rate above maximum"},
|
||||
{48000, config.MaxChannels + 1, 960, "too many channels"},
|
||||
{48000, -1, 960, "negative channels"},
|
||||
{48000, 2, -1, "negative frame size"},
|
||||
}
|
||||
|
||||
for _, tc := range invalidConfigs {
|
||||
// Test input IPC validation
|
||||
err := ValidateInputIPCConfig(tc.sampleRate, tc.channels, tc.frameSize)
|
||||
assert.Error(t, err, "Input IPC config should be invalid: %s", tc.description)
|
||||
|
||||
// Test output IPC validation
|
||||
err = ValidateOutputIPCConfig(tc.sampleRate, tc.channels, tc.frameSize)
|
||||
assert.Error(t, err, "Output IPC config should be invalid: %s", tc.description)
|
||||
}
|
||||
|
||||
// Test valid configuration
|
||||
err := ValidateInputIPCConfig(48000, 2, 960)
|
||||
assert.NoError(t, err)
|
||||
err = ValidateOutputIPCConfig(48000, 2, 960)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// testAdaptiveBufferConfigValidation tests adaptive buffer configuration validation
|
||||
func testAdaptiveBufferConfigValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test invalid configurations
|
||||
invalidConfigs := []struct {
|
||||
minSize, maxSize, defaultSize int
|
||||
description string
|
||||
}{
|
||||
{0, 1024, 512, "zero min size"},
|
||||
{-1, 1024, 512, "negative min size"},
|
||||
{512, 0, 256, "zero max size"},
|
||||
{512, -1, 256, "negative max size"},
|
||||
{512, 1024, 0, "zero default size"},
|
||||
{512, 1024, -1, "negative default size"},
|
||||
{1024, 512, 768, "min >= max"},
|
||||
{512, 1024, 256, "default < min"},
|
||||
{512, 1024, 2048, "default > max"},
|
||||
{512, config.SocketMaxBuffer + 1, 1024, "max exceeds global limit"},
|
||||
}
|
||||
|
||||
for _, tc := range invalidConfigs {
|
||||
err := ValidateAdaptiveBufferConfig(tc.minSize, tc.maxSize, tc.defaultSize)
|
||||
assert.Error(t, err, "Config should be invalid: %s", tc.description)
|
||||
}
|
||||
|
||||
// Test valid configuration
|
||||
err := ValidateAdaptiveBufferConfig(512, 4096, 1024)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// testAudioConfigCompleteValidation tests complete audio configuration validation
|
||||
func testAudioConfigCompleteValidation(t *testing.T) {
|
||||
// Test valid configuration using actual preset values
|
||||
validConfig := AudioConfig{
|
||||
Quality: AudioQualityMedium,
|
||||
Bitrate: 64, // kbps - matches medium quality preset
|
||||
SampleRate: 48000,
|
||||
Channels: 2,
|
||||
FrameSize: 20 * time.Millisecond,
|
||||
}
|
||||
err := ValidateAudioConfigComplete(validConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test invalid quality
|
||||
invalidQualityConfig := validConfig
|
||||
invalidQualityConfig.Quality = AudioQuality(99)
|
||||
err = ValidateAudioConfigComplete(invalidQualityConfig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "quality validation failed")
|
||||
|
||||
// Test invalid bitrate
|
||||
invalidBitrateConfig := validConfig
|
||||
invalidBitrateConfig.Bitrate = -1
|
||||
err = ValidateAudioConfigComplete(invalidBitrateConfig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "bitrate validation failed")
|
||||
|
||||
// Test invalid sample rate
|
||||
invalidSampleRateConfig := validConfig
|
||||
invalidSampleRateConfig.SampleRate = 12345
|
||||
err = ValidateAudioConfigComplete(invalidSampleRateConfig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "sample rate validation failed")
|
||||
|
||||
// Test invalid channels
|
||||
invalidChannelsConfig := validConfig
|
||||
invalidChannelsConfig.Channels = 0
|
||||
err = ValidateAudioConfigComplete(invalidChannelsConfig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "channel count validation failed")
|
||||
|
||||
// Test invalid frame duration
|
||||
invalidFrameDurationConfig := validConfig
|
||||
invalidFrameDurationConfig.FrameSize = -1 * time.Millisecond
|
||||
err = ValidateAudioConfigComplete(invalidFrameDurationConfig)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "frame duration validation failed")
|
||||
}
|
||||
|
||||
// testZeroCopyFrameValidation tests zero-copy frame validation
|
||||
func testZeroCopyFrameValidation(t *testing.T) {
|
||||
// Test nil frame
|
||||
err := ValidateZeroCopyFrame(nil)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Note: We can't easily test ZeroCopyAudioFrame without creating actual instances
|
||||
// This would require more complex setup, but the validation logic is tested
|
||||
}
|
||||
|
||||
// testAudioFrameFastValidation tests fast audio frame validation
|
||||
func testAudioFrameFastValidation(t *testing.T) {
|
||||
config := GetConfig()
|
||||
|
||||
// Test empty data
|
||||
err := ValidateAudioFrame([]byte{})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "frame data is empty")
|
||||
|
||||
// Test data exceeding maximum size
|
||||
largeData := make([]byte, config.MaxAudioFrameSize+1)
|
||||
err = ValidateAudioFrame(largeData)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
|
||||
// Test valid data
|
||||
validData := make([]byte, 1000)
|
||||
err = ValidateAudioFrame(validData)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// testErrorWrappingValidation tests error wrapping functionality
|
||||
func testErrorWrappingValidation(t *testing.T) {
|
||||
// Test wrapping nil error
|
||||
wrapped := WrapWithMetadata(nil, "component", "operation", map[string]interface{}{"key": "value"})
|
||||
assert.Nil(t, wrapped)
|
||||
|
||||
// Test wrapping actual error
|
||||
originalErr := assert.AnError
|
||||
metadata := map[string]interface{}{
|
||||
"frame_size": 1024,
|
||||
"quality": "high",
|
||||
}
|
||||
wrapped = WrapWithMetadata(originalErr, "audio", "decode", metadata)
|
||||
require.NotNil(t, wrapped)
|
||||
assert.Contains(t, wrapped.Error(), "audio.decode")
|
||||
assert.Contains(t, wrapped.Error(), "assert.AnError")
|
||||
assert.Contains(t, wrapped.Error(), "metadata")
|
||||
assert.Contains(t, wrapped.Error(), "frame_size")
|
||||
assert.Contains(t, wrapped.Error(), "quality")
|
||||
}
|
||||
|
||||
// TestValidationIntegration tests validation functions working together
|
||||
func TestValidationIntegration(t *testing.T) {
|
||||
// Test that validation functions work correctly with actual audio configurations
|
||||
presets := GetAudioQualityPresets()
|
||||
require.NotEmpty(t, presets)
|
||||
|
||||
for quality, config := range presets {
|
||||
t.Run(fmt.Sprintf("Quality_%d", quality), func(t *testing.T) {
|
||||
// Validate the preset configuration
|
||||
err := ValidateAudioConfigComplete(config)
|
||||
assert.NoError(t, err, "Preset configuration for quality %d should be valid", quality)
|
||||
|
||||
// Validate individual components
|
||||
err = ValidateAudioQuality(config.Quality)
|
||||
assert.NoError(t, err, "Quality should be valid")
|
||||
|
||||
err = ValidateBitrate(config.Bitrate)
|
||||
assert.NoError(t, err, "Bitrate should be valid")
|
||||
|
||||
err = ValidateSampleRate(config.SampleRate)
|
||||
assert.NoError(t, err, "Sample rate should be valid")
|
||||
|
||||
err = ValidateChannelCount(config.Channels)
|
||||
assert.NoError(t, err, "Channel count should be valid")
|
||||
|
||||
err = ValidateFrameDuration(config.FrameSize)
|
||||
assert.NoError(t, err, "Frame duration should be valid")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestValidationPerformance ensures validation functions are efficient
|
||||
func TestValidationPerformance(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping performance test in short mode")
|
||||
}
|
||||
|
||||
// Initialize validation cache for performance testing
|
||||
InitValidationCache()
|
||||
|
||||
// Test that validation functions complete quickly
|
||||
start := time.Now()
|
||||
iterations := 10000
|
||||
|
||||
for i := 0; i < iterations; i++ {
|
||||
_ = ValidateAudioQuality(AudioQualityMedium)
|
||||
_ = ValidateBufferSize(1024)
|
||||
_ = ValidateChannelCount(2)
|
||||
_ = ValidateSampleRate(48000)
|
||||
_ = ValidateBitrate(96) // 96 kbps
|
||||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
perIteration := elapsed / time.Duration(iterations)
|
||||
|
||||
// Performance expectations for JetKVM (ARM Cortex-A7 @ 1GHz, 256MB RAM)
|
||||
// Audio processing must not interfere with primary KVM functionality
|
||||
assert.Less(t, perIteration, 200*time.Microsecond, "Validation should not impact KVM performance")
|
||||
t.Logf("Validation performance: %v per iteration", perIteration)
|
||||
}
|
||||
|
|
@ -3,6 +3,7 @@ package audio
|
|||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
|
|
@ -141,23 +142,32 @@ func NewZeroCopyFramePool(maxFrameSize int) *ZeroCopyFramePool {
|
|||
|
||||
// Get retrieves a zero-copy frame from the pool
|
||||
func (p *ZeroCopyFramePool) Get() *ZeroCopyAudioFrame {
|
||||
start := time.Now()
|
||||
var wasHit bool
|
||||
defer func() {
|
||||
latency := time.Since(start)
|
||||
GetGranularMetricsCollector().RecordZeroCopyGet(latency, wasHit)
|
||||
}()
|
||||
|
||||
// Memory guard: Track allocation count to prevent excessive memory usage
|
||||
allocationCount := atomic.LoadInt64(&p.allocationCount)
|
||||
if allocationCount > int64(p.maxPoolSize*2) {
|
||||
// If we've allocated too many frames, force pool reuse
|
||||
atomic.AddInt64(&p.missCount, 1)
|
||||
wasHit = true // Pool reuse counts as hit
|
||||
frame := p.pool.Get().(*ZeroCopyAudioFrame)
|
||||
frame.mutex.Lock()
|
||||
frame.refCount = 1
|
||||
frame.length = 0
|
||||
frame.data = frame.data[:0]
|
||||
frame.mutex.Unlock()
|
||||
|
||||
return frame
|
||||
}
|
||||
|
||||
// First try pre-allocated frames for fastest access
|
||||
p.mutex.Lock()
|
||||
if len(p.preallocated) > 0 {
|
||||
wasHit = true
|
||||
frame := p.preallocated[len(p.preallocated)-1]
|
||||
p.preallocated = p.preallocated[:len(p.preallocated)-1]
|
||||
p.mutex.Unlock()
|
||||
|
|
@ -168,11 +178,13 @@ func (p *ZeroCopyFramePool) Get() *ZeroCopyAudioFrame {
|
|||
frame.data = frame.data[:0]
|
||||
frame.mutex.Unlock()
|
||||
|
||||
atomic.AddInt64(&p.hitCount, 1)
|
||||
return frame
|
||||
}
|
||||
p.mutex.Unlock()
|
||||
|
||||
// Try sync.Pool next and track allocation
|
||||
atomic.AddInt64(&p.allocationCount, 1)
|
||||
frame := p.pool.Get().(*ZeroCopyAudioFrame)
|
||||
frame.mutex.Lock()
|
||||
frame.refCount = 1
|
||||
|
|
@ -180,13 +192,18 @@ func (p *ZeroCopyFramePool) Get() *ZeroCopyAudioFrame {
|
|||
frame.data = frame.data[:0]
|
||||
frame.mutex.Unlock()
|
||||
|
||||
wasHit = true // Pool hit
|
||||
atomic.AddInt64(&p.hitCount, 1)
|
||||
|
||||
return frame
|
||||
}
|
||||
|
||||
// Put returns a zero-copy frame to the pool
|
||||
func (p *ZeroCopyFramePool) Put(frame *ZeroCopyAudioFrame) {
|
||||
// Check the frame before deferring the metrics callback so a nil frame
// does not panic inside the deferred closure.
if frame == nil || !frame.pooled {
return
}
start := time.Now()
defer func() {
latency := time.Since(start)
GetGranularMetricsCollector().RecordZeroCopyPut(latency, frame.capacity)
}()
|
||||
|
|
@ -219,15 +236,10 @@ func (p *ZeroCopyFramePool) Put(frame *ZeroCopyAudioFrame) {
|
|||
|
||||
// Return to sync.Pool
|
||||
p.pool.Put(frame)
|
||||
// Metrics collection removed
|
||||
if false {
|
||||
atomic.AddInt64(&p.counter, 1)
|
||||
}
|
||||
atomic.AddInt64(&p.counter, 1)
|
||||
} else {
|
||||
frame.mutex.Unlock()
|
||||
}
|
||||
|
||||
// Metrics recording removed - granular metrics collector was unused
|
||||
}
|
||||
|
||||
// Data returns the frame data as a slice (zero-copy view)
|
||||
|
|
|
|||
11
main.go
|
|
@ -68,17 +68,6 @@ func startAudioSubprocess() error {
|
|||
config.AudioQualityLowOpusDTX,
|
||||
)
|
||||
|
||||
// Pre-warm audio input subprocess to reduce activation latency (if enabled)
|
||||
if config.EnableSubprocessPrewarming {
|
||||
if err := audio.PrewarmAudioInputSubprocess(); err != nil {
|
||||
logger.Warn().Err(err).Msg("failed to pre-warm audio input subprocess")
|
||||
} else {
|
||||
logger.Info().Msg("audio input subprocess pre-warmed successfully")
|
||||
}
|
||||
} else {
|
||||
logger.Info().Msg("audio input subprocess pre-warming disabled by configuration")
|
||||
}
|
||||
|
||||
// Note: Audio input supervisor is NOT started here - it will be started on-demand
|
||||
// when the user activates microphone input through the UI
|
||||
|
||||
|
|
|
|||
|
|
@ -360,7 +360,7 @@ export default function Actionbar({
|
|||
checkIfStateChanged(open);
|
||||
return (
|
||||
<div className="mx-auto">
|
||||
<AudioControlPopover microphone={microphone} />
|
||||
<AudioControlPopover microphone={microphone} open={open} />
|
||||
</div>
|
||||
);
|
||||
}}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,38 @@
|
|||
import { cx } from "@/cva.config";
|
||||
|
||||
interface AudioConfig {
|
||||
Quality: number;
|
||||
Bitrate: number;
|
||||
SampleRate: number;
|
||||
Channels: number;
|
||||
FrameSize: string;
|
||||
}
|
||||
|
||||
interface AudioConfigDisplayProps {
|
||||
config: AudioConfig;
|
||||
variant?: 'default' | 'success' | 'info';
|
||||
className?: string;
|
||||
}
|
||||
|
||||
const variantStyles = {
|
||||
default: "bg-slate-50 text-slate-600 dark:bg-slate-700 dark:text-slate-400",
|
||||
success: "bg-green-50 text-green-600 dark:bg-green-900/20 dark:text-green-400",
|
||||
info: "bg-blue-50 text-blue-600 dark:bg-blue-900/20 dark:text-blue-400"
|
||||
};
|
||||
|
||||
export function AudioConfigDisplay({ config, variant = 'default', className }: AudioConfigDisplayProps) {
|
||||
return (
|
||||
<div className={cx(
|
||||
"rounded-md p-2 text-xs",
|
||||
variantStyles[variant],
|
||||
className
|
||||
)}>
|
||||
<div className="grid grid-cols-2 gap-1">
|
||||
<span>Sample Rate: {config.SampleRate}Hz</span>
|
||||
<span>Channels: {config.Channels}</span>
|
||||
<span>Bitrate: {config.Bitrate}kbps</span>
|
||||
<span>Frame: {config.FrameSize}</span>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
import React from 'react';
|
||||
import clsx from 'clsx';
|
||||
|
||||
interface AudioLevelMeterProps {
|
||||
level: number; // 0-100 percentage
|
||||
isActive: boolean;
|
||||
className?: string;
|
||||
size?: 'sm' | 'md' | 'lg';
|
||||
showLabel?: boolean;
|
||||
}
|
||||
|
||||
export const AudioLevelMeter: React.FC<AudioLevelMeterProps> = ({
|
||||
level,
|
||||
isActive,
|
||||
className,
|
||||
size = 'md',
|
||||
showLabel = true
|
||||
}) => {
|
||||
const sizeClasses = {
|
||||
sm: 'h-1',
|
||||
md: 'h-2',
|
||||
lg: 'h-3'
|
||||
};
|
||||
|
||||
const getLevelColor = (level: number) => {
|
||||
if (level < 20) return 'bg-green-500';
|
||||
if (level < 60) return 'bg-yellow-500';
|
||||
return 'bg-red-500';
|
||||
};
|
||||
|
||||
const getTextColor = (level: number) => {
|
||||
if (level < 20) return 'text-green-600 dark:text-green-400';
|
||||
if (level < 60) return 'text-yellow-600 dark:text-yellow-400';
|
||||
return 'text-red-600 dark:text-red-400';
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={clsx('space-y-1', className)}>
|
||||
{showLabel && (
|
||||
<div className="flex justify-between text-xs">
|
||||
<span className="text-slate-500 dark:text-slate-400">
|
||||
Microphone Level
|
||||
</span>
|
||||
<span className={clsx(
|
||||
'font-mono',
|
||||
isActive ? getTextColor(level) : 'text-slate-400 dark:text-slate-500'
|
||||
)}>
|
||||
{isActive ? `${Math.round(level)}%` : 'No Signal'}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className={clsx(
|
||||
'w-full rounded-full bg-slate-200 dark:bg-slate-700',
|
||||
sizeClasses[size]
|
||||
)}>
|
||||
<div
|
||||
className={clsx(
|
||||
'rounded-full transition-all duration-150 ease-out',
|
||||
sizeClasses[size],
|
||||
isActive ? getLevelColor(level) : 'bg-slate-300 dark:bg-slate-600'
|
||||
)}
|
||||
style={{
|
||||
width: isActive ? `${Math.min(100, Math.max(2, level))}%` : '0%'
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Peak indicators */}
|
||||
<div className="flex justify-between text-xs text-slate-400 dark:text-slate-500">
|
||||
<span>0%</span>
|
||||
<span>50%</span>
|
||||
<span>100%</span>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
|
@ -0,0 +1,880 @@
|
|||
import { useEffect, useState } from "react";
|
||||
import { MdGraphicEq, MdSignalWifi4Bar, MdError, MdMic } from "react-icons/md";
|
||||
import { LuActivity, LuClock, LuHardDrive, LuSettings, LuCpu, LuMemoryStick } from "react-icons/lu";
|
||||
|
||||
import { AudioLevelMeter } from "@components/AudioLevelMeter";
|
||||
import StatChart from "@components/StatChart";
|
||||
import { cx } from "@/cva.config";
|
||||
import { useMicrophone } from "@/hooks/useMicrophone";
|
||||
import { useAudioLevel } from "@/hooks/useAudioLevel";
|
||||
import { useAudioEvents } from "@/hooks/useAudioEvents";
|
||||
import api from "@/api";
|
||||
import { AUDIO_CONFIG } from "@/config/constants";
|
||||
import audioQualityService from "@/services/audioQualityService";
|
||||
|
||||
interface AudioMetrics {
|
||||
frames_received: number;
|
||||
frames_dropped: number;
|
||||
bytes_processed: number;
|
||||
last_frame_time: string;
|
||||
connection_drops: number;
|
||||
average_latency: string;
|
||||
}
|
||||
|
||||
interface MicrophoneMetrics {
|
||||
frames_sent: number;
|
||||
frames_dropped: number;
|
||||
bytes_processed: number;
|
||||
last_frame_time: string;
|
||||
connection_drops: number;
|
||||
average_latency: string;
|
||||
}
|
||||
|
||||
interface ProcessMetrics {
|
||||
cpu_percent: number;
|
||||
memory_percent: number;
|
||||
memory_rss: number;
|
||||
memory_vms: number;
|
||||
running: boolean;
|
||||
}
|
||||
|
||||
interface AudioConfig {
|
||||
Quality: number;
|
||||
Bitrate: number;
|
||||
SampleRate: number;
|
||||
Channels: number;
|
||||
FrameSize: string;
|
||||
}
|
||||
|
||||
// Quality labels will be managed by the audio quality service
|
||||
const getQualityLabels = () => audioQualityService.getQualityLabels();
|
||||
|
||||
// Format percentage values to 2 decimal places
|
||||
function formatPercentage(value: number | null | undefined): string {
|
||||
if (value === null || value === undefined || isNaN(value)) {
|
||||
return "0.00%";
|
||||
}
|
||||
return `${value.toFixed(2)}%`;
|
||||
}
|
||||
|
||||
function formatMemoryMB(rssBytes: number | null | undefined): string {
|
||||
if (rssBytes === null || rssBytes === undefined || isNaN(rssBytes)) {
|
||||
return "0.00 MB";
|
||||
}
|
||||
const mb = rssBytes / (1024 * 1024);
|
||||
return `${mb.toFixed(2)} MB`;
|
||||
}
|
||||
|
||||
// Default system memory estimate in MB (will be replaced by actual value from backend)
|
||||
const DEFAULT_SYSTEM_MEMORY_MB = 4096; // 4GB default
|
||||
|
||||
// Create chart array similar to connectionStats.tsx
|
||||
function createChartArray<T, K extends keyof T>(
|
||||
stream: Map<number, T>,
|
||||
metric: K,
|
||||
): { date: number; stat: T[K] | null }[] {
|
||||
const stat = Array.from(stream).map(([key, stats]) => {
|
||||
return { date: key, stat: stats[metric] };
|
||||
});
|
||||
|
||||
// Sort the dates to ensure they are in chronological order
|
||||
const sortedStat = stat.map(x => x.date).sort((a, b) => a - b);
|
||||
|
||||
// Determine the earliest statistic date
|
||||
const earliestStat = sortedStat[0];
|
||||
|
||||
// Current time in seconds since the Unix epoch
|
||||
const now = Math.floor(Date.now() / 1000);
|
||||
|
||||
// Determine the starting point for the chart data
|
||||
const firstChartDate = earliestStat ? Math.min(earliestStat, now - 120) : now - 120;
|
||||
|
||||
// Generate the chart array for the range between 'firstChartDate' and 'now'
|
||||
return Array.from({ length: now - firstChartDate }, (_, i) => {
|
||||
const currentDate = firstChartDate + i;
|
||||
return {
|
||||
date: currentDate,
|
||||
// Find the statistic for 'currentDate', or use the last known statistic if none exists for that date
|
||||
stat: stat.find(x => x.date === currentDate)?.stat ?? null,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
export default function AudioMetricsDashboard() {
|
||||
// System memory state
|
||||
const [systemMemoryMB, setSystemMemoryMB] = useState(DEFAULT_SYSTEM_MEMORY_MB);
|
||||
|
||||
// Use WebSocket-based audio events for real-time updates
|
||||
const {
|
||||
audioMetrics,
|
||||
microphoneMetrics: wsMicrophoneMetrics,
|
||||
audioProcessMetrics: wsAudioProcessMetrics,
|
||||
microphoneProcessMetrics: wsMicrophoneProcessMetrics,
|
||||
isConnected: wsConnected
|
||||
} = useAudioEvents();
|
||||
|
||||
// Fetch system memory information on component mount
|
||||
useEffect(() => {
|
||||
const fetchSystemMemory = async () => {
|
||||
try {
|
||||
const response = await api.GET('/system/memory');
|
||||
const data = await response.json();
|
||||
setSystemMemoryMB(data.total_memory_mb);
|
||||
} catch {
|
||||
// Failed to fetch system memory, using default
|
||||
}
|
||||
};
|
||||
fetchSystemMemory();
|
||||
}, []);
|
||||
|
||||
// Update historical data when WebSocket process metrics are received
|
||||
useEffect(() => {
|
||||
if (wsConnected && wsAudioProcessMetrics && wsAudioProcessMetrics.running) {
|
||||
const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
|
||||
// Validate that now is a valid number
|
||||
if (isNaN(now)) return;
|
||||
|
||||
const cpuStat = isNaN(wsAudioProcessMetrics.cpu_percent) ? null : wsAudioProcessMetrics.cpu_percent;
|
||||
|
||||
setAudioCpuStats(prev => {
|
||||
const newMap = new Map(prev);
|
||||
newMap.set(now, { cpu_percent: cpuStat });
|
||||
// Keep only last 120 seconds of data for memory management
|
||||
const cutoff = now - 120;
|
||||
for (const [key] of newMap) {
|
||||
if (key < cutoff) newMap.delete(key);
|
||||
}
|
||||
return newMap;
|
||||
});
|
||||
|
||||
setAudioMemoryStats(prev => {
|
||||
const newMap = new Map(prev);
|
||||
const memoryRss = isNaN(wsAudioProcessMetrics.memory_rss) ? null : wsAudioProcessMetrics.memory_rss;
|
||||
newMap.set(now, { memory_rss: memoryRss });
|
||||
// Keep only last 120 seconds of data for memory management
|
||||
const cutoff = now - 120;
|
||||
for (const [key] of newMap) {
|
||||
if (key < cutoff) newMap.delete(key);
|
||||
}
|
||||
return newMap;
|
||||
});
|
||||
}
|
||||
}, [wsConnected, wsAudioProcessMetrics]);
|
||||
|
||||
useEffect(() => {
|
||||
if (wsConnected && wsMicrophoneProcessMetrics) {
|
||||
const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
|
||||
// Validate that now is a valid number
|
||||
if (isNaN(now)) return;
|
||||
|
||||
const cpuStat = isNaN(wsMicrophoneProcessMetrics.cpu_percent) ? null : wsMicrophoneProcessMetrics.cpu_percent;
|
||||
|
||||
setMicCpuStats(prev => {
|
||||
const newMap = new Map(prev);
|
||||
newMap.set(now, { cpu_percent: cpuStat });
|
||||
// Keep only last 120 seconds of data for memory management
|
||||
const cutoff = now - 120;
|
||||
for (const [key] of newMap) {
|
||||
if (key < cutoff) newMap.delete(key);
|
||||
}
|
||||
return newMap;
|
||||
});
|
||||
|
||||
setMicMemoryStats(prev => {
|
||||
const newMap = new Map(prev);
|
||||
const memoryRss = isNaN(wsMicrophoneProcessMetrics.memory_rss) ? null : wsMicrophoneProcessMetrics.memory_rss;
|
||||
newMap.set(now, { memory_rss: memoryRss });
|
||||
// Keep only last 120 seconds of data for memory management
|
||||
const cutoff = now - 120;
|
||||
for (const [key] of newMap) {
|
||||
if (key < cutoff) newMap.delete(key);
|
||||
}
|
||||
return newMap;
|
||||
});
|
||||
}
|
||||
}, [wsConnected, wsMicrophoneProcessMetrics]);

  // Fallback state for when WebSocket is not connected
  const [fallbackMetrics, setFallbackMetrics] = useState<AudioMetrics | null>(null);
  const [fallbackMicrophoneMetrics, setFallbackMicrophoneMetrics] = useState<MicrophoneMetrics | null>(null);
  const [fallbackConnected, setFallbackConnected] = useState(false);

  // Process metrics state (fallback for when WebSocket is not connected)
  const [fallbackAudioProcessMetrics, setFallbackAudioProcessMetrics] = useState<ProcessMetrics | null>(null);
  const [fallbackMicrophoneProcessMetrics, setFallbackMicrophoneProcessMetrics] = useState<ProcessMetrics | null>(null);

  // Historical data for charts using Maps for better memory management
  const [audioCpuStats, setAudioCpuStats] = useState<Map<number, { cpu_percent: number | null }>>(new Map());
  const [audioMemoryStats, setAudioMemoryStats] = useState<Map<number, { memory_rss: number | null }>>(new Map());
  const [micCpuStats, setMicCpuStats] = useState<Map<number, { cpu_percent: number | null }>>(new Map());
  const [micMemoryStats, setMicMemoryStats] = useState<Map<number, { memory_rss: number | null }>>(new Map());

  // Configuration state (these don't change frequently, so we can load them once)
  const [config, setConfig] = useState<AudioConfig | null>(null);
  const [microphoneConfig, setMicrophoneConfig] = useState<AudioConfig | null>(null);
  const [lastUpdate, setLastUpdate] = useState<Date>(new Date());

  // Use WebSocket data when available, fall back to polling data otherwise
  const metrics = wsConnected && audioMetrics !== null ? audioMetrics : fallbackMetrics;
  const microphoneMetrics = wsConnected && wsMicrophoneMetrics !== null ? wsMicrophoneMetrics : fallbackMicrophoneMetrics;
  const audioProcessMetrics = wsConnected && wsAudioProcessMetrics !== null ? wsAudioProcessMetrics : fallbackAudioProcessMetrics;
  const microphoneProcessMetrics = wsConnected && wsMicrophoneProcessMetrics !== null ? wsMicrophoneProcessMetrics : fallbackMicrophoneProcessMetrics;
  const isConnected = wsConnected ? wsConnected : fallbackConnected;

  // Microphone state for audio level monitoring
  const { isMicrophoneActive, isMicrophoneMuted, microphoneStream } = useMicrophone();
  const { audioLevel, isAnalyzing } = useAudioLevel(
    isMicrophoneActive ? microphoneStream : null,
    {
      enabled: isMicrophoneActive,
      updateInterval: 120,
    });

  useEffect(() => {
    // Load initial configuration (only once)
    loadAudioConfig();

    // Set up fallback polling only when WebSocket is not connected
    if (!wsConnected) {
      loadAudioData();
      const interval = setInterval(loadAudioData, 1000);
      return () => clearInterval(interval);
    }
  }, [wsConnected]);

  const loadAudioConfig = async () => {
    try {
      // Use centralized audio quality service
      const { audio, microphone } = await audioQualityService.loadAllConfigurations();

      if (audio) {
        setConfig(audio.current);
      }

      if (microphone) {
        setMicrophoneConfig(microphone.current);
      }
    } catch (error) {
      console.error("Failed to load audio config:", error);
    }
  };

  const loadAudioData = async () => {
    try {
      // Load metrics
      const metricsResp = await api.GET("/audio/metrics");
      if (metricsResp.ok) {
        const metricsData = await metricsResp.json();
        setFallbackMetrics(metricsData);
        // Consider connected if API call succeeds, regardless of frame count
        setFallbackConnected(true);
        setLastUpdate(new Date());
      } else {
        setFallbackConnected(false);
      }

      // Load audio process metrics
      try {
        const audioProcessResp = await api.GET("/audio/process-metrics");
        if (audioProcessResp.ok) {
          const audioProcessData = await audioProcessResp.json();
          setFallbackAudioProcessMetrics(audioProcessData);

          // Update historical data for charts (keep last 120 seconds)
          if (audioProcessData.running) {
            const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
            // Validate that now is a valid number
            if (isNaN(now)) return;

            const cpuStat = isNaN(audioProcessData.cpu_percent) ? null : audioProcessData.cpu_percent;
            const memoryRss = isNaN(audioProcessData.memory_rss) ? null : audioProcessData.memory_rss;

            setAudioCpuStats(prev => {
              const newMap = new Map(prev);
              newMap.set(now, { cpu_percent: cpuStat });
              // Keep only last 120 seconds of data for memory management
              const cutoff = now - 120;
              for (const [key] of newMap) {
                if (key < cutoff) newMap.delete(key);
              }
              return newMap;
            });

            setAudioMemoryStats(prev => {
              const newMap = new Map(prev);
              newMap.set(now, { memory_rss: memoryRss });
              // Keep only last 120 seconds of data for memory management
              const cutoff = now - 120;
              for (const [key] of newMap) {
                if (key < cutoff) newMap.delete(key);
              }
              return newMap;
            });
          }
        }
      } catch {
        // Audio process metrics not available
      }

      // Load microphone metrics
      try {
        const micResp = await api.GET("/microphone/metrics");
        if (micResp.ok) {
          const micData = await micResp.json();
          setFallbackMicrophoneMetrics(micData);
        }
      } catch {
        // Microphone metrics might not be available, that's okay
      }

      // Load microphone process metrics
      try {
        const micProcessResp = await api.GET("/microphone/process-metrics");
        if (micProcessResp.ok) {
          const micProcessData = await micProcessResp.json();
          setFallbackMicrophoneProcessMetrics(micProcessData);

          // Update historical data for charts (keep last 120 seconds)
          const now = Math.floor(Date.now() / 1000); // Convert to seconds for StatChart
          // Validate that now is a valid number
          if (isNaN(now)) return;

          const cpuStat = isNaN(micProcessData.cpu_percent) ? null : micProcessData.cpu_percent;
          const memoryRss = isNaN(micProcessData.memory_rss) ? null : micProcessData.memory_rss;

          setMicCpuStats(prev => {
            const newMap = new Map(prev);
            newMap.set(now, { cpu_percent: cpuStat });
            // Keep only last 120 seconds of data for memory management
            const cutoff = now - 120;
            for (const [key] of newMap) {
              if (key < cutoff) newMap.delete(key);
            }
            return newMap;
          });

          setMicMemoryStats(prev => {
            const newMap = new Map(prev);
            newMap.set(now, { memory_rss: memoryRss });
            // Keep only last 120 seconds of data for memory management
            const cutoff = now - 120;
            for (const [key] of newMap) {
              if (key < cutoff) newMap.delete(key);
            }
            return newMap;
          });
        }
      } catch {
        // Microphone process metrics not available
      }
    } catch (error) {
      console.error("Failed to load audio data:", error);
      setFallbackConnected(false);
    }
  };

  const formatBytes = (bytes: number) => {
    if (bytes === 0) return "0 B";
    const k = 1024;
    const sizes = ["B", "KB", "MB", "GB"];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i];
  };
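  // Worked examples of the conversion above (easy to verify by hand):
  //   formatBytes(512)     -> "512 B"
  //   formatBytes(1024)    -> "1 KB"
  //   formatBytes(1536)    -> "1.5 KB"
  //   formatBytes(1048576) -> "1 MB"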

  const formatNumber = (num: number) => {
    return new Intl.NumberFormat().format(num);
  };

  const getDropRate = () => {
    if (!metrics || metrics.frames_received === 0) return 0;
    return ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER);
  };
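  // Example of the drop-rate arithmetic, assuming PERCENTAGE_MULTIPLIER is 100
  // (the constant lives in AUDIO_CONFIG and is not shown in this diff):
  //   frames_received = 2000, frames_dropped = 5  ->  (5 / 2000) * 100 = 0.25%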

  const getQualityColor = (quality: number) => {
    switch (quality) {
      case 0: return "text-yellow-600 dark:text-yellow-400";
      case 1: return "text-blue-600 dark:text-blue-400";
      case 2: return "text-green-600 dark:text-green-400";
      case 3: return "text-purple-600 dark:text-purple-400";
      default: return "text-slate-600 dark:text-slate-400";
    }
  };

return (
|
||||
<div className="space-y-4">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-2">
|
||||
<MdGraphicEq className="h-5 w-5 text-blue-600 dark:text-blue-400" />
|
||||
<h3 className="text-lg font-semibold text-slate-900 dark:text-slate-100">
|
||||
Audio Metrics
|
||||
</h3>
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<div className={cx(
|
||||
"h-2 w-2 rounded-full",
|
||||
isConnected ? "bg-green-500" : "bg-red-500"
|
||||
)} />
|
||||
<span className="text-xs text-slate-500 dark:text-slate-400">
|
||||
{isConnected ? "Active" : "Inactive"}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Current Configuration */}
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
{config && (
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<LuSettings className="h-4 w-4 text-blue-600 dark:text-blue-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Audio Output Config
|
||||
</span>
|
||||
</div>
|
||||
<div className="space-y-2 text-sm">
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Quality:</span>
|
||||
<span className={cx("font-medium", getQualityColor(config.Quality))}>
|
||||
{getQualityLabels()[config.Quality]}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Bitrate:</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{config.Bitrate}kbps
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Sample Rate:</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{config.SampleRate}Hz
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Channels:</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{config.Channels}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{microphoneConfig && (
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<MdMic className="h-4 w-4 text-green-600 dark:text-green-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Audio Input Config
|
||||
</span>
|
||||
</div>
|
||||
<div className="space-y-2 text-sm">
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Quality:</span>
|
||||
<span className={cx("font-medium", getQualityColor(microphoneConfig.Quality))}>
|
||||
{getQualityLabels()[microphoneConfig.Quality]}
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Bitrate:</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{microphoneConfig.Bitrate}kbps
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Sample Rate:</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{microphoneConfig.SampleRate}Hz
|
||||
</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-slate-500 dark:text-slate-400">Channels:</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{microphoneConfig.Channels}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
{/* Subprocess Resource Usage - Histogram View */}
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
{/* Audio Output Subprocess */}
|
||||
{audioProcessMetrics && (
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-3 flex items-center gap-2">
|
||||
<LuCpu className="h-4 w-4 text-blue-600 dark:text-blue-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Audio Output Process
|
||||
</span>
|
||||
<div className={cx(
|
||||
"h-2 w-2 rounded-full ml-auto",
|
||||
audioProcessMetrics.running ? "bg-green-500" : "bg-red-500"
|
||||
)} />
|
||||
</div>
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">CPU Usage</h4>
|
||||
<div className="h-24">
|
||||
<StatChart
|
||||
data={createChartArray(audioCpuStats, 'cpu_percent')}
|
||||
unit="%"
|
||||
domain={[0, 100]}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">Memory Usage</h4>
|
||||
<div className="h-24">
|
||||
<StatChart
|
||||
data={createChartArray(audioMemoryStats, 'memory_rss').map(item => ({
|
||||
date: item.date,
|
||||
stat: item.stat ? item.stat / (1024 * 1024) : null // Convert bytes to MB
|
||||
}))}
|
||||
unit="MB"
|
||||
domain={[0, systemMemoryMB]}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="grid grid-cols-2 gap-2 text-xs">
|
||||
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
|
||||
<div className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{formatPercentage(audioProcessMetrics.cpu_percent)}
|
||||
</div>
|
||||
<div className="text-slate-500 dark:text-slate-400">CPU</div>
|
||||
</div>
|
||||
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
|
||||
<div className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{formatMemoryMB(audioProcessMetrics.memory_rss)}
|
||||
</div>
|
||||
<div className="text-slate-500 dark:text-slate-400">Memory</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Microphone Input Subprocess */}
|
||||
{microphoneProcessMetrics && (
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-3 flex items-center gap-2">
|
||||
<LuMemoryStick className="h-4 w-4 text-green-600 dark:text-green-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Microphone Input Process
|
||||
</span>
|
||||
<div className={cx(
|
||||
"h-2 w-2 rounded-full ml-auto",
|
||||
microphoneProcessMetrics.running ? "bg-green-500" : "bg-red-500"
|
||||
)} />
|
||||
</div>
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">CPU Usage</h4>
|
||||
<div className="h-24">
|
||||
<StatChart
|
||||
data={createChartArray(micCpuStats, 'cpu_percent')}
|
||||
unit="%"
|
||||
domain={[0, 100]}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<h4 className="text-sm font-medium text-slate-900 dark:text-slate-100 mb-2">Memory Usage</h4>
|
||||
<div className="h-24">
|
||||
<StatChart
|
||||
data={createChartArray(micMemoryStats, 'memory_rss').map(item => ({
|
||||
date: item.date,
|
||||
stat: item.stat ? item.stat / (1024 * 1024) : null // Convert bytes to MB
|
||||
}))}
|
||||
unit="MB"
|
||||
domain={[0, systemMemoryMB]}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="grid grid-cols-2 gap-2 text-xs">
|
||||
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
|
||||
<div className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{formatPercentage(microphoneProcessMetrics.cpu_percent)}
|
||||
</div>
|
||||
<div className="text-slate-500 dark:text-slate-400">CPU</div>
|
||||
</div>
|
||||
<div className="text-center p-2 bg-slate-50 dark:bg-slate-800 rounded">
|
||||
<div className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{formatMemoryMB(microphoneProcessMetrics.memory_rss)}
|
||||
</div>
|
||||
<div className="text-slate-500 dark:text-slate-400">Memory</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Performance Metrics */}
|
||||
{metrics && (
|
||||
<div className="space-y-3">
|
||||
{/* Audio Output Frames */}
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<LuActivity className="h-4 w-4 text-green-600 dark:text-green-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Audio Output
|
||||
</span>
|
||||
</div>
|
||||
<div className="grid grid-cols-2 gap-3">
|
||||
<div className="text-center">
|
||||
<div className="text-2xl font-bold text-green-600 dark:text-green-400">
|
||||
{formatNumber(metrics.frames_received)}
|
||||
</div>
|
||||
<div className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Frames Received
|
||||
</div>
|
||||
</div>
|
||||
<div className="text-center">
|
||||
<div className={cx(
|
||||
"text-2xl font-bold",
|
||||
metrics.frames_dropped > 0
|
||||
? "text-red-600 dark:text-red-400"
|
||||
: "text-green-600 dark:text-green-400"
|
||||
)}>
|
||||
{formatNumber(metrics.frames_dropped)}
|
||||
</div>
|
||||
<div className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Frames Dropped
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Drop Rate */}
|
||||
<div className="mt-3 rounded-md bg-slate-50 p-2 dark:bg-slate-700">
|
||||
<div className="flex items-center justify-between">
|
||||
<span className="text-sm text-slate-600 dark:text-slate-400">
|
||||
Drop Rate
|
||||
</span>
|
||||
<span className={cx(
|
||||
"font-bold",
|
||||
getDropRate() > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
|
||||
? "text-red-600 dark:text-red-400"
|
||||
: getDropRate() > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
|
||||
? "text-yellow-600 dark:text-yellow-400"
|
||||
: "text-green-600 dark:text-green-400"
|
||||
)}>
|
||||
{getDropRate().toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES)}%
|
||||
</span>
|
||||
</div>
|
||||
<div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-600">
|
||||
<div
|
||||
className={cx(
|
||||
"h-2 rounded-full transition-all duration-300",
|
||||
getDropRate() > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
|
||||
? "bg-red-500"
|
||||
: getDropRate() > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
|
||||
? "bg-yellow-500"
|
||||
: "bg-green-500"
|
||||
)}
|
||||
style={{ width: `${Math.min(getDropRate(), AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE)}%` }}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Microphone Input Metrics */}
|
||||
{microphoneMetrics && (
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<MdMic className="h-4 w-4 text-orange-600 dark:text-orange-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Microphone Input
|
||||
</span>
|
||||
</div>
|
||||
<div className="grid grid-cols-2 gap-3">
|
||||
<div className="text-center">
|
||||
<div className="text-2xl font-bold text-orange-600 dark:text-orange-400">
|
||||
{formatNumber(microphoneMetrics.frames_sent)}
|
||||
</div>
|
||||
<div className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Frames Sent
|
||||
</div>
|
||||
</div>
|
||||
<div className="text-center">
|
||||
<div className={cx(
|
||||
"text-2xl font-bold",
|
||||
microphoneMetrics.frames_dropped > 0
|
||||
? "text-red-600 dark:text-red-400"
|
||||
: "text-green-600 dark:text-green-400"
|
||||
)}>
|
||||
{formatNumber(microphoneMetrics.frames_dropped)}
|
||||
</div>
|
||||
<div className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Frames Dropped
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Microphone Drop Rate */}
|
||||
<div className="mt-3 rounded-md bg-slate-50 p-2 dark:bg-slate-700">
|
||||
<div className="flex items-center justify-between">
|
||||
<span className="text-sm text-slate-600 dark:text-slate-400">
|
||||
Drop Rate
|
||||
</span>
|
||||
<span className={cx(
|
||||
"font-bold",
|
||||
(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
|
||||
? "text-red-600 dark:text-red-400"
|
||||
: (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
|
||||
? "text-yellow-600 dark:text-yellow-400"
|
||||
: "text-green-600 dark:text-green-400"
|
||||
)}>
|
||||
{microphoneMetrics.frames_sent > 0 ? ((microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER).toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES) : "0.00"}%
|
||||
</span>
|
||||
</div>
|
||||
<div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-600">
|
||||
<div
|
||||
className={cx(
|
||||
"h-2 rounded-full transition-all duration-300",
|
||||
(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
|
||||
? "bg-red-500"
|
||||
: (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
|
||||
? "bg-yellow-500"
|
||||
: "bg-green-500"
|
||||
)}
|
||||
style={{
|
||||
width: `${Math.min(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0, AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE)}%`
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Microphone Audio Level */}
|
||||
{isMicrophoneActive && (
|
||||
<div className="mt-3 rounded-md bg-slate-50 p-2 dark:bg-slate-700">
|
||||
<AudioLevelMeter
|
||||
level={audioLevel}
|
||||
isActive={isMicrophoneActive && !isMicrophoneMuted && isAnalyzing}
|
||||
size="sm"
|
||||
showLabel={true}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Microphone Connection Health */}
|
||||
<div className="mt-3 rounded-md bg-slate-50 p-2 dark:bg-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<MdSignalWifi4Bar className="h-3 w-3 text-purple-600 dark:text-purple-400" />
|
||||
<span className="text-sm font-medium text-slate-900 dark:text-slate-100">
|
||||
Connection Health
|
||||
</span>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<div className="flex justify-between">
|
||||
<span className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Connection Drops:
|
||||
</span>
|
||||
<span className={cx(
|
||||
"text-xs font-medium",
|
||||
microphoneMetrics.connection_drops > 0
|
||||
? "text-red-600 dark:text-red-400"
|
||||
: "text-green-600 dark:text-green-400"
|
||||
)}>
|
||||
{formatNumber(microphoneMetrics.connection_drops)}
|
||||
</span>
|
||||
</div>
|
||||
{microphoneMetrics.average_latency && (
|
||||
<div className="flex justify-between">
|
||||
<span className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Avg Latency:
|
||||
</span>
|
||||
<span className="text-xs font-medium text-slate-900 dark:text-slate-100">
|
||||
{microphoneMetrics.average_latency}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Data Transfer */}
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<LuHardDrive className="h-4 w-4 text-blue-600 dark:text-blue-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Data Transfer
|
||||
</span>
|
||||
</div>
|
||||
<div className="text-center">
|
||||
<div className="text-2xl font-bold text-blue-600 dark:text-blue-400">
|
||||
{formatBytes(metrics.bytes_processed)}
|
||||
</div>
|
||||
<div className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Total Processed
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Connection Health */}
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-700">
|
||||
<div className="mb-2 flex items-center gap-2">
|
||||
<MdSignalWifi4Bar className="h-4 w-4 text-purple-600 dark:text-purple-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Connection Health
|
||||
</span>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
<div className="flex justify-between">
|
||||
<span className="text-sm text-slate-500 dark:text-slate-400">
|
||||
Connection Drops:
|
||||
</span>
|
||||
<span className={cx(
|
||||
"font-medium",
|
||||
metrics.connection_drops > 0
|
||||
? "text-red-600 dark:text-red-400"
|
||||
: "text-green-600 dark:text-green-400"
|
||||
)}>
|
||||
{formatNumber(metrics.connection_drops)}
|
||||
</span>
|
||||
</div>
|
||||
{metrics.average_latency && (
|
||||
<div className="flex justify-between">
|
||||
<span className="text-sm text-slate-500 dark:text-slate-400">
|
||||
Avg Latency:
|
||||
</span>
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{metrics.average_latency}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Last Update */}
|
||||
<div className="flex items-center justify-center gap-2 text-xs text-slate-500 dark:text-slate-400">
|
||||
<LuClock className="h-3 w-3" />
|
||||
<span>Last updated: {lastUpdate.toLocaleTimeString()}</span>
|
||||
</div>
|
||||
|
||||
{/* No Data State */}
|
||||
{!metrics && (
|
||||
<div className="flex flex-col items-center justify-center py-8 text-center">
|
||||
<MdError className="h-12 w-12 text-slate-400 dark:text-slate-600" />
|
||||
<h3 className="mt-2 text-sm font-medium text-slate-900 dark:text-slate-100">
|
||||
No Audio Data
|
||||
</h3>
|
||||
<p className="mt-1 text-sm text-slate-500 dark:text-slate-400">
|
||||
Audio metrics will appear when audio streaming is active.
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}

@ -0,0 +1,33 @@
import { cx } from "@/cva.config";

interface AudioMetrics {
  frames_dropped: number;
  // Add other metrics properties as needed
}

interface AudioStatusIndicatorProps {
  metrics?: AudioMetrics;
  label: string;
  className?: string;
}

export function AudioStatusIndicator({ metrics, label, className }: AudioStatusIndicatorProps) {
  const hasIssues = metrics && metrics.frames_dropped > 0;

  return (
    <div className={cx(
      "text-center p-2 bg-slate-50 dark:bg-slate-800 rounded",
      className
    )}>
      <div className={cx(
        "font-medium",
        hasIssues
          ? "text-red-600 dark:text-red-400"
          : "text-green-600 dark:text-green-400"
      )}>
        {hasIssues ? "Issues" : "Good"}
      </div>
      <div className="text-slate-500 dark:text-slate-400">{label}</div>
    </div>
  );
}
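// A hedged usage sketch for the indicator above, mirroring how the popover
// renders it later in this diff; the metrics object here is illustrative
// sample data, not values read from the device:
//   <AudioStatusIndicator
//     metrics={{ frames_dropped: 0 }}
//     label="Audio Output"
//   />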

@ -1,9 +1,15 @@
import { useEffect, useState } from "react";
import { MdVolumeOff, MdVolumeUp, MdGraphicEq, MdMic, MdMicOff, MdRefresh } from "react-icons/md";
import { LuActivity, LuSignal } from "react-icons/lu";

import { Button } from "@components/Button";
import { AudioLevelMeter } from "@components/AudioLevelMeter";
import { AudioConfigDisplay } from "@components/AudioConfigDisplay";
import { AudioStatusIndicator } from "@components/AudioStatusIndicator";
import { cx } from "@/cva.config";
import { useUiStore } from "@/hooks/stores";
import { useAudioDevices } from "@/hooks/useAudioDevices";
import { useAudioLevel } from "@/hooks/useAudioLevel";
import { useAudioEvents } from "@/hooks/useAudioEvents";
import api from "@/api";
import notifications from "@/notifications";
|
||||
|
|
@ -43,9 +49,10 @@ const getQualityLabels = () => audioQualityService.getQualityLabels();
|
|||
|
||||
interface AudioControlPopoverProps {
|
||||
microphone: MicrophoneHookReturn;
|
||||
open?: boolean; // whether the popover is open (controls analysis)
|
||||
}
|
||||
|
||||
export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
|
||||
export default function AudioControlPopover({ microphone, open }: AudioControlPopoverProps) {
|
||||
const [currentConfig, setCurrentConfig] = useState<AudioConfig | null>(null);
|
||||
const [currentMicrophoneConfig, setCurrentMicrophoneConfig] = useState<AudioConfig | null>(null);
|
||||
|
||||
|
|
@ -61,6 +68,8 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
// Use WebSocket-based audio events for real-time updates
|
||||
const {
|
||||
audioMuted,
|
||||
audioMetrics,
|
||||
microphoneMetrics,
|
||||
isConnected: wsConnected
|
||||
} = useAudioEvents();
|
||||
|
||||
|
|
@ -70,6 +79,7 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
const {
|
||||
isMicrophoneActive,
|
||||
isMicrophoneMuted,
|
||||
microphoneStream,
|
||||
startMicrophone,
|
||||
stopMicrophone,
|
||||
toggleMicrophoneMute,
|
||||
|
|
@ -82,9 +92,16 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
|
||||
// Use WebSocket data exclusively - no polling fallback
|
||||
const isMuted = audioMuted ?? false;
|
||||
const metrics = audioMetrics;
|
||||
const micMetrics = microphoneMetrics;
|
||||
const isConnected = wsConnected;
|
||||
|
||||
|
||||
// Audio level monitoring - enable only when popover is open and microphone is active to save resources
|
||||
const analysisEnabled = (open ?? true) && isMicrophoneActive;
|
||||
const { audioLevel, isAnalyzing } = useAudioLevel(analysisEnabled ? microphoneStream : null, {
|
||||
enabled: analysisEnabled,
|
||||
updateInterval: 120, // 8-10 fps to reduce CPU without losing UX quality
|
||||
});
|
||||
|
||||
// Audio devices
|
||||
const {
|
||||
|
|
@ -99,7 +116,7 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
refreshDevices
|
||||
} = useAudioDevices();
|
||||
|
||||
|
||||
const { toggleSidebarView } = useUiStore();
|
||||
|
||||
// Load initial configurations once - cache to prevent repeated calls
|
||||
useEffect(() => {
|
||||
|
|
@ -358,7 +375,42 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
{/* Audio Level Meter */}
|
||||
{isMicrophoneActive && (
|
||||
<div className="rounded-lg bg-slate-50 p-3 dark:bg-slate-700">
|
||||
<AudioLevelMeter
|
||||
level={audioLevel}
|
||||
isActive={isMicrophoneActive && !isMicrophoneMuted && isAnalyzing}
|
||||
size="md"
|
||||
showLabel={true}
|
||||
/>
|
||||
{/* Debug information */}
|
||||
<div className="mt-2 text-xs text-slate-500 dark:text-slate-400">
|
||||
<div className="grid grid-cols-2 gap-1">
|
||||
<span>Stream: {microphoneStream ? '✓' : '✗'}</span>
|
||||
<span>Analyzing: {isAnalyzing ? '✓' : '✗'}</span>
|
||||
<span>Active: {isMicrophoneActive ? '✓' : '✗'}</span>
|
||||
<span>Muted: {isMicrophoneMuted ? '✓' : '✗'}</span>
|
||||
</div>
|
||||
{microphoneStream && (
|
||||
<div className="mt-1">
|
||||
Tracks: {microphoneStream.getAudioTracks().length}
|
||||
{microphoneStream.getAudioTracks().length > 0 && (
|
||||
<span className="ml-2">
|
||||
(Enabled: {microphoneStream.getAudioTracks().filter((t: MediaStreamTrack) => t.enabled).length})
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
<button
|
||||
onClick={syncMicrophoneState}
|
||||
className="mt-1 text-blue-500 hover:text-blue-600 dark:text-blue-400 dark:hover:text-blue-300"
|
||||
>
|
||||
Sync State
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Device Selection */}
|
||||
|
|
@ -462,11 +514,10 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
</div>
|
||||
|
||||
{currentMicrophoneConfig && (
|
||||
<div className="text-xs text-slate-600 dark:text-slate-400 mt-2">
|
||||
Quality: {currentMicrophoneConfig.Quality} |
|
||||
Bitrate: {currentMicrophoneConfig.Bitrate}kbps |
|
||||
Sample Rate: {currentMicrophoneConfig.SampleRate}Hz
|
||||
</div>
|
||||
<AudioConfigDisplay
|
||||
config={currentMicrophoneConfig}
|
||||
variant="success"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
|
@ -500,16 +551,59 @@ export default function AudioControlPopover({ microphone }: AudioControlPopoverP
|
|||
</div>
|
||||
|
||||
{currentConfig && (
|
||||
<div className="text-xs text-slate-600 dark:text-slate-400 mt-2">
|
||||
Quality: {currentConfig.Quality} |
|
||||
Bitrate: {currentConfig.Bitrate}kbps |
|
||||
Sample Rate: {currentConfig.SampleRate}Hz
|
||||
<AudioConfigDisplay
|
||||
config={currentConfig}
|
||||
variant="default"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Quick Status Summary */}
|
||||
<div className="rounded-lg border border-slate-200 p-3 dark:border-slate-600">
|
||||
<div className="flex items-center gap-2 mb-2">
|
||||
<LuActivity className="h-4 w-4 text-slate-600 dark:text-slate-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Quick Status
|
||||
</span>
|
||||
</div>
|
||||
|
||||
{metrics ? (
|
||||
<div className="grid grid-cols-2 gap-3 text-xs">
|
||||
<AudioStatusIndicator
|
||||
metrics={metrics}
|
||||
label="Audio Output"
|
||||
/>
|
||||
|
||||
{micMetrics && (
|
||||
<AudioStatusIndicator
|
||||
metrics={micMetrics}
|
||||
label="Microphone"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
) : (
|
||||
<div className="text-center py-2">
|
||||
<div className="text-sm text-slate-500 dark:text-slate-400">
|
||||
No data available
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
{/* Audio Metrics Dashboard Button */}
|
||||
<div className="pt-2 border-t border-slate-200 dark:border-slate-600">
|
||||
<div className="flex justify-center">
|
||||
<button
|
||||
onClick={() => {
|
||||
toggleSidebarView("audio-metrics");
|
||||
}}
|
||||
className="flex items-center gap-2 rounded-md border border-slate-200 bg-white px-4 py-2 text-sm font-medium text-slate-700 hover:bg-slate-50 dark:border-slate-600 dark:bg-slate-700 dark:text-slate-300 dark:hover:bg-slate-600 transition-colors"
|
||||
>
|
||||
<LuSignal className="h-4 w-4 text-blue-500" />
|
||||
<span>View Full Audio Metrics</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);

@ -0,0 +1,16 @@
import SidebarHeader from "@/components/SidebarHeader";
import { useUiStore } from "@/hooks/stores";
import AudioMetricsDashboard from "@/components/AudioMetricsDashboard";

export default function AudioMetricsSidebar() {
  const setSidebarView = useUiStore(state => state.setSidebarView);

  return (
    <>
      <SidebarHeader title="Audio Metrics" setSidebarView={setSidebarView} />
      <div className="h-full overflow-y-scroll bg-white px-4 py-2 pb-8 dark:bg-slate-900">
        <AudioMetricsDashboard />
      </div>
    </>
  );
}
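// The sidebar is opened from the audio popover through the existing UI store;
// a short usage sketch based on the call that appears earlier in this diff:
//   const { toggleSidebarView } = useUiStore();
//   toggleSidebarView("audio-metrics");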

@ -40,7 +40,7 @@ const appendStatToMap = <T extends { timestamp: number }>(
};

// Constants and types
export type AvailableSidebarViews = "connection-stats";
export type AvailableSidebarViews = "connection-stats" | "audio-metrics";
export type AvailableTerminalTypes = "kvm" | "serial" | "none";

export interface User {

@ -7,7 +7,11 @@ import { NETWORK_CONFIG } from '../config/constants';
// Audio event types matching the backend
export type AudioEventType =
  | 'audio-mute-changed'
  | 'audio-metrics-update'
  | 'microphone-state-changed'
  | 'microphone-metrics-update'
  | 'audio-process-metrics'
  | 'microphone-process-metrics'
  | 'audio-device-changed';

// Audio event data interfaces

@ -15,11 +19,39 @@ export interface AudioMuteData {
  muted: boolean;
}

export interface AudioMetricsData {
  frames_received: number;
  frames_dropped: number;
  bytes_processed: number;
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
}

export interface MicrophoneStateData {
  running: boolean;
  session_active: boolean;
}

export interface MicrophoneMetricsData {
  frames_sent: number;
  frames_dropped: number;
  bytes_processed: number;
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
}

export interface ProcessMetricsData {
  pid: number;
  cpu_percent: number;
  memory_rss: number;
  memory_vms: number;
  memory_percent: number;
  running: boolean;
  process_name: string;
}

export interface AudioDeviceChangedData {
  enabled: boolean;
  reason: string;

@ -28,7 +60,7 @@ export interface AudioDeviceChangedData {
// Audio event structure
export interface AudioEvent {
  type: AudioEventType;
  data: AudioMuteData | MicrophoneStateData | AudioDeviceChangedData;
  data: AudioMuteData | AudioMetricsData | MicrophoneStateData | MicrophoneMetricsData | ProcessMetricsData | AudioDeviceChangedData;
}
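// A hedged sketch of what one of these events could look like when delivered,
// based only on the AudioEvent and AudioMetricsData interfaces above; the field
// values (and the JSON wire format itself) are illustrative assumptions:
//   { "type": "audio-metrics-update",
//     "data": { "frames_received": 12000, "frames_dropped": 3, "bytes_processed": 2400000,
//               "last_frame_time": "2024-01-01T00:00:00Z", "connection_drops": 0,
//               "average_latency": "12ms" } }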

// Hook return type

@ -39,9 +71,15 @@ export interface UseAudioEventsReturn {

  // Audio state
  audioMuted: boolean | null;
  audioMetrics: AudioMetricsData | null;

  // Microphone state
  microphoneState: MicrophoneStateData | null;
  microphoneMetrics: MicrophoneMetricsData | null;

  // Process metrics
  audioProcessMetrics: ProcessMetricsData | null;
  microphoneProcessMetrics: ProcessMetricsData | null;

  // Device change events
  onAudioDeviceChanged?: (data: AudioDeviceChangedData) => void;

@ -61,7 +99,11 @@ const globalSubscriptionState = {
export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedData) => void): UseAudioEventsReturn {
  // State for audio data
  const [audioMuted, setAudioMuted] = useState<boolean | null>(null);
  const [audioMetrics, setAudioMetrics] = useState<AudioMetricsData | null>(null);
  const [microphoneState, setMicrophoneState] = useState<MicrophoneStateData | null>(null);
  const [microphoneMetrics, setMicrophoneMetricsData] = useState<MicrophoneMetricsData | null>(null);
  const [audioProcessMetrics, setAudioProcessMetrics] = useState<ProcessMetricsData | null>(null);
  const [microphoneProcessMetrics, setMicrophoneProcessMetrics] = useState<ProcessMetricsData | null>(null);

  // Local subscription state
  const [isLocallySubscribed, setIsLocallySubscribed] = useState(false);

@ -183,6 +225,12 @@ export function useAudioEvents(onAudioDeviceChangedD
        break;
      }

      case 'audio-metrics-update': {
        const audioMetricsData = audioEvent.data as AudioMetricsData;
        setAudioMetrics(audioMetricsData);
        break;
      }

      case 'microphone-state-changed': {
        const micStateData = audioEvent.data as MicrophoneStateData;
        setMicrophoneState(micStateData);

@ -190,6 +238,24 @@ export function useAudioEvents(onAudioDeviceChangedD
        break;
      }

      case 'microphone-metrics-update': {
        const micMetricsData = audioEvent.data as MicrophoneMetricsData;
        setMicrophoneMetricsData(micMetricsData);
        break;
      }

      case 'audio-process-metrics': {
        const audioProcessData = audioEvent.data as ProcessMetricsData;
        setAudioProcessMetrics(audioProcessData);
        break;
      }

      case 'microphone-process-metrics': {
        const micProcessData = audioEvent.data as ProcessMetricsData;
        setMicrophoneProcessMetrics(micProcessData);
        break;
      }

      case 'audio-device-changed': {
        const deviceChangedData = audioEvent.data as AudioDeviceChangedData;
        // Audio device changed

@ -254,9 +320,15 @@ export function useAudioEvents(onAudioDeviceChangedD
    // Audio state
    audioMuted,
    audioMetrics,

    // Microphone state
    microphoneState,
    microphoneMetrics: microphoneMetrics,

    // Process metrics
    audioProcessMetrics,
    microphoneProcessMetrics,

    // Device change events
    onAudioDeviceChanged,

@ -0,0 +1,136 @@
import { useEffect, useRef, useState } from 'react';

import { AUDIO_CONFIG } from '@/config/constants';

interface AudioLevelHookResult {
  audioLevel: number; // 0-100 percentage
  isAnalyzing: boolean;
}

interface AudioLevelOptions {
  enabled?: boolean; // Allow external control of analysis
  updateInterval?: number; // Throttle updates (default from AUDIO_CONFIG)
}

export const useAudioLevel = (
  stream: MediaStream | null,
  options: AudioLevelOptions = {}
): AudioLevelHookResult => {
  const { enabled = true, updateInterval = AUDIO_CONFIG.LEVEL_UPDATE_INTERVAL } = options;

  const [audioLevel, setAudioLevel] = useState(0);
  const [isAnalyzing, setIsAnalyzing] = useState(false);
  const audioContextRef = useRef<AudioContext | null>(null);
  const analyserRef = useRef<AnalyserNode | null>(null);
  const sourceRef = useRef<MediaStreamAudioSourceNode | null>(null);
  const intervalRef = useRef<number | null>(null);
  const lastUpdateTimeRef = useRef<number>(0);

  useEffect(() => {
    if (!stream || !enabled) {
      // Clean up when stream is null or disabled
      if (intervalRef.current !== null) {
        clearInterval(intervalRef.current);
        intervalRef.current = null;
      }
      if (sourceRef.current) {
        sourceRef.current.disconnect();
        sourceRef.current = null;
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
        audioContextRef.current = null;
      }
      analyserRef.current = null;
      setIsAnalyzing(false);
      setAudioLevel(0);
      return;
    }

    const audioTracks = stream.getAudioTracks();
    if (audioTracks.length === 0) {
      setIsAnalyzing(false);
      setAudioLevel(0);
      return;
    }

    try {
      // Create audio context and analyser
      const audioContext = new (window.AudioContext || (window as Window & { webkitAudioContext?: typeof AudioContext }).webkitAudioContext)();
      const analyser = audioContext.createAnalyser();
      const source = audioContext.createMediaStreamSource(stream);

      // Configure analyser - use smaller FFT for better performance
      analyser.fftSize = AUDIO_CONFIG.FFT_SIZE;
      analyser.smoothingTimeConstant = AUDIO_CONFIG.SMOOTHING_TIME_CONSTANT;

      // Connect nodes
      source.connect(analyser);

      // Store references
      audioContextRef.current = audioContext;
      analyserRef.current = analyser;
      sourceRef.current = source;

      const dataArray = new Uint8Array(analyser.frequencyBinCount);

      const updateLevel = () => {
        if (!analyserRef.current) return;

        const now = performance.now();

        // Throttle updates to reduce CPU usage
        if (now - lastUpdateTimeRef.current < updateInterval) {
          return;
        }
        lastUpdateTimeRef.current = now;

        analyserRef.current.getByteFrequencyData(dataArray);

        // Optimized RMS calculation - process only relevant frequency bands
        let sum = 0;
        const relevantBins = Math.min(dataArray.length, AUDIO_CONFIG.RELEVANT_FREQUENCY_BINS);
        for (let i = 0; i < relevantBins; i++) {
          const value = dataArray[i];
          sum += value * value;
        }
        const rms = Math.sqrt(sum / relevantBins);

        // Convert to percentage (0-100) with better scaling
        const level = Math.min(AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE, Math.max(0, (rms / AUDIO_CONFIG.RMS_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE));
        setAudioLevel(Math.round(level));
      };

      setIsAnalyzing(true);

      // Use setInterval instead of requestAnimationFrame for more predictable timing
      intervalRef.current = window.setInterval(updateLevel, updateInterval);

    } catch {
      // Audio level analyzer creation failed - silently handle
      setIsAnalyzing(false);
      setAudioLevel(0);
    }

    // Cleanup function
    return () => {
      if (intervalRef.current !== null) {
        clearInterval(intervalRef.current);
        intervalRef.current = null;
      }
      if (sourceRef.current) {
        sourceRef.current.disconnect();
        sourceRef.current = null;
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
        audioContextRef.current = null;
      }
      analyserRef.current = null;
      setIsAnalyzing(false);
      setAudioLevel(0);
    };
  }, [stream, enabled, updateInterval]);

  return { audioLevel, isAnalyzing };
};
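// A hedged usage sketch for the hook above, mirroring how the metrics dashboard
// calls it earlier in this diff; the stream and flags are whatever the caller
// already holds (here taken from useMicrophone), and 120 ms is one throttle choice:
//   const { isMicrophoneActive, microphoneStream } = useMicrophone();
//   const { audioLevel, isAnalyzing } = useAudioLevel(
//     isMicrophoneActive ? microphoneStream : null,
//     { enabled: isMicrophoneActive, updateInterval: 120 },
//   );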

@ -3,7 +3,7 @@ import { useCallback, useEffect, useRef, useState } from "react";
import { useRTCStore } from "@/hooks/stores";
import api from "@/api";
import { devLog, devInfo, devWarn, devError, devOnly } from "@/utils/debug";
import { AUDIO_CONFIG } from "@/config/constants";
import { NETWORK_CONFIG, AUDIO_CONFIG } from "@/config/constants";

export interface MicrophoneError {
  type: 'permission' | 'device' | 'network' | 'unknown';
|
||||
|
|
@ -84,7 +84,53 @@ export function useMicrophone() {
|
|||
setMicrophoneMuted(false);
|
||||
}, [microphoneSender, peerConnection, setMicrophoneStream, setMicrophoneSender, setMicrophoneActive, setMicrophoneMuted]);
|
||||
|
||||
// Debug function to check current state (can be called from browser console)
|
||||
const debugMicrophoneState = useCallback(() => {
|
||||
const refStream = microphoneStreamRef.current;
|
||||
const state = {
|
||||
isMicrophoneActive,
|
||||
isMicrophoneMuted,
|
||||
streamInRef: !!refStream,
|
||||
streamInStore: !!microphoneStream,
|
||||
senderInStore: !!microphoneSender,
|
||||
streamId: refStream?.id,
|
||||
storeStreamId: microphoneStream?.id,
|
||||
audioTracks: refStream?.getAudioTracks().length || 0,
|
||||
storeAudioTracks: microphoneStream?.getAudioTracks().length || 0,
|
||||
audioTrackDetails: refStream?.getAudioTracks().map(track => ({
|
||||
id: track.id,
|
||||
label: track.label,
|
||||
enabled: track.enabled,
|
||||
readyState: track.readyState,
|
||||
muted: track.muted
|
||||
})) || [],
|
||||
peerConnectionState: peerConnection ? {
|
||||
connectionState: peerConnection.connectionState,
|
||||
iceConnectionState: peerConnection.iceConnectionState,
|
||||
signalingState: peerConnection.signalingState
|
||||
} : "No peer connection",
|
||||
streamMatch: refStream === microphoneStream
|
||||
};
|
||||
devLog("Microphone Debug State:", state);
|
||||
|
||||
// Also check if streams are active
|
||||
if (refStream) {
|
||||
devLog("Ref stream active tracks:", refStream.getAudioTracks().filter(t => t.readyState === 'live').length);
|
||||
}
|
||||
if (microphoneStream && microphoneStream !== refStream) {
|
||||
devLog("Store stream active tracks:", microphoneStream.getAudioTracks().filter(t => t.readyState === 'live').length);
|
||||
}
|
||||
|
||||
return state;
|
||||
}, [isMicrophoneActive, isMicrophoneMuted, microphoneStream, microphoneSender, peerConnection]);
|
||||
|
||||
// Make debug function available globally for console access
|
||||
useEffect(() => {
|
||||
(window as Window & { debugMicrophoneState?: () => unknown }).debugMicrophoneState = debugMicrophoneState;
|
||||
return () => {
|
||||
delete (window as Window & { debugMicrophoneState?: () => unknown }).debugMicrophoneState;
|
||||
};
|
||||
}, [debugMicrophoneState]);
|
||||
|
||||
const lastSyncRef = useRef<number>(0);
|
||||
const isStartingRef = useRef<boolean>(false); // Track if we're in the middle of starting
|
||||
|
|
@ -449,7 +495,51 @@ export function useMicrophone() {
|
|||
}
|
||||
}, [peerConnection, setMicrophoneStream, setMicrophoneSender, setMicrophoneActive, setMicrophoneMuted, stopMicrophoneStream, isMicrophoneActive, isMicrophoneMuted, microphoneStream, isStarting, isStopping, isToggling]);
|
||||
|
||||
// Reset backend microphone state
|
||||
const resetBackendMicrophoneState = useCallback(async (): Promise<boolean> => {
|
||||
try {
|
||||
devLog("Resetting backend microphone state...");
|
||||
const response = await api.POST("/microphone/reset", {});
|
||||
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
devLog("Backend microphone reset successful:", data);
|
||||
|
||||
// Update frontend state to match backend
|
||||
setMicrophoneActive(false);
|
||||
setMicrophoneMuted(false);
|
||||
|
||||
// Clean up any orphaned streams
|
||||
if (microphoneStreamRef.current) {
|
||||
devLog("Cleaning up orphaned stream after reset");
|
||||
await stopMicrophoneStream();
|
||||
}
|
||||
|
||||
// Wait a bit for everything to settle
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
// Sync state to ensure consistency
|
||||
await syncMicrophoneState();
|
||||
|
||||
return true;
|
||||
} else {
|
||||
devError("Backend microphone reset failed:", response.status);
|
||||
return false;
|
||||
}
|
||||
} catch (error) {
|
||||
devWarn("Failed to reset backend microphone state:", error);
|
||||
// Fallback to old method
|
||||
try {
|
||||
devLog("Trying fallback reset method...");
|
||||
await api.POST("/microphone/stop", {});
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
return true;
|
||||
} catch (fallbackError) {
|
||||
devError("Fallback reset also failed:", fallbackError);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}, [setMicrophoneActive, setMicrophoneMuted, stopMicrophoneStream, syncMicrophoneState]);
|
||||
|
||||
// Stop microphone
|
||||
const stopMicrophone = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
|
||||
|
|
@ -589,9 +679,173 @@ export function useMicrophone() {
|
|||
}
|
||||
}, [microphoneStream, isMicrophoneActive, isMicrophoneMuted, setMicrophoneMuted, isStarting, isStopping, isToggling]);
|
||||
|
||||
// Function to check WebRTC audio transmission stats
|
||||
const checkAudioTransmissionStats = useCallback(async () => {
|
||||
if (!microphoneSender) {
|
||||
devLog("No microphone sender available");
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
const stats = await microphoneSender.getStats();
|
||||
const audioStats: {
|
||||
id: string;
|
||||
type: string;
|
||||
kind: string;
|
||||
packetsSent?: number;
|
||||
bytesSent?: number;
|
||||
timestamp?: number;
|
||||
ssrc?: number;
|
||||
}[] = [];
|
||||
|
||||
stats.forEach((report, id) => {
|
||||
if (report.type === 'outbound-rtp' && report.kind === 'audio') {
|
||||
audioStats.push({
|
||||
id,
|
||||
type: report.type,
|
||||
kind: report.kind,
|
||||
packetsSent: report.packetsSent,
|
||||
bytesSent: report.bytesSent,
|
||||
timestamp: report.timestamp,
|
||||
ssrc: report.ssrc
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
devLog("Audio transmission stats:", audioStats);
|
||||
return audioStats;
|
||||
} catch (error) {
|
||||
devError("Failed to get audio transmission stats:", error);
|
||||
return null;
|
||||
}
|
||||
}, [microphoneSender]);
|
||||
|
||||
// Comprehensive test function to diagnose microphone issues
|
||||
const testMicrophoneAudio = useCallback(async () => {
|
||||
devLog("=== MICROPHONE AUDIO TEST ===");
|
||||
|
||||
// 1. Check if we have a stream
|
||||
const stream = microphoneStreamRef.current;
|
||||
if (!stream) {
|
||||
devLog("❌ No microphone stream available");
|
||||
return;
|
||||
}
|
||||
|
||||
devLog("✅ Microphone stream exists:", stream.id);
|
||||
|
||||
// 2. Check audio tracks
|
||||
const audioTracks = stream.getAudioTracks();
|
||||
devLog("Audio tracks:", audioTracks.length);
|
||||
|
||||
if (audioTracks.length === 0) {
|
||||
devLog("❌ No audio tracks in stream");
|
||||
return;
|
||||
}
|
||||
|
||||
const track = audioTracks[0];
|
||||
devLog("✅ Audio track details:", {
|
||||
id: track.id,
|
||||
label: track.label,
|
||||
enabled: track.enabled,
|
||||
readyState: track.readyState,
|
||||
muted: track.muted
|
||||
});
|
||||
|
||||
// 3. Test audio level detection manually
|
||||
try {
|
||||
const audioContext = new (window.AudioContext || (window as Window & { webkitAudioContext?: typeof AudioContext }).webkitAudioContext)();
|
||||
const analyser = audioContext.createAnalyser();
|
||||
const source = audioContext.createMediaStreamSource(stream);
|
||||
|
||||
analyser.fftSize = AUDIO_CONFIG.ANALYSIS_FFT_SIZE;
|
||||
source.connect(analyser);
|
||||
|
||||
const dataArray = new Uint8Array(analyser.frequencyBinCount);
|
||||
|
||||
devLog("🎤 Testing audio level detection for 5 seconds...");
|
||||
devLog("Please speak into your microphone now!");
|
||||
|
||||
let maxLevel = 0;
|
||||
let sampleCount = 0;
|
||||
|
||||
const testInterval = setInterval(() => {
|
||||
analyser.getByteFrequencyData(dataArray);
|
||||
|
||||
let sum = 0;
|
||||
for (const value of dataArray) {
|
||||
sum += value * value;
|
||||
}
|
||||
const rms = Math.sqrt(sum / dataArray.length);
|
||||
const level = Math.min(AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE, (rms / AUDIO_CONFIG.LEVEL_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE);
|
||||
|
||||
maxLevel = Math.max(maxLevel, level);
|
||||
sampleCount++;
|
||||
|
||||
if (sampleCount % 10 === 0) { // Log every 10th sample
|
||||
devLog(`Audio level: ${level.toFixed(1)}% (max so far: ${maxLevel.toFixed(1)}%)`);
|
||||
}
|
||||
}, AUDIO_CONFIG.ANALYSIS_UPDATE_INTERVAL);
|
||||
|
||||
setTimeout(() => {
|
||||
clearInterval(testInterval);
|
||||
source.disconnect();
|
||||
audioContext.close();
|
||||
|
||||
devLog("🎤 Audio test completed!");
|
||||
devLog(`Maximum audio level detected: ${maxLevel.toFixed(1)}%`);
|
||||
|
||||
if (maxLevel > 5) {
|
||||
devLog("✅ Microphone is detecting audio!");
|
||||
} else {
|
||||
devLog("❌ No significant audio detected. Check microphone permissions and hardware.");
|
||||
}
|
||||
}, NETWORK_CONFIG.AUDIO_TEST_DURATION);
|
||||
|
||||
} catch (error) {
|
||||
devError("❌ Failed to test audio level:", error);
|
||||
}
|
||||
|
||||
// 4. Check WebRTC sender
|
||||
if (microphoneSender) {
|
||||
devLog("✅ WebRTC sender exists");
|
||||
devLog("Sender track:", {
|
||||
id: microphoneSender.track?.id,
|
||||
kind: microphoneSender.track?.kind,
|
||||
enabled: microphoneSender.track?.enabled,
|
||||
readyState: microphoneSender.track?.readyState
|
||||
});
|
||||
|
||||
// Check if sender track matches stream track
|
||||
if (microphoneSender.track === track) {
|
||||
devLog("✅ Sender track matches stream track");
|
||||
} else {
|
||||
devLog("❌ Sender track does NOT match stream track");
|
||||
}
|
||||
} else {
|
||||
devLog("❌ No WebRTC sender available");
|
||||
}
|
||||
|
||||
// 5. Check peer connection
|
||||
if (peerConnection) {
|
||||
devLog("✅ Peer connection exists");
|
||||
devLog("Connection state:", peerConnection.connectionState);
|
||||
devLog("ICE connection state:", peerConnection.iceConnectionState);
|
||||
|
||||
const transceivers = peerConnection.getTransceivers();
|
||||
const audioTransceivers = transceivers.filter(t =>
|
||||
t.sender.track?.kind === 'audio' || t.receiver.track?.kind === 'audio'
|
||||
);
|
||||
|
||||
devLog("Audio transceivers:", audioTransceivers.map(t => ({
|
||||
direction: t.direction,
|
||||
senderTrack: t.sender.track?.id,
|
||||
receiverTrack: t.receiver.track?.id
|
||||
})));
|
||||
} else {
|
||||
devLog("❌ No peer connection available");
|
||||
}
|
||||
|
||||
}, [microphoneSender, peerConnection]);
|
||||
|
||||
const startMicrophoneDebounced = useCallback((deviceId?: string) => {
|
||||
debouncedOperation(async () => {
|
||||
|
|
@ -605,7 +859,59 @@ export function useMicrophone() {
    }, "stop");
  }, [stopMicrophone, debouncedOperation]);

  // Make debug functions available globally for console access
  useEffect(() => {
    (window as Window & {
      debugMicrophone?: () => unknown;
      checkAudioStats?: () => unknown;
      testMicrophoneAudio?: () => unknown;
      resetBackendMicrophone?: () => unknown;
    }).debugMicrophone = debugMicrophoneState;
    (window as Window & {
      debugMicrophone?: () => unknown;
      checkAudioStats?: () => unknown;
      testMicrophoneAudio?: () => unknown;
      resetBackendMicrophone?: () => unknown;
    }).checkAudioStats = checkAudioTransmissionStats;
    (window as Window & {
      debugMicrophone?: () => unknown;
      checkAudioStats?: () => unknown;
      testMicrophoneAudio?: () => unknown;
      resetBackendMicrophone?: () => unknown;
    }).testMicrophoneAudio = testMicrophoneAudio;
    (window as Window & {
      debugMicrophone?: () => unknown;
      checkAudioStats?: () => unknown;
      testMicrophoneAudio?: () => unknown;
      resetBackendMicrophone?: () => unknown;
    }).resetBackendMicrophone = resetBackendMicrophoneState;
    return () => {
      delete (window as Window & {
        debugMicrophone?: () => unknown;
        checkAudioStats?: () => unknown;
        testMicrophoneAudio?: () => unknown;
        resetBackendMicrophone?: () => unknown;
      }).debugMicrophone;
      delete (window as Window & {
        debugMicrophone?: () => unknown;
        checkAudioStats?: () => unknown;
        testMicrophoneAudio?: () => unknown;
        resetBackendMicrophone?: () => unknown;
      }).checkAudioStats;
      delete (window as Window & {
        debugMicrophone?: () => unknown;
        checkAudioStats?: () => unknown;
        testMicrophoneAudio?: () => unknown;
        resetBackendMicrophone?: () => unknown;
      }).testMicrophoneAudio;
      delete (window as Window & {
        debugMicrophone?: () => unknown;
        checkAudioStats?: () => unknown;
        testMicrophoneAudio?: () => unknown;
        resetBackendMicrophone?: () => unknown;
      }).resetBackendMicrophone;
    };
  }, [debugMicrophoneState, checkAudioTransmissionStats, testMicrophoneAudio, resetBackendMicrophoneState]);

  // Sync state on mount
  useEffect(() => {
@ -635,7 +941,7 @@ export function useMicrophone() {
    startMicrophone,
    stopMicrophone,
    toggleMicrophoneMute,

    debugMicrophoneState,
    // Expose debounced variants for UI handlers
    startMicrophoneDebounced,
    stopMicrophoneDebounced,
@ -39,6 +39,7 @@ import WebRTCVideo from "@components/WebRTCVideo";
import { checkAuth, isInCloud, isOnDevice } from "@/main";
import DashboardNavbar from "@components/Header";
import ConnectionStatsSidebar from "@/components/sidebar/connectionStats";
import AudioMetricsSidebar from "@/components/sidebar/AudioMetricsSidebar";
import { JsonRpcRequest, JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
import Terminal from "@components/Terminal";
import { CLOUD_API, DEVICE_API } from "@/ui.config";
@ -924,7 +925,22 @@ function SidebarContainer(props: SidebarContainerProps) {
            <ConnectionStatsSidebar />
          </motion.div>
        )}

        {sidebarView === "audio-metrics" && (
          <motion.div
            className="absolute inset-0"
            initial={{ opacity: 0 }}
            animate={{ opacity: 1 }}
            exit={{ opacity: 0 }}
            transition={{
              duration: 0.5,
              ease: "easeInOut",
            }}
          >
            <div className="grid h-full grid-rows-(--grid-headerBody) shadow-xs">
              <AudioMetricsSidebar />
            </div>
          </motion.div>
        )}
      </AnimatePresence>
    </div>
  </div>
136
web.go
@ -24,7 +24,8 @@ import (
	"github.com/google/uuid"
	"github.com/jetkvm/kvm/internal/logging"
	"github.com/pion/webrtc/v4"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rs/zerolog"
	"golang.org/x/crypto/bcrypt"
)
@ -102,6 +103,9 @@ func setupRouter() *gin.Engine {
	// We use this to setup the device in the welcome page
	r.POST("/device/setup", handleSetup)

	// A Prometheus metrics endpoint.
	r.GET("/metrics", gin.WrapH(promhttp.Handler()))
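Editor's note: gin.WrapH adapts a standard net/http handler to a Gin route, so the line above publishes every metric registered through promauto (including the connection counters added in cloud.go) at /metrics. A minimal, self-contained sketch of the same wiring — the router setup and listen address here are illustrative assumptions, not taken from this diff:

	package main

	import (
		"github.com/gin-gonic/gin"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		r := gin.Default()
		// promhttp.Handler() serves the default Prometheus registry in text exposition format.
		r.GET("/metrics", gin.WrapH(promhttp.Handler()))
		_ = r.Run(":8080") // listen address is an assumption for this sketch
	}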

	// Developer mode protected routes
	developerModeRouter := r.Group("/developer/")
	developerModeRouter.Use(basicAuthProtectedMiddleware(true))
@ -207,6 +211,19 @@ func setupRouter() *gin.Engine {
		})
	})

	protected.GET("/audio/metrics", func(c *gin.Context) {
		registry := audio.GetMetricsRegistry()
		metrics := registry.GetAudioMetrics()
		c.JSON(200, gin.H{
			"frames_received":  metrics.FramesReceived,
			"frames_dropped":   metrics.FramesDropped,
			"bytes_processed":  metrics.BytesProcessed,
			"last_frame_time":  metrics.LastFrameTime,
			"connection_drops": metrics.ConnectionDrops,
			"average_latency":  fmt.Sprintf("%.1fms", float64(metrics.AverageLatency.Nanoseconds())/1e6),
		})
	})
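Editor's note: the JSON shape above can be consumed with a small polling client. A hedged sketch, assuming the device base URL and that the caller already satisfies the protected group's authentication (session cookie or basic auth); the Go field types are assumptions, since the diff does not show the underlying metrics struct. Assumed imports: net/http, encoding/json.

	type audioMetrics struct {
		FramesReceived  int64  `json:"frames_received"`
		FramesDropped   int64  `json:"frames_dropped"`
		BytesProcessed  int64  `json:"bytes_processed"`
		ConnectionDrops int64  `json:"connection_drops"`
		AverageLatency  string `json:"average_latency"` // formatted as "12.3ms" by the handler above
	}

	func fetchAudioMetrics(baseURL string) (*audioMetrics, error) {
		resp, err := http.Get(baseURL + "/audio/metrics") // baseURL is an assumption, e.g. http://<device-ip>
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		var m audioMetrics
		if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
			return nil, err
		}
		return &m, nil
	}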

	protected.GET("/microphone/quality", func(c *gin.Context) {
		config := audio.GetMicrophoneConfig()
		presets := audio.GetMicrophoneQualityPresets()
@ -382,6 +399,103 @@ func setupRouter() *gin.Engine {
		})
	})

	protected.GET("/microphone/metrics", func(c *gin.Context) {
		registry := audio.GetMetricsRegistry()
		metrics := registry.GetAudioInputMetrics()
		c.JSON(200, gin.H{
			"frames_sent":      metrics.FramesSent,
			"frames_dropped":   metrics.FramesDropped,
			"bytes_processed":  metrics.BytesProcessed,
			"last_frame_time":  metrics.LastFrameTime.Format("2006-01-02T15:04:05.000Z"),
			"connection_drops": metrics.ConnectionDrops,
			"average_latency":  fmt.Sprintf("%.1fms", float64(metrics.AverageLatency.Nanoseconds())/1e6),
		})
	})

	// Audio subprocess process metrics endpoints
	protected.GET("/audio/process-metrics", func(c *gin.Context) {
		// Access the global audio supervisor from main.go
		if audioSupervisor == nil {
			c.JSON(200, gin.H{
				"cpu_percent":    0.0,
				"memory_percent": 0.0,
				"memory_rss":     0,
				"memory_vms":     0,
				"running":        false,
			})
			return
		}

		metrics := audioSupervisor.GetProcessMetrics()
		if metrics == nil {
			c.JSON(200, gin.H{
				"cpu_percent":    0.0,
				"memory_percent": 0.0,
				"memory_rss":     0,
				"memory_vms":     0,
				"running":        false,
			})
			return
		}

		c.JSON(200, gin.H{
			"cpu_percent":    metrics.CPUPercent,
			"memory_percent": metrics.MemoryPercent,
			"memory_rss":     metrics.MemoryRSS,
			"memory_vms":     metrics.MemoryVMS,
			"running":        true,
		})
	})
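Editor's note: the nil-supervisor, nil-metrics, and missing-session branches in this handler (and in the microphone endpoint below) all return the same zeroed payload. One way to keep the handlers short — purely a sketch, not code from this diff — is a shared helper:

	// emptyProcessMetrics is the fallback body used when no subprocess is running.
	func emptyProcessMetrics() gin.H {
		return gin.H{
			"cpu_percent":    0.0,
			"memory_percent": 0.0,
			"memory_rss":     0,
			"memory_vms":     0,
			"running":        false,
		}
	}

	// Usage inside a handler:
	//   if audioSupervisor == nil {
	//       c.JSON(200, emptyProcessMetrics())
	//       return
	//   }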

	// Audio memory allocation metrics endpoint
	protected.GET("/audio/memory-metrics", gin.WrapF(audio.HandleMemoryMetrics))

	protected.GET("/microphone/process-metrics", func(c *gin.Context) {
		if currentSession == nil || currentSession.AudioInputManager == nil {
			c.JSON(200, gin.H{
				"cpu_percent":    0.0,
				"memory_percent": 0.0,
				"memory_rss":     0,
				"memory_vms":     0,
				"running":        false,
			})
			return
		}

		// Get the supervisor from the audio input manager
		supervisor := currentSession.AudioInputManager.GetSupervisor()
		if supervisor == nil {
			c.JSON(200, gin.H{
				"cpu_percent":    0.0,
				"memory_percent": 0.0,
				"memory_rss":     0,
				"memory_vms":     0,
				"running":        false,
			})
			return
		}

		metrics := supervisor.GetProcessMetrics()
		if metrics == nil {
			c.JSON(200, gin.H{
				"cpu_percent":    0.0,
				"memory_percent": 0.0,
				"memory_rss":     0,
				"memory_vms":     0,
				"running":        false,
			})
			return
		}

		c.JSON(200, gin.H{
			"cpu_percent":    metrics.CPUPercent,
			"memory_percent": metrics.MemoryPercent,
			"memory_rss":     metrics.MemoryRSS,
			"memory_vms":     metrics.MemoryVMS,
			"running":        true,
		})
	})

	// System memory information endpoint
	protected.GET("/system/memory", func(c *gin.Context) {
		processMonitor := audio.GetProcessMonitor()
@ -598,7 +712,11 @@ func handleWebRTCSignalWsMessages(
				return
			}

			// Metrics collection disabled
			// set the timer for the ping duration
			timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
				metricConnectionLastPingDuration.WithLabelValues(sourceType, source).Set(v)
				metricConnectionPingDuration.WithLabelValues(sourceType, source).Observe(v)
			}))

			l.Trace().Msg("sending ping frame")
			err := wsCon.Ping(runCtx)
@ -609,9 +727,13 @@ func handleWebRTCSignalWsMessages(
				return
			}

			// Metrics collection disabled
			// dont use `defer` here because we want to observe the duration of the ping
			duration := timer.ObserveDuration()

			l.Trace().Msg("received pong frame")
			metricConnectionTotalPingSentCount.WithLabelValues(sourceType, source).Inc()
			metricConnectionLastPingTimestamp.WithLabelValues(sourceType, source).SetToCurrentTime()

			l.Trace().Str("duration", duration.String()).Msg("received pong frame")
		}
	}()
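Editor's note: the ping instrumentation above relies on prometheus.NewTimer with an ObserverFunc: the timer records its start time on creation, and ObserveDuration reports the elapsed seconds to whatever the func forwards them to — here both a last-value gauge and a histogram. A self-contained sketch of the same pattern, assuming the github.com/prometheus/client_golang/prometheus import; the metric names and the ping func are placeholders, not from this diff:

	var (
		lastPing = prometheus.NewGauge(prometheus.GaugeOpts{Name: "example_last_ping_seconds"})
		pingHist = prometheus.NewHistogram(prometheus.HistogramOpts{Name: "example_ping_seconds"})
	)

	func timedPing(ping func() error) error {
		timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
			lastPing.Set(v)     // stands in for metricConnectionLastPingDuration
			pingHist.Observe(v) // stands in for metricConnectionPingDuration
		}))
		err := ping()
		// Not deferred, matching the diff: only observe once the round-trip completed.
		if err == nil {
			timer.ObserveDuration()
		}
		return err
	}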
@ -657,7 +779,8 @@ func handleWebRTCSignalWsMessages(
			return err
		}

		// Metrics collection disabled
		metricConnectionTotalPingReceivedCount.WithLabelValues(sourceType, source).Inc()
		metricConnectionLastPingReceivedTimestamp.WithLabelValues(sourceType, source).SetToCurrentTime()

		continue
	}
@ -681,7 +804,8 @@ func handleWebRTCSignalWsMessages(
			l.Info().Str("oidcGoogle", req.OidcGoogle).Msg("new session request with OIDC Google")
		}

		// Metrics collection disabled
		metricConnectionSessionRequestCount.WithLabelValues(sourceType, source).Inc()
		metricConnectionLastSessionRequestTimestamp.WithLabelValues(sourceType, source).SetToCurrentTime()
		err = handleSessionRequest(runCtx, wsCon, req, isCloudConnection, source, &l)
		if err != nil {
			l.Warn().Str("error", err.Error()).Msg("error starting new session")