mirror of https://github.com/jetkvm/kvm.git
refactor(audio): remove granular latency metrics and histogram functionality
This commit removes the granular latency metrics collection and histogram visualization functionality across the codebase. The changes include:
- Removing latency histogram tracking from audio input/output processing
- Removing latency histogram UI components and related types
- Removing the granular metrics collector's latency tracking capabilities
- Updating Prometheus metrics to use milliseconds instead of seconds
- Removing related tests and benchmarks
This commit is contained in:
parent f9adb4382d
commit fe4571956d
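One point the diff hunks below make concrete is that latency now travels as milliseconds end to end: the WebSocket event payload formats the average latency as a millisecond string, and the Prometheus gauges are renamed from *_seconds to *_milliseconds and fed ns/1e6 instead of ns/1e9. The following minimal Go sketch illustrates that conversion; the helper name and the standalone main function are illustrative only and are not part of the repository.

package main

import (
    "fmt"
    "time"
)

// durationToMillis mirrors the conversion used after this commit:
// nanoseconds divided by 1e6 yields float64 milliseconds.
func durationToMillis(d time.Duration) float64 {
    return float64(d.Nanoseconds()) / 1e6
}

func main() {
    avg := 2500 * time.Microsecond

    // WebSocket event payload: a formatted string such as "2.5ms".
    fmt.Printf("average_latency=%.1fms\n", durationToMillis(avg))

    // Prometheus gauge: the raw float value, e.g.
    // audioAverageLatencyMilliseconds.Set(durationToMillis(avg))
    fmt.Println(durationToMillis(avg))
}

The matching rename is visible in the metrics hunks: jetkvm_audio_average_latency_seconds becomes jetkvm_audio_average_latency_milliseconds, and likewise for the microphone and percentile gauges.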
@@ -0,0 +1,5 @@
# Netscape HTTP Cookie File
# https://curl.se/docs/http-cookies.html
# This file was generated by libcurl! Edit at your own risk.

#HttpOnly_192.168.100.214 FALSE / FALSE 1756968962 authToken 3b0b77eb-3771-4eb2-9704-ffcdf3ba788b
@@ -45,7 +45,6 @@ type AudioMetricsData struct {
    LastFrameTime    string `json:"last_frame_time"`
    ConnectionDrops  int64  `json:"connection_drops"`
    AverageLatency   string `json:"average_latency"`
    LatencyHistogram *LatencyHistogramData `json:"latency_histogram,omitempty"`
}

// MicrophoneStateData represents microphone state data

@@ -62,7 +61,6 @@ type MicrophoneMetricsData struct {
    LastFrameTime    string `json:"last_frame_time"`
    ConnectionDrops  int64  `json:"connection_drops"`
    AverageLatency   string `json:"average_latency"`
    LatencyHistogram *LatencyHistogramData `json:"latency_histogram,omitempty"`
}

// ProcessMetricsData represents process metrics data for WebSocket events

@@ -227,13 +225,6 @@ func (aeb *AudioEventBroadcaster) sendInitialState(connectionID string) {

// convertAudioMetricsToEventDataWithLatencyMs converts internal audio metrics to AudioMetricsData with millisecond latency formatting
func convertAudioMetricsToEventDataWithLatencyMs(metrics AudioMetrics) AudioMetricsData {
    // Get histogram data from granular metrics collector
    granularCollector := GetGranularMetricsCollector()
    var histogramData *LatencyHistogramData
    if granularCollector != nil {
        histogramData = granularCollector.GetOutputLatencyHistogram()
    }

    return AudioMetricsData{
        FramesReceived: metrics.FramesReceived,
        FramesDropped:  metrics.FramesDropped,

@@ -241,19 +232,11 @@ func convertAudioMetricsToEventDataWithLatencyMs(metrics AudioMetrics) AudioMetr
        LastFrameTime:    metrics.LastFrameTime.Format(GetConfig().EventTimeFormatString),
        ConnectionDrops:  metrics.ConnectionDrops,
        AverageLatency:   fmt.Sprintf("%.1fms", float64(metrics.AverageLatency.Nanoseconds())/1e6),
        LatencyHistogram: histogramData,
    }
}

// convertAudioInputMetricsToEventDataWithLatencyMs converts internal audio input metrics to MicrophoneMetricsData with millisecond latency formatting
func convertAudioInputMetricsToEventDataWithLatencyMs(metrics AudioInputMetrics) MicrophoneMetricsData {
    // Get histogram data from granular metrics collector
    granularCollector := GetGranularMetricsCollector()
    var histogramData *LatencyHistogramData
    if granularCollector != nil {
        histogramData = granularCollector.GetInputLatencyHistogram()
    }

    return MicrophoneMetricsData{
        FramesSent:    metrics.FramesSent,
        FramesDropped: metrics.FramesDropped,

@@ -261,7 +244,6 @@ func convertAudioInputMetricsToEventDataWithLatencyMs(metrics AudioInputMetrics)
        LastFrameTime:    metrics.LastFrameTime.Format(GetConfig().EventTimeFormatString),
        ConnectionDrops:  metrics.ConnectionDrops,
        AverageLatency:   fmt.Sprintf("%.1fms", float64(metrics.AverageLatency.Nanoseconds())/1e6),
        LatencyHistogram: histogramData,
    }
}
@@ -1,7 +1,6 @@
package audio

import (
    "sort"
    "sync"
    "sync/atomic"
    "time"

@@ -10,24 +9,6 @@ import (
    "github.com/rs/zerolog"
)

// LatencyHistogram tracks latency distribution with percentile calculations
type LatencyHistogram struct {
    // Atomic fields MUST be first for ARM32 alignment
    sampleCount  int64 // Total number of samples (atomic)
    totalLatency int64 // Sum of all latencies in nanoseconds (atomic)

    // Latency buckets for histogram (in nanoseconds)
    buckets []int64 // Bucket boundaries
    counts  []int64 // Count for each bucket (atomic)

    // Recent samples for percentile calculation
    recentSamples []time.Duration
    samplesMutex  sync.RWMutex
    maxSamples    int

    logger zerolog.Logger
}

// LatencyPercentiles holds calculated percentile values
type LatencyPercentiles struct {
    P50 time.Duration `json:"p50"`

@@ -59,11 +40,6 @@ type BufferPoolEfficiencyMetrics struct {

// GranularMetricsCollector aggregates all granular metrics
type GranularMetricsCollector struct {
    // Latency histograms by source
    inputLatencyHist      *LatencyHistogram
    outputLatencyHist     *LatencyHistogram
    processingLatencyHist *LatencyHistogram

    // Buffer pool efficiency tracking
    framePoolMetrics   *BufferPoolEfficiencyTracker
    controlPoolMetrics *BufferPoolEfficiencyTracker

@@ -91,118 +67,6 @@ type BufferPoolEfficiencyTracker struct {
    logger zerolog.Logger
}

// NewLatencyHistogram creates a new latency histogram with predefined buckets
func NewLatencyHistogram(maxSamples int, logger zerolog.Logger) *LatencyHistogram {
    // Define latency buckets using configuration constants
    buckets := []int64{
        int64(1 * time.Millisecond),
        int64(5 * time.Millisecond),
        int64(GetConfig().LatencyBucket10ms),
        int64(GetConfig().LatencyBucket25ms),
        int64(GetConfig().LatencyBucket50ms),
        int64(GetConfig().LatencyBucket100ms),
        int64(GetConfig().LatencyBucket250ms),
        int64(GetConfig().LatencyBucket500ms),
        int64(GetConfig().LatencyBucket1s),
        int64(GetConfig().LatencyBucket2s),
    }

    return &LatencyHistogram{
        buckets:       buckets,
        counts:        make([]int64, len(buckets)+1), // +1 for overflow bucket
        recentSamples: make([]time.Duration, 0, maxSamples),
        maxSamples:    maxSamples,
        logger:        logger,
    }
}

// RecordLatency adds a latency measurement to the histogram
func (lh *LatencyHistogram) RecordLatency(latency time.Duration) {
    latencyNs := latency.Nanoseconds()
    atomic.AddInt64(&lh.sampleCount, 1)
    atomic.AddInt64(&lh.totalLatency, latencyNs)

    // Find appropriate bucket
    bucketIndex := len(lh.buckets) // Default to overflow bucket
    for i, boundary := range lh.buckets {
        if latencyNs <= boundary {
            bucketIndex = i
            break
        }
    }
    atomic.AddInt64(&lh.counts[bucketIndex], 1)

    // Store recent sample for percentile calculation
    lh.samplesMutex.Lock()
    if len(lh.recentSamples) >= lh.maxSamples {
        // Remove oldest sample
        lh.recentSamples = lh.recentSamples[1:]
    }
    lh.recentSamples = append(lh.recentSamples, latency)
    lh.samplesMutex.Unlock()
}

// LatencyHistogramData represents histogram data for WebSocket transmission
type LatencyHistogramData struct {
    Buckets []float64 `json:"buckets"` // Bucket boundaries in milliseconds
    Counts  []int64   `json:"counts"`  // Count for each bucket
}

// GetHistogramData returns histogram buckets and counts for WebSocket transmission
func (lh *LatencyHistogram) GetHistogramData() LatencyHistogramData {
    // Convert bucket boundaries from nanoseconds to milliseconds
    buckets := make([]float64, len(lh.buckets))
    for i, bucket := range lh.buckets {
        buckets[i] = float64(bucket) / 1e6 // Convert ns to ms
    }

    // Get current counts atomically
    counts := make([]int64, len(lh.counts))
    for i := range lh.counts {
        counts[i] = atomic.LoadInt64(&lh.counts[i])
    }

    return LatencyHistogramData{
        Buckets: buckets,
        Counts:  counts,
    }
}

// GetPercentiles calculates latency percentiles from recent samples
func (lh *LatencyHistogram) GetPercentiles() LatencyPercentiles {
    lh.samplesMutex.RLock()
    samples := make([]time.Duration, len(lh.recentSamples))
    copy(samples, lh.recentSamples)
    lh.samplesMutex.RUnlock()

    if len(samples) == 0 {
        return LatencyPercentiles{}
    }

    // Sort samples for percentile calculation
    sort.Slice(samples, func(i, j int) bool {
        return samples[i] < samples[j]
    })

    n := len(samples)
    totalLatency := atomic.LoadInt64(&lh.totalLatency)
    sampleCount := atomic.LoadInt64(&lh.sampleCount)

    var avg time.Duration
    if sampleCount > 0 {
        avg = time.Duration(totalLatency / sampleCount)
    }

    return LatencyPercentiles{
        P50: samples[n*50/100],
        P95: samples[n*95/100],
        P99: samples[n*99/100],
        Min: samples[0],
        Max: samples[n-1],
        Avg: avg,
    }
}

// NewBufferPoolEfficiencyTracker creates a new efficiency tracker
func NewBufferPoolEfficiencyTracker(poolName string, logger zerolog.Logger) *BufferPoolEfficiencyTracker {
    return &BufferPoolEfficiencyTracker{

@@ -300,12 +164,7 @@ func (bpet *BufferPoolEfficiencyTracker) GetEfficiencyMetrics() BufferPoolEffici

// NewGranularMetricsCollector creates a new granular metrics collector
func NewGranularMetricsCollector(logger zerolog.Logger) *GranularMetricsCollector {
    maxSamples := GetConfig().LatencyHistorySize

    return &GranularMetricsCollector{
        inputLatencyHist:      NewLatencyHistogram(maxSamples, logger.With().Str("histogram", "input").Logger()),
        outputLatencyHist:     NewLatencyHistogram(maxSamples, logger.With().Str("histogram", "output").Logger()),
        processingLatencyHist: NewLatencyHistogram(maxSamples, logger.With().Str("histogram", "processing").Logger()),
        framePoolMetrics:      NewBufferPoolEfficiencyTracker("frame_pool", logger.With().Str("pool", "frame").Logger()),
        controlPoolMetrics:    NewBufferPoolEfficiencyTracker("control_pool", logger.With().Str("pool", "control").Logger()),
        zeroCopyMetrics:       NewBufferPoolEfficiencyTracker("zero_copy_pool", logger.With().Str("pool", "zero_copy").Logger()),

@@ -313,21 +172,6 @@ func NewGranularMetricsCollector(logger zerolog.Logger) *GranularMetricsCollecto
    }
}

// RecordInputLatency records latency for input operations
func (gmc *GranularMetricsCollector) RecordInputLatency(latency time.Duration) {
    gmc.inputLatencyHist.RecordLatency(latency)
}

// RecordOutputLatency records latency for output operations
func (gmc *GranularMetricsCollector) RecordOutputLatency(latency time.Duration) {
    gmc.outputLatencyHist.RecordLatency(latency)
}

// RecordProcessingLatency records latency for processing operations
func (gmc *GranularMetricsCollector) RecordProcessingLatency(latency time.Duration) {
    gmc.processingLatencyHist.RecordLatency(latency)
}

// RecordFramePoolOperation records frame pool operations
func (gmc *GranularMetricsCollector) RecordFramePoolGet(latency time.Duration, wasHit bool) {
    gmc.framePoolMetrics.RecordGetOperation(latency, wasHit)

@@ -355,44 +199,6 @@ func (gmc *GranularMetricsCollector) RecordZeroCopyPut(latency time.Duration, bu
    gmc.zeroCopyMetrics.RecordPutOperation(latency, bufferSize)
}

// GetLatencyPercentiles returns percentiles for all latency types
func (gmc *GranularMetricsCollector) GetLatencyPercentiles() map[string]LatencyPercentiles {
    gmc.mutex.RLock()
    defer gmc.mutex.RUnlock()

    return map[string]LatencyPercentiles{
        "input":      gmc.inputLatencyHist.GetPercentiles(),
        "output":     gmc.outputLatencyHist.GetPercentiles(),
        "processing": gmc.processingLatencyHist.GetPercentiles(),
    }
}

// GetInputLatencyHistogram returns histogram data for input latency
func (gmc *GranularMetricsCollector) GetInputLatencyHistogram() *LatencyHistogramData {
    gmc.mutex.RLock()
    defer gmc.mutex.RUnlock()

    if gmc.inputLatencyHist == nil {
        return nil
    }

    data := gmc.inputLatencyHist.GetHistogramData()
    return &data
}

// GetOutputLatencyHistogram returns histogram data for output latency
func (gmc *GranularMetricsCollector) GetOutputLatencyHistogram() *LatencyHistogramData {
    gmc.mutex.RLock()
    defer gmc.mutex.RUnlock()

    if gmc.outputLatencyHist == nil {
        return nil
    }

    data := gmc.outputLatencyHist.GetHistogramData()
    return &data
}

// GetBufferPoolEfficiency returns efficiency metrics for all buffer pools
func (gmc *GranularMetricsCollector) GetBufferPoolEfficiency() map[string]BufferPoolEfficiencyMetrics {
    gmc.mutex.RLock()

@@ -407,22 +213,8 @@ func (gmc *GranularMetricsCollector) GetBufferPoolEfficiency() map[string]Buffer

// LogGranularMetrics logs comprehensive granular metrics
func (gmc *GranularMetricsCollector) LogGranularMetrics() {
    latencyPercentiles := gmc.GetLatencyPercentiles()
    bufferEfficiency := gmc.GetBufferPoolEfficiency()

    // Log latency percentiles
    for source, percentiles := range latencyPercentiles {
        gmc.logger.Info().
            Str("source", source).
            Dur("p50", percentiles.P50).
            Dur("p95", percentiles.P95).
            Dur("p99", percentiles.P99).
            Dur("min", percentiles.Min).
            Dur("max", percentiles.Max).
            Dur("avg", percentiles.Avg).
            Msg("Latency percentiles")
    }

    // Log buffer pool efficiency
    for poolName, efficiency := range bufferEfficiency {
        gmc.logger.Info().
@@ -5,273 +5,10 @@ import (
    "testing"
    "time"

    "github.com/rs/zerolog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// TestLatencyHistogram tests the LatencyHistogram functionality
func TestLatencyHistogram(t *testing.T) {
    tests := []struct {
        name     string
        testFunc func(t *testing.T)
    }{
        {"NewLatencyHistogram", testNewLatencyHistogram},
        {"RecordLatency", testRecordLatency},
        {"GetHistogramData", testGetHistogramData},
        {"GetPercentiles", testGetPercentiles},
        {"ConcurrentAccess", testLatencyHistogramConcurrentAccess},
        {"BucketDistribution", testBucketDistribution},
        {"OverflowBucket", testOverflowBucket},
        {"RecentSamplesLimit", testRecentSamplesLimit},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            tt.testFunc(t)
        })
    }
}

// testNewLatencyHistogram tests LatencyHistogram creation
func testNewLatencyHistogram(t *testing.T) {
    logger := zerolog.Nop()
    maxSamples := 100

    hist := NewLatencyHistogram(maxSamples, logger)

    require.NotNil(t, hist)
    assert.Equal(t, maxSamples, hist.maxSamples)
    assert.NotNil(t, hist.buckets)
    assert.NotNil(t, hist.counts)
    assert.Equal(t, len(hist.buckets)+1, len(hist.counts)) // +1 for overflow bucket
    assert.NotNil(t, hist.recentSamples)
    assert.Equal(t, 0, len(hist.recentSamples))
}

// testRecordLatency tests latency recording functionality
func testRecordLatency(t *testing.T) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(100, logger)

    // Test recording various latencies
    latencies := []time.Duration{
        500 * time.Microsecond, // Should go in first bucket (1ms)
        3 * time.Millisecond,   // Should go in second bucket (5ms)
        15 * time.Millisecond,  // Should go in third bucket (10ms)
        100 * time.Millisecond, // Should go in appropriate bucket
        3 * time.Second,        // Should go in overflow bucket
    }

    for _, latency := range latencies {
        hist.RecordLatency(latency)
    }

    // Verify sample count
    assert.Equal(t, int64(len(latencies)), hist.sampleCount)

    // Verify total latency is accumulated
    expectedTotal := int64(0)
    for _, latency := range latencies {
        expectedTotal += latency.Nanoseconds()
    }
    assert.Equal(t, expectedTotal, hist.totalLatency)

    // Verify recent samples are stored
    assert.Equal(t, len(latencies), len(hist.recentSamples))
}

// testGetHistogramData tests histogram data retrieval
func testGetHistogramData(t *testing.T) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(100, logger)

    // Record some test latencies
    hist.RecordLatency(500 * time.Microsecond)
    hist.RecordLatency(3 * time.Millisecond)
    hist.RecordLatency(15 * time.Millisecond)
    hist.RecordLatency(3 * time.Second) // overflow

    data := hist.GetHistogramData()

    // Verify buckets are converted to milliseconds
    require.NotNil(t, data.Buckets)
    require.NotNil(t, data.Counts)
    assert.Equal(t, len(hist.buckets), len(data.Buckets))
    assert.Equal(t, len(hist.counts), len(data.Counts))

    // Verify first bucket is 1ms
    assert.Equal(t, 1.0, data.Buckets[0])

    // Verify counts are non-negative
    for i, count := range data.Counts {
        assert.GreaterOrEqual(t, count, int64(0), "Count at index %d should be non-negative", i)
    }

    // Verify total counts match recorded samples
    totalCounts := int64(0)
    for _, count := range data.Counts {
        totalCounts += count
    }
    assert.Equal(t, int64(4), totalCounts)
}

// testGetPercentiles tests percentile calculation
func testGetPercentiles(t *testing.T) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(100, logger)

    // Record a known set of latencies
    latencies := []time.Duration{
        1 * time.Millisecond,
        2 * time.Millisecond,
        3 * time.Millisecond,
        4 * time.Millisecond,
        5 * time.Millisecond,
        10 * time.Millisecond,
        20 * time.Millisecond,
        50 * time.Millisecond,
        100 * time.Millisecond,
        200 * time.Millisecond,
    }

    for _, latency := range latencies {
        hist.RecordLatency(latency)
    }

    percentiles := hist.GetPercentiles()

    // Verify percentiles are calculated
    assert.Greater(t, percentiles.P50, time.Duration(0))
    assert.Greater(t, percentiles.P95, time.Duration(0))
    assert.Greater(t, percentiles.P99, time.Duration(0))
    assert.Greater(t, percentiles.Min, time.Duration(0))
    assert.Greater(t, percentiles.Max, time.Duration(0))
    assert.Greater(t, percentiles.Avg, time.Duration(0))

    // Verify ordering: Min <= P50 <= P95 <= P99 <= Max
    assert.LessOrEqual(t, percentiles.Min, percentiles.P50)
    assert.LessOrEqual(t, percentiles.P50, percentiles.P95)
    assert.LessOrEqual(t, percentiles.P95, percentiles.P99)
    assert.LessOrEqual(t, percentiles.P99, percentiles.Max)

    // Verify min and max are correct
    assert.Equal(t, 1*time.Millisecond, percentiles.Min)
    assert.Equal(t, 200*time.Millisecond, percentiles.Max)
}

// testLatencyHistogramConcurrentAccess tests thread safety
func testLatencyHistogramConcurrentAccess(t *testing.T) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(1000, logger)

    const numGoroutines = 10
    const samplesPerGoroutine = 100

    var wg sync.WaitGroup
    wg.Add(numGoroutines)

    // Concurrent writers
    for i := 0; i < numGoroutines; i++ {
        go func(id int) {
            defer wg.Done()
            for j := 0; j < samplesPerGoroutine; j++ {
                latency := time.Duration(id*j+1) * time.Microsecond
                hist.RecordLatency(latency)
            }
        }(i)
    }

    // Concurrent readers
    for i := 0; i < 5; i++ {
        go func() {
            for j := 0; j < 50; j++ {
                _ = hist.GetHistogramData()
                _ = hist.GetPercentiles()
                time.Sleep(time.Microsecond)
            }
        }()
    }

    wg.Wait()

    // Verify final state
    assert.Equal(t, int64(numGoroutines*samplesPerGoroutine), hist.sampleCount)
    data := hist.GetHistogramData()
    assert.NotNil(t, data)
}

// testBucketDistribution tests that latencies are distributed correctly across buckets
func testBucketDistribution(t *testing.T) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(100, logger)

    // Record latencies that should go into specific buckets
    testCases := []struct {
        latency        time.Duration
        expectedBucket int
    }{
        {500 * time.Microsecond, 0}, // < 1ms
        {3 * time.Millisecond, 1},   // < 5ms
        {8 * time.Millisecond, 2},   // < 10ms (assuming 10ms is bucket 2)
    }

    for _, tc := range testCases {
        hist.RecordLatency(tc.latency)
    }

    data := hist.GetHistogramData()

    // Verify that counts are in expected buckets
    for i, tc := range testCases {
        if tc.expectedBucket < len(data.Counts) {
            assert.GreaterOrEqual(t, data.Counts[tc.expectedBucket], int64(1),
                "Test case %d: Expected bucket %d to have at least 1 count", i, tc.expectedBucket)
        }
    }
}

// testOverflowBucket tests the overflow bucket functionality
func testOverflowBucket(t *testing.T) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(100, logger)

    // Record a latency that should go into overflow bucket
    veryHighLatency := 10 * time.Second
    hist.RecordLatency(veryHighLatency)

    data := hist.GetHistogramData()

    // Verify overflow bucket (last bucket) has the count
    overflowBucketIndex := len(data.Counts) - 1
    assert.Equal(t, int64(1), data.Counts[overflowBucketIndex])

    // Verify other buckets are empty
    for i := 0; i < overflowBucketIndex; i++ {
        assert.Equal(t, int64(0), data.Counts[i], "Bucket %d should be empty", i)
    }
}

// testRecentSamplesLimit tests that recent samples are limited correctly
func testRecentSamplesLimit(t *testing.T) {
    logger := zerolog.Nop()
    maxSamples := 5
    hist := NewLatencyHistogram(maxSamples, logger)

    // Record more samples than the limit
    for i := 0; i < maxSamples*2; i++ {
        hist.RecordLatency(time.Duration(i+1) * time.Millisecond)
    }

    // Verify recent samples are limited
    hist.samplesMutex.RLock()
    assert.Equal(t, maxSamples, len(hist.recentSamples))
    hist.samplesMutex.RUnlock()

    // Verify total sample count is still correct
    assert.Equal(t, int64(maxSamples*2), hist.sampleCount)
}

// TestGranularMetricsCollector tests the GranularMetricsCollector functionality
func TestGranularMetricsCollector(t *testing.T) {
    tests := []struct {

@@ -279,11 +16,6 @@ func TestGranularMetricsCollector(t *testing.T) {
        testFunc func(t *testing.T)
    }{
        {"GetGranularMetricsCollector", testGetGranularMetricsCollector},
        {"RecordInputLatency", testRecordInputLatency},
        {"RecordOutputLatency", testRecordOutputLatency},
        {"GetInputLatencyHistogram", testGetInputLatencyHistogram},
        {"GetOutputLatencyHistogram", testGetOutputLatencyHistogram},
        {"GetLatencyPercentiles", testGetLatencyPercentiles},
        {"ConcurrentCollectorAccess", testConcurrentCollectorAccess},
    }

@@ -304,128 +36,6 @@ func testGetGranularMetricsCollector(t *testing.T) {
    assert.Same(t, collector1, collector2, "Should return the same singleton instance")
}

// testRecordInputLatency tests input latency recording
func testRecordInputLatency(t *testing.T) {
    collector := GetGranularMetricsCollector()
    require.NotNil(t, collector)

    testLatency := 5 * time.Millisecond
    collector.RecordInputLatency(testLatency)

    // Verify histogram data is available
    histData := collector.GetInputLatencyHistogram()
    require.NotNil(t, histData)
    assert.NotNil(t, histData.Buckets)
    assert.NotNil(t, histData.Counts)

    // Verify at least one count is recorded
    totalCounts := int64(0)
    for _, count := range histData.Counts {
        totalCounts += count
    }
    assert.Equal(t, int64(1), totalCounts)
}

// testRecordOutputLatency tests output latency recording
func testRecordOutputLatency(t *testing.T) {
    collector := GetGranularMetricsCollector()
    require.NotNil(t, collector)

    testLatency := 10 * time.Millisecond
    collector.RecordOutputLatency(testLatency)

    // Verify histogram data is available
    histData := collector.GetOutputLatencyHistogram()
    require.NotNil(t, histData)
    assert.NotNil(t, histData.Buckets)
    assert.NotNil(t, histData.Counts)

    // Verify at least one count is recorded
    totalCounts := int64(0)
    for _, count := range histData.Counts {
        totalCounts += count
    }
    assert.Equal(t, int64(1), totalCounts)
}

// testGetInputLatencyHistogram tests input histogram retrieval
func testGetInputLatencyHistogram(t *testing.T) {
    collector := GetGranularMetricsCollector()
    require.NotNil(t, collector)

    // Test when no data is recorded
    histData := collector.GetInputLatencyHistogram()
    if histData != nil {
        assert.NotNil(t, histData.Buckets)
        assert.NotNil(t, histData.Counts)
    }

    // Record some data and test again
    collector.RecordInputLatency(2 * time.Millisecond)
    histData = collector.GetInputLatencyHistogram()
    require.NotNil(t, histData)
    assert.NotNil(t, histData.Buckets)
    assert.NotNil(t, histData.Counts)
}

// testGetOutputLatencyHistogram tests output histogram retrieval
func testGetOutputLatencyHistogram(t *testing.T) {
    collector := GetGranularMetricsCollector()
    require.NotNil(t, collector)

    // Test when no data is recorded
    histData := collector.GetOutputLatencyHistogram()
    if histData != nil {
        assert.NotNil(t, histData.Buckets)
        assert.NotNil(t, histData.Counts)
    }

    // Record some data and test again
    collector.RecordOutputLatency(7 * time.Millisecond)
    histData = collector.GetOutputLatencyHistogram()
    require.NotNil(t, histData)
    assert.NotNil(t, histData.Buckets)
    assert.NotNil(t, histData.Counts)
}

// testGetLatencyPercentiles tests percentile retrieval from collector
func testGetLatencyPercentiles(t *testing.T) {
    collector := GetGranularMetricsCollector()
    require.NotNil(t, collector)

    // Record some test data
    latencies := []time.Duration{
        1 * time.Millisecond,
        5 * time.Millisecond,
        10 * time.Millisecond,
        20 * time.Millisecond,
        50 * time.Millisecond,
    }

    for _, latency := range latencies {
        collector.RecordInputLatency(latency)
        collector.RecordOutputLatency(latency)
    }

    // Test percentiles map
    percentilesMap := collector.GetLatencyPercentiles()
    require.NotNil(t, percentilesMap)

    // Test input percentiles if available
    if inputPercentiles, exists := percentilesMap["input"]; exists {
        assert.Greater(t, inputPercentiles.P50, time.Duration(0))
        assert.Greater(t, inputPercentiles.P95, time.Duration(0))
        assert.Greater(t, inputPercentiles.P99, time.Duration(0))
    }

    // Test output percentiles if available
    if outputPercentiles, exists := percentilesMap["output"]; exists {
        assert.Greater(t, outputPercentiles.P50, time.Duration(0))
        assert.Greater(t, outputPercentiles.P95, time.Duration(0))
        assert.Greater(t, outputPercentiles.P99, time.Duration(0))
    }
}

// testConcurrentCollectorAccess tests thread safety of the collector
func testConcurrentCollectorAccess(t *testing.T) {
    collector := GetGranularMetricsCollector()

@@ -435,126 +45,56 @@ func testConcurrentCollectorAccess(t *testing.T) {
    const operationsPerGoroutine = 50

    var wg sync.WaitGroup
    wg.Add(numGoroutines * 3) // 3 types of operations
    wg.Add(numGoroutines)

    // Concurrent input latency recording
    // Concurrent buffer pool operations
    for i := 0; i < numGoroutines; i++ {
        go func(id int) {
            defer wg.Done()
            for j := 0; j < operationsPerGoroutine; j++ {
                latency := time.Duration(id*j+1) * time.Microsecond
                collector.RecordInputLatency(latency)
                // Test buffer pool operations
                latency := time.Duration(id*operationsPerGoroutine+j) * time.Microsecond
                collector.RecordFramePoolGet(latency, true)
                collector.RecordFramePoolPut(latency, 1024)
            }
        }(i)
    }

    // Concurrent output latency recording
    for i := 0; i < numGoroutines; i++ {
        go func(id int) {
            defer wg.Done()
            for j := 0; j < operationsPerGoroutine; j++ {
                latency := time.Duration(id*j+1) * time.Microsecond
                collector.RecordOutputLatency(latency)
            }
        }(i)
    }

    // Concurrent data retrieval
    for i := 0; i < numGoroutines; i++ {
        go func() {
            defer wg.Done()
            for j := 0; j < operationsPerGoroutine; j++ {
                _ = collector.GetInputLatencyHistogram()
                _ = collector.GetOutputLatencyHistogram()
                _ = collector.GetLatencyPercentiles()
                time.Sleep(time.Microsecond)
            }
        }()
    }

    wg.Wait()

    // Verify final state is consistent
    inputData := collector.GetInputLatencyHistogram()
    outputData := collector.GetOutputLatencyHistogram()
    assert.NotNil(t, inputData)
    assert.NotNil(t, outputData)
}

// Benchmark tests for performance validation
func BenchmarkLatencyHistogram(b *testing.B) {
    logger := zerolog.Nop()
    hist := NewLatencyHistogram(1000, logger)

    b.Run("RecordLatency", func(b *testing.B) {
        latency := 5 * time.Millisecond
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            hist.RecordLatency(latency)
        }
    })

    b.Run("GetHistogramData", func(b *testing.B) {
        // Pre-populate with some data
        for i := 0; i < 100; i++ {
            hist.RecordLatency(time.Duration(i) * time.Microsecond)
        }
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            _ = hist.GetHistogramData()
        }
    })

    b.Run("GetPercentiles", func(b *testing.B) {
        // Pre-populate with some data
        for i := 0; i < 100; i++ {
            hist.RecordLatency(time.Duration(i) * time.Microsecond)
        }
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            _ = hist.GetPercentiles()
        }
    })
    // Verify collector is still functional
    efficiency := collector.GetBufferPoolEfficiency()
    assert.NotNil(t, efficiency)
}

func BenchmarkGranularMetricsCollector(b *testing.B) {
    collector := GetGranularMetricsCollector()

    b.Run("RecordInputLatency", func(b *testing.B) {
    b.Run("RecordFramePoolGet", func(b *testing.B) {
        latency := 5 * time.Millisecond
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            collector.RecordInputLatency(latency)
            collector.RecordFramePoolGet(latency, true)
        }
    })

    b.Run("RecordOutputLatency", func(b *testing.B) {
    b.Run("RecordFramePoolPut", func(b *testing.B) {
        latency := 5 * time.Millisecond
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            collector.RecordOutputLatency(latency)
            collector.RecordFramePoolPut(latency, 1024)
        }
    })

    b.Run("GetInputLatencyHistogram", func(b *testing.B) {
    b.Run("GetBufferPoolEfficiency", func(b *testing.B) {
        // Pre-populate with some data
        for i := 0; i < 100; i++ {
            collector.RecordInputLatency(time.Duration(i) * time.Microsecond)
            collector.RecordFramePoolGet(time.Duration(i)*time.Microsecond, true)
            collector.RecordFramePoolPut(time.Duration(i)*time.Microsecond, 1024)
        }
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            _ = collector.GetInputLatencyHistogram()
        }
    })

    b.Run("GetOutputLatencyHistogram", func(b *testing.B) {
        // Pre-populate with some data
        for i := 0; i < 100; i++ {
            collector.RecordOutputLatency(time.Duration(i) * time.Microsecond)
        }
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            _ = collector.GetOutputLatencyHistogram()
            _ = collector.GetBufferPoolEfficiency()
        }
    })
}
@@ -111,11 +111,6 @@ func (aim *AudioInputManager) WriteOpusFrame(frame []byte) error {
    aim.recordFrameProcessed(len(frame))
    aim.updateLatency(processingTime)

    // Record latency to granular metrics collector for histogram
    if granularCollector := GetGranularMetricsCollector(); granularCollector != nil {
        granularCollector.RecordInputLatency(processingTime)
    }

    return nil
}

@@ -152,11 +147,6 @@ func (aim *AudioInputManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFrame)
    aim.recordFrameProcessed(frame.Length())
    aim.updateLatency(processingTime)

    // Record latency to granular metrics collector for histogram
    if granularCollector := GetGranularMetricsCollector(); granularCollector != nil {
        granularCollector.RecordInputLatency(processingTime)
    }

    return nil
}
@@ -132,9 +132,6 @@ func (lm *LatencyMonitor) RecordLatency(latency time.Duration, source string) {
    now := time.Now()
    latencyNanos := latency.Nanoseconds()

    // Record in granular metrics histogram
    GetGranularMetricsCollector().RecordProcessingLatency(latency)

    // Update atomic counters
    atomic.StoreInt64(&lm.currentLatency, latencyNanos)
    atomic.AddInt64(&lm.latencySamples, 1)
@@ -101,10 +101,10 @@ var (
    },
)

audioAverageLatencySeconds = promauto.NewGauge(
audioAverageLatencyMilliseconds = promauto.NewGauge(
    prometheus.GaugeOpts{
        Name: "jetkvm_audio_average_latency_seconds",
        Help: "Average audio latency in seconds",
        Name: "jetkvm_audio_average_latency_milliseconds",
        Help: "Average audio latency in milliseconds",
    },
)

@@ -144,10 +144,10 @@ var (
    },
)

microphoneAverageLatencySeconds = promauto.NewGauge(
microphoneAverageLatencyMilliseconds = promauto.NewGauge(
    prometheus.GaugeOpts{
        Name: "jetkvm_microphone_average_latency_seconds",
        Help: "Average microphone latency in seconds",
        Name: "jetkvm_microphone_average_latency_milliseconds",
        Help: "Average microphone latency in milliseconds",
    },
)

@@ -416,8 +416,8 @@ var (
    // Latency percentile metrics
    latencyPercentile = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "jetkvm_audio_latency_percentile_seconds",
            Help: "Audio latency percentiles in seconds",
            Name: "jetkvm_audio_latency_percentile_milliseconds",
            Help: "Audio latency percentiles in milliseconds",
        },
        []string{"source", "percentile"}, // source: input, output, processing; percentile: p50, p95, p99, min, max, avg
    )

@@ -506,7 +506,7 @@ func UpdateAudioMetrics(metrics UnifiedAudioMetrics) {
    }

    // Update gauges
    audioAverageLatencySeconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e9)
    audioAverageLatencyMilliseconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e6)
    if !metrics.LastFrameTime.IsZero() {
        audioLastFrameTimestamp.Set(float64(metrics.LastFrameTime.Unix()))
    }

@@ -537,7 +537,7 @@ func UpdateMicrophoneMetrics(metrics UnifiedAudioMetrics) {
    }

    // Update gauges
    microphoneAverageLatencySeconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e9)
    microphoneAverageLatencyMilliseconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e6)
    if !metrics.LastFrameTime.IsZero() {
        microphoneLastFrameTimestamp.Set(float64(metrics.LastFrameTime.Unix()))
    }

@@ -704,11 +704,11 @@ func UpdateBufferPoolMetrics(poolName string, hitRate, missRate, utilization, th
}

// UpdateLatencyMetrics updates latency percentile metrics
func UpdateLatencyMetrics(source, percentile string, latencySeconds float64) {
func UpdateLatencyMetrics(source, percentile string, latencyMilliseconds float64) {
    metricsUpdateMutex.Lock()
    defer metricsUpdateMutex.Unlock()

    latencyPercentile.WithLabelValues(source, percentile).Set(latencySeconds)
    latencyPercentile.WithLabelValues(source, percentile).Set(latencyMilliseconds)

    atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
}
@@ -103,11 +103,6 @@ func (aom *AudioOutputIPCManager) WriteOpusFrame(frame *ZeroCopyAudioFrame) erro
    aom.recordFrameProcessed(frame.Length())
    aom.updateLatency(processingTime)

    // Record latency to granular metrics collector for histogram
    if granularCollector := GetGranularMetricsCollector(); granularCollector != nil {
        granularCollector.RecordOutputLatency(processingTime)
    }

    return nil
}

@@ -137,11 +132,6 @@ func (aom *AudioOutputIPCManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFra
    aom.recordFrameProcessed(len(frameData))
    aom.updateLatency(processingTime)

    // Record latency to granular metrics collector for histogram
    if granularCollector := GetGranularMetricsCollector(); granularCollector != nil {
        granularCollector.RecordOutputLatency(processingTime)
    }

    return nil
}
@@ -4,7 +4,6 @@ import { LuActivity, LuClock, LuHardDrive, LuSettings, LuCpu, LuMemoryStick } fr

import { AudioLevelMeter } from "@components/AudioLevelMeter";
import StatChart from "@components/StatChart";
import LatencyHistogram from "@components/charts/LatencyHistogram";
import { cx } from "@/cva.config";
import { useMicrophone } from "@/hooks/useMicrophone";
import { useAudioLevel } from "@/hooks/useAudioLevel";

@@ -13,11 +12,6 @@ import api from "@/api";
import { AUDIO_CONFIG } from "@/config/constants";
import audioQualityService from "@/services/audioQualityService";

interface LatencyHistogramData {
  buckets: number[]; // Bucket boundaries in milliseconds
  counts: number[]; // Count for each bucket
}

interface AudioMetrics {
  frames_received: number;
  frames_dropped: number;

@@ -25,7 +19,6 @@ interface AudioMetrics {
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
  latency_histogram?: LatencyHistogramData;
}

interface MicrophoneMetrics {

@@ -35,7 +28,6 @@ interface MicrophoneMetrics {
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
  latency_histogram?: LatencyHistogramData;
}

interface ProcessMetrics {

@@ -511,25 +503,7 @@ export default function AudioMetricsDashboard() {
        )}
      </div>

      {/* Latency Histograms */}
      {metrics && (
        <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
          <LatencyHistogram
            data={audioMetrics?.latency_histogram}
            title="Audio Output Latency Distribution"
            height={180}
            className=""
          />
          {microphoneMetrics && (
            <LatencyHistogram
              data={microphoneMetrics.latency_histogram}
              title="Microphone Input Latency Distribution"
              height={180}
              className=""
            />
          )}
        </div>
      )}

      {/* Subprocess Resource Usage - Histogram View */}
      <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
@@ -1,145 +0,0 @@
import React, { useMemo } from 'react';
import { BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer } from 'recharts';

import { LatencyHistogramData } from '../../hooks/useAudioEvents';

interface LatencyHistogramProps {
  data?: LatencyHistogramData;
  title: string;
  height?: number;
  className?: string;
}

interface ChartDataPoint {
  bucket: string;
  count: number;
  bucketValue: number;
}

const LatencyHistogram: React.FC<LatencyHistogramProps> = ({
  data,
  title,
  height = 200,
  className = ''
}) => {
  // Memoize chart data transformation to avoid recalculation on every render
  const chartData = useMemo((): ChartDataPoint[] => {
    if (!data || !data.buckets || !data.counts || data.buckets.length === 0) {
      return [];
    }

    const transformedData: ChartDataPoint[] = [];

    // Process each bucket with its count
    for (let i = 0; i < data.buckets.length; i++) {
      const bucketValue = data.buckets[i];
      const count = data.counts[i] || 0;

      // Skip empty buckets to reduce chart clutter
      if (count === 0) continue;

      // Format bucket label based on value
      let bucketLabel: string;
      if (bucketValue < 1) {
        bucketLabel = `${(bucketValue * 1000).toFixed(0)}μs`;
      } else if (bucketValue < 1000) {
        bucketLabel = `${bucketValue.toFixed(1)}ms`;
      } else {
        bucketLabel = `${(bucketValue / 1000).toFixed(1)}s`;
      }

      transformedData.push({
        bucket: bucketLabel,
        count,
        bucketValue
      });
    }

    // Handle overflow bucket (last count if it exists)
    if (data.counts.length > data.buckets.length) {
      const overflowCount = data.counts[data.counts.length - 1];
      if (overflowCount > 0) {
        transformedData.push({
          bucket: '>2s',
          count: overflowCount,
          bucketValue: 2000 // 2 seconds in ms
        });
      }
    }

    return transformedData;
  }, [data]);

  // Custom tooltip for better UX
  const CustomTooltip = ({ active, payload, label }: {
    active?: boolean;
    payload?: { payload: ChartDataPoint }[];
    label?: string;
  }) => {
    if (active && payload && payload.length) {
      const data = payload[0].payload;
      return (
        <div className="bg-gray-800 text-white p-2 rounded shadow-lg border border-gray-600">
          <p className="font-medium">{`Latency: ${label}`}</p>
          <p className="text-blue-300">{`Count: ${data.count}`}</p>
        </div>
      );
    }
    return null;
  };

  if (!data || chartData.length === 0) {
    return (
      <div className={`bg-gray-50 dark:bg-gray-800 rounded-lg p-4 ${className}`}>
        <h3 className="text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">
          {title}
        </h3>
        <div className="flex items-center justify-center h-32 text-gray-500 dark:text-gray-400">
          <span className="text-sm">No latency data available</span>
        </div>
      </div>
    );
  }

  return (
    <div className={`bg-gray-50 dark:bg-gray-800 rounded-lg p-4 ${className}`}>
      <h3 className="text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">
        {title}
      </h3>
      <ResponsiveContainer width="100%" height={height}>
        <BarChart
          data={chartData}
          margin={{
            top: 5,
            right: 5,
            left: 5,
            bottom: 5,
          }}
        >
          <CartesianGrid strokeDasharray="3 3" stroke="#374151" opacity={0.3} />
          <XAxis
            dataKey="bucket"
            tick={{ fontSize: 11, fill: '#6B7280' }}
            axisLine={{ stroke: '#6B7280' }}
            tickLine={{ stroke: '#6B7280' }}
          />
          <YAxis
            tick={{ fontSize: 11, fill: '#6B7280' }}
            axisLine={{ stroke: '#6B7280' }}
            tickLine={{ stroke: '#6B7280' }}
          />
          <Tooltip content={<CustomTooltip />} />
          <Bar
            dataKey="count"
            fill="#3B82F6"
            radius={[2, 2, 0, 0]}
            stroke="#1E40AF"
            strokeWidth={1}
          />
        </BarChart>
      </ResponsiveContainer>
    </div>
  );
};

export default LatencyHistogram;
@@ -19,11 +19,6 @@ export interface AudioMuteData {
  muted: boolean;
}

export interface LatencyHistogramData {
  buckets: number[]; // Bucket boundaries in milliseconds
  counts: number[]; // Count for each bucket
}

export interface AudioMetricsData {
  frames_received: number;
  frames_dropped: number;

@@ -31,7 +26,6 @@ export interface AudioMetricsData {
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
  latency_histogram?: LatencyHistogramData;
}

export interface MicrophoneStateData {

@@ -46,7 +40,6 @@ export interface MicrophoneMetricsData {
  last_frame_time: string;
  connection_drops: number;
  average_latency: string;
  latency_histogram?: LatencyHistogramData;
}

export interface ProcessMetricsData {