perf(audio): increase worker pool sizes and optimize worker management

Double worker counts and queue sizes to handle higher-load scenarios
Modify worker management to maintain a minimum of 2 persistent workers with a longer idle timeout
Alex P 2025-09-03 12:54:07 +00:00
parent d7b67e5012
commit f9781f170c
2 changed files with 27 additions and 16 deletions


@@ -2435,11 +2435,11 @@ func DefaultAudioConfig() *AudioConfigConstants {
     EventSubscriptionDelayMS: 100, // 100ms subscription delay
     // Goroutine Pool Configuration
-    MaxAudioProcessorWorkers: 8,                // 8 workers for audio processing tasks
-    MaxAudioReaderWorkers:    4,                // 4 workers for audio reading tasks
-    AudioProcessorQueueSize:  32,               // 32 tasks queue size for processor pool
-    AudioReaderQueueSize:     16,               // 16 tasks queue size for reader pool
-    WorkerMaxIdleTime:        30 * time.Second, // 30s maximum idle time before worker termination
+    MaxAudioProcessorWorkers: 16,               // 16 workers for audio processing tasks
+    MaxAudioReaderWorkers:    8,                // 8 workers for audio reading tasks
+    AudioProcessorQueueSize:  64,               // 64 tasks queue size for processor pool
+    AudioReaderQueueSize:     32,               // 32 tasks queue size for reader pool
+    WorkerMaxIdleTime:        60 * time.Second, // 60s maximum idle time before worker termination
    // Input Processing Constants
    InputProcessingTimeoutMS: 10, // 10ms processing timeout threshold
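Both pools keep the same 4:1 queue-to-worker ratio after the doubling (16 workers / 64 queue slots for processing, 8 / 32 for reading). The sketch below only illustrates those values, using a hypothetical poolConfig struct that mirrors the touched fields rather than the repository's full AudioConfigConstants type:

package main

import (
    "fmt"
    "time"
)

// poolConfig is a hypothetical subset of AudioConfigConstants, limited to the
// fields touched by this commit; the real struct has many more fields.
type poolConfig struct {
    MaxAudioProcessorWorkers int
    MaxAudioReaderWorkers    int
    AudioProcessorQueueSize  int
    AudioReaderQueueSize     int
    WorkerMaxIdleTime        time.Duration
}

func main() {
    cfg := poolConfig{
        MaxAudioProcessorWorkers: 16,
        MaxAudioReaderWorkers:    8,
        AudioProcessorQueueSize:  64,
        AudioReaderQueueSize:     32,
        WorkerMaxIdleTime:        60 * time.Second,
    }
    // Queue depth stays at 4x the worker count, so bursts can back up briefly
    // before a full queue forces the submitter to fall back.
    fmt.Println("processor queue/worker ratio:", cfg.AudioProcessorQueueSize/cfg.MaxAudioProcessorWorkers)
    fmt.Println("reader queue/worker ratio:", cfg.AudioReaderQueueSize/cfg.MaxAudioReaderWorkers)
    fmt.Println("idle timeout:", cfg.WorkerMaxIdleTime)
}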


@@ -67,6 +67,14 @@ func (p *GoroutinePool) Submit(task Task) bool {
 // ensureWorkerAvailable makes sure at least one worker is available to process tasks
 func (p *GoroutinePool) ensureWorkerAvailable() {
+    // Check if we already have enough workers
+    currentWorkers := atomic.LoadInt64(&p.workerCount)
+    // Only start new workers if:
+    // 1. We have no workers at all, or
+    // 2. The queue is growing and we're below max workers
+    queueLen := len(p.taskQueue)
+    if currentWorkers == 0 || (queueLen > int(currentWorkers) && currentWorkers < int64(p.maxWorkers)) {
     // Try to acquire a semaphore slot without blocking
     select {
     case p.workerSem <- struct{}{}:
@@ -76,6 +84,7 @@ func (p *GoroutinePool) ensureWorkerAvailable() {
         // All worker slots are taken, which means we have enough workers
     }
 }
+}

 // startWorker launches a new worker goroutine
 func (p *GoroutinePool) startWorker() {
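The two hunks above wrap the semaphore acquisition in a growth check, so a worker is only spawned when the pool is empty or the backlog has outgrown the current workers. A standalone restatement of that condition, using a hypothetical helper name that is not part of the repository:

package main

import "fmt"

// shouldStartWorker mirrors the condition added to ensureWorkerAvailable:
// spawn a worker only when there are no workers at all, or when the queue is
// longer than the current worker count and the cap has not been reached.
func shouldStartWorker(currentWorkers, queueLen, maxWorkers int) bool {
    return currentWorkers == 0 || (queueLen > currentWorkers && currentWorkers < maxWorkers)
}

func main() {
    fmt.Println(shouldStartWorker(0, 0, 16))   // true: no workers at all
    fmt.Println(shouldStartWorker(4, 2, 16))   // false: backlog smaller than worker count
    fmt.Println(shouldStartWorker(4, 10, 16))  // true: queue is outgrowing the workers
    fmt.Println(shouldStartWorker(16, 99, 16)) // false: already at maxWorkers
}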
@@ -124,12 +133,14 @@ func (p *GoroutinePool) startWorker() {
             atomic.AddInt64(&p.taskCount, 1)
         case <-idleTimer.C:
-            // Worker has been idle for too long, exit if we have more than minimum workers
-            if atomic.LoadInt64(&p.workerCount) > 1 {
+            // Worker has been idle for too long
+            // Keep at least 2 workers alive to handle incoming tasks without creating new goroutines
+            if atomic.LoadInt64(&p.workerCount) > 2 {
                 return
             }
-            // Reset timer for the minimum worker
-            idleTimer.Reset(p.maxIdleTime)
+            // For persistent workers (the minimum 2), use a longer idle timeout
+            // This prevents excessive worker creation/destruction cycles
+            idleTimer.Reset(p.maxIdleTime * 3) // Triple the idle time for persistent workers
         }
     }
 }()
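With this change, surplus workers still exit after maxIdleTime, while the last two stay resident and simply re-arm a tripled timer. A minimal, self-contained sketch of that loop, using illustrative names rather than the repository's GoroutinePool API:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// worker drains tasks until it has been idle too long. Workers beyond the
// first two exit after maxIdleTime; the two persistent workers keep waiting
// with a tripled timeout instead of terminating.
func worker(id int, tasks <-chan func(), workerCount *int64, maxIdleTime time.Duration) {
    atomic.AddInt64(workerCount, 1)
    defer atomic.AddInt64(workerCount, -1)

    idleTimer := time.NewTimer(maxIdleTime)
    defer idleTimer.Stop()

    for {
        select {
        case task, ok := <-tasks:
            if !ok {
                return
            }
            task()
            idleTimer.Reset(maxIdleTime)
        case <-idleTimer.C:
            // Surplus workers exit; the minimum of 2 stays resident.
            if atomic.LoadInt64(workerCount) > 2 {
                fmt.Printf("worker %d: idle, exiting\n", id)
                return
            }
            // Persistent workers wait three times longer before checking again,
            // which avoids rapid create/destroy cycles under bursty load.
            idleTimer.Reset(maxIdleTime * 3)
        }
    }
}

func main() {
    var workerCount int64
    tasks := make(chan func(), 64)
    for i := 0; i < 4; i++ {
        go worker(i, tasks, &workerCount, 100*time.Millisecond)
    }
    tasks <- func() { fmt.Println("processing one task") }
    time.Sleep(500 * time.Millisecond)
    fmt.Println("workers still alive:", atomic.LoadInt64(&workerCount))
}

Tripling the timeout for the two resident workers trades a small amount of idle memory for fewer goroutine spawns, which is the "excessive worker creation/destruction cycles" the new comment refers to.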