perf(audio): increase worker pool sizes and optimize worker management

Double the worker counts and queue sizes to absorb higher-load scenarios.
Rework worker management to keep a minimum of 2 persistent workers with a longer
idle timeout, and to spawn additional workers only when the queue backlog
outgrows the current worker count.
Alex P 2025-09-03 12:54:07 +00:00
parent d7b67e5012
commit f9781f170c
2 changed files with 27 additions and 16 deletions

@@ -2435,11 +2435,11 @@ func DefaultAudioConfig() *AudioConfigConstants {
 		EventSubscriptionDelayMS: 100, // 100ms subscription delay
 		// Goroutine Pool Configuration
-		MaxAudioProcessorWorkers: 8,  // 8 workers for audio processing tasks
-		MaxAudioReaderWorkers:    4,  // 4 workers for audio reading tasks
-		AudioProcessorQueueSize:  32, // 32 tasks queue size for processor pool
-		AudioReaderQueueSize:     16, // 16 tasks queue size for reader pool
-		WorkerMaxIdleTime:        30 * time.Second, // 30s maximum idle time before worker termination
+		MaxAudioProcessorWorkers: 16, // 16 workers for audio processing tasks
+		MaxAudioReaderWorkers:    8,  // 8 workers for audio reading tasks
+		AudioProcessorQueueSize:  64, // 64 tasks queue size for processor pool
+		AudioReaderQueueSize:     32, // 32 tasks queue size for reader pool
+		WorkerMaxIdleTime:        60 * time.Second, // 60s maximum idle time before worker termination
 		// Input Processing Constants
 		InputProcessingTimeoutMS: 10, // 10ms processing timeout threshold
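
For context, a runnable stand-in showing the new pool sizing in one place. The
struct below is trimmed to the fields this hunk touches; it is a sketch, not
the repository's full AudioConfigConstants type.

package main

import (
	"fmt"
	"time"
)

// Trimmed stand-in for AudioConfigConstants, limited to the pool fields
// changed in the hunk above.
type AudioConfigConstants struct {
	MaxAudioProcessorWorkers int
	MaxAudioReaderWorkers    int
	AudioProcessorQueueSize  int
	AudioReaderQueueSize     int
	WorkerMaxIdleTime        time.Duration
}

// DefaultAudioConfig mirrors the post-change values from the diff.
func DefaultAudioConfig() *AudioConfigConstants {
	return &AudioConfigConstants{
		MaxAudioProcessorWorkers: 16,
		MaxAudioReaderWorkers:    8,
		AudioProcessorQueueSize:  64,
		AudioReaderQueueSize:     32,
		WorkerMaxIdleTime:        60 * time.Second,
	}
}

func main() {
	cfg := DefaultAudioConfig()
	fmt.Printf("processor pool: %d workers, queue %d\n", cfg.MaxAudioProcessorWorkers, cfg.AudioProcessorQueueSize)
	fmt.Printf("reader pool: %d workers, queue %d\n", cfg.MaxAudioReaderWorkers, cfg.AudioReaderQueueSize)
}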

@@ -67,13 +67,22 @@ func (p *GoroutinePool) Submit(task Task) bool {
 // ensureWorkerAvailable makes sure at least one worker is available to process tasks
 func (p *GoroutinePool) ensureWorkerAvailable() {
-	// Try to acquire a semaphore slot without blocking
-	select {
-	case p.workerSem <- struct{}{}:
-		// We got a slot, start a new worker
-		p.startWorker()
-	default:
-		// All worker slots are taken, which means we have enough workers
-	}
+	// Check if we already have enough workers
+	currentWorkers := atomic.LoadInt64(&p.workerCount)
+	// Only start new workers if:
+	// 1. We have no workers at all, or
+	// 2. The queue is growing and we're below max workers
+	queueLen := len(p.taskQueue)
+	if currentWorkers == 0 || (queueLen > int(currentWorkers) && currentWorkers < int64(p.maxWorkers)) {
+		// Try to acquire a semaphore slot without blocking
+		select {
+		case p.workerSem <- struct{}{}:
+			// We got a slot, start a new worker
+			p.startWorker()
+		default:
+			// All worker slots are taken, which means we have enough workers
+		}
+	}
 }
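
The scaling decision above can be read in isolation: a worker is spawned only
when the pool is empty, or when the queued backlog outnumbers the live workers
and the pool is still under its cap. A minimal sketch with a few worked cases;
shouldScaleUp is a hypothetical helper, not a function in the repository.

package main

import "fmt"

// shouldScaleUp mirrors the condition in ensureWorkerAvailable: spawn only
// when there are no workers at all, or when the backlog exceeds the worker
// count and the pool is still below maxWorkers.
func shouldScaleUp(currentWorkers, queueLen, maxWorkers int) bool {
	return currentWorkers == 0 || (queueLen > currentWorkers && currentWorkers < maxWorkers)
}

func main() {
	fmt.Println(shouldScaleUp(0, 0, 16))   // true: no workers at all
	fmt.Println(shouldScaleUp(4, 5, 16))   // true: backlog (5) exceeds workers (4)
	fmt.Println(shouldScaleUp(4, 3, 16))   // false: workers keep up with the queue
	fmt.Println(shouldScaleUp(16, 40, 16)) // false: already at the worker cap
}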
@@ -124,12 +133,14 @@ func (p *GoroutinePool) startWorker() {
 			atomic.AddInt64(&p.taskCount, 1)
 		case <-idleTimer.C:
-			// Worker has been idle for too long, exit if we have more than minimum workers
-			if atomic.LoadInt64(&p.workerCount) > 1 {
+			// Worker has been idle for too long
+			// Keep at least 2 workers alive to handle incoming tasks without creating new goroutines
+			if atomic.LoadInt64(&p.workerCount) > 2 {
 				return
 			}
-			// Reset timer for the minimum worker
-			idleTimer.Reset(p.maxIdleTime)
+			// For persistent workers (the minimum 2), use a longer idle timeout
+			// This prevents excessive worker creation/destruction cycles
+			idleTimer.Reset(p.maxIdleTime * 3) // Triple the idle time for persistent workers
 		}
 	}
 }()
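
Taken together, the idle policy is: any worker beyond the persistent pair exits
after maxIdleTime, while the last two stay alive and re-arm their timer at
triple length. A self-contained sketch of that loop, assuming a simplified pool
type rather than the repository's GoroutinePool.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Simplified stand-in for the pool; only what the idle policy needs.
type pool struct {
	taskQueue   chan func()
	workerCount int64
	maxIdleTime time.Duration
}

func (p *pool) worker() {
	defer atomic.AddInt64(&p.workerCount, -1)
	idleTimer := time.NewTimer(p.maxIdleTime)
	defer idleTimer.Stop()
	for {
		select {
		case task := <-p.taskQueue:
			task()
			// Drain and reset so idleness is measured from the last task,
			// not from worker start.
			if !idleTimer.Stop() {
				<-idleTimer.C
			}
			idleTimer.Reset(p.maxIdleTime)
		case <-idleTimer.C:
			// Exit only while more than the two persistent workers remain.
			if atomic.LoadInt64(&p.workerCount) > 2 {
				return
			}
			// The persistent pair waits three times longer before re-checking,
			// damping create/destroy churn under bursty load.
			idleTimer.Reset(p.maxIdleTime * 3)
		}
	}
}

func main() {
	p := &pool{taskQueue: make(chan func(), 4), maxIdleTime: 20 * time.Millisecond}
	atomic.AddInt64(&p.workerCount, 1)
	go p.worker()
	done := make(chan struct{})
	p.taskQueue <- func() { close(done) }
	<-done
	time.Sleep(100 * time.Millisecond) // well past maxIdleTime
	// Prints 1: the sole worker is within the persistent minimum and never exits.
	fmt.Println("workers still alive:", atomic.LoadInt64(&p.workerCount))
}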