Fix: linting errors

Alex P 2025-08-24 23:36:29 +00:00
parent a9a1082bcc
commit 3a28105f56
15 changed files with 320 additions and 317 deletions

View File

@@ -27,7 +27,6 @@ type AdaptiveOptimizer struct {
// Configuration
config OptimizerConfig
mutex sync.RWMutex
}
// OptimizerConfig holds configuration for the adaptive optimizer
@@ -39,8 +38,6 @@ type OptimizerConfig struct {
StabilityPeriod time.Duration // Time to wait for stability after optimization
}
// DefaultOptimizerConfig returns a sensible default configuration
func DefaultOptimizerConfig() OptimizerConfig {
return OptimizerConfig{
@@ -65,8 +62,6 @@ func NewAdaptiveOptimizer(latencyMonitor *LatencyMonitor, bufferManager *Adaptiv
cancel: cancel,
}
// Register as latency monitor callback
latencyMonitor.AddOptimizationCallback(optimizer.handleLatencyOptimization)
@@ -89,7 +84,6 @@ func (ao *AdaptiveOptimizer) Stop() {
// initializeStrategies sets up the available optimization strategies
// handleLatencyOptimization is called when latency optimization is needed
func (ao *AdaptiveOptimizer) handleLatencyOptimization(metrics LatencyMetrics) error {
currentLevel := atomic.LoadInt64(&ao.optimizationLevel)
@@ -185,7 +179,9 @@ func (ao *AdaptiveOptimizer) checkStability() {
currentLevel := int(atomic.LoadInt64(&ao.optimizationLevel))
if currentLevel > 0 {
ao.logger.Warn().Dur("current_latency", metrics.Current).Dur("threshold", ao.config.RollbackThreshold).Msg("Rolling back optimizations due to excessive latency")
ao.decreaseOptimization(currentLevel - 1)
if err := ao.decreaseOptimization(currentLevel - 1); err != nil {
ao.logger.Error().Err(err).Msg("Failed to decrease optimization level")
}
}
}
}

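The rollback hunk above now checks the value returned by decreaseOptimization instead of discarding it, which is the usual way to clear an errcheck finding. A minimal, self-contained sketch of that pattern follows; the optimizer type, method, and log call are placeholders, not the real code from this repository:

package main

import (
	"errors"
	"log"
)

type optimizer struct{ level int }

// decreaseOptimization is a stand-in for the real method; it fails when
// asked to go below level zero.
func (o *optimizer) decreaseOptimization(target int) error {
	if target < 0 {
		return errors.New("optimization level cannot be negative")
	}
	o.level = target
	return nil
}

func main() {
	o := &optimizer{level: 2}
	// Capture the error in the if-statement scope and log it instead of
	// dropping it, which satisfies errcheck.
	if err := o.decreaseOptimization(o.level - 1); err != nil {
		log.Printf("failed to decrease optimization level: %v", err)
	}
}
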
View File

@@ -59,7 +59,7 @@ func (p *AudioBufferPool) Get() []byte {
// Try sync.Pool next
if buf := p.pool.Get(); buf != nil {
bufSlice := buf.([]byte)
bufPtr := buf.(*[]byte)
// Update pool size counter when retrieving from pool
p.mutex.Lock()
if p.currentSize > 0 {
@@ -67,7 +67,7 @@ func (p *AudioBufferPool) Get() []byte {
}
p.mutex.Unlock()
atomic.AddInt64(&p.hitCount, 1)
return bufSlice[:0] // Reset length but keep capacity
return (*bufPtr)[:0] // Reset length but keep capacity
}
// Last resort: allocate new buffer
@@ -102,7 +102,7 @@ func (p *AudioBufferPool) Put(buf []byte) {
}
// Return to sync.Pool
p.pool.Put(resetBuf)
p.pool.Put(&resetBuf)
// Update pool size counter
p.mutex.Lock()

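The buffer-pool hunks switch the sync.Pool from storing []byte values to storing *[]byte, the fix staticcheck suggests for SA6002: a plain slice header gets boxed into an interface value and allocates on every Put, while a pointer does not. A small sketch of the pointer-based pattern, independent of the real AudioBufferPool:

package main

import (
	"fmt"
	"sync"
)

// pool hands out *[]byte so the slice header is not copied into a freshly
// allocated interface value on every Put (staticcheck SA6002).
var pool = sync.Pool{
	New: func() any {
		b := make([]byte, 0, 1500)
		return &b
	},
}

func main() {
	bufPtr := pool.Get().(*[]byte)
	buf := (*bufPtr)[:0] // reset length but keep capacity
	buf = append(buf, "frame data"...)
	fmt.Println(len(buf), cap(buf))

	// Return the same pointer; the backing array is reused on the next Get.
	*bufPtr = buf
	pool.Put(bufPtr)
}
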
View File

@@ -73,8 +73,11 @@ var globalMessagePool = &MessagePool{
pool: make(chan *OptimizedIPCMessage, messagePoolSize),
}
// Initialize the message pool with pre-allocated messages
func init() {
var messagePoolInitOnce sync.Once
// initializeMessagePool initializes the message pool with pre-allocated messages
func initializeMessagePool() {
messagePoolInitOnce.Do(func() {
// Pre-allocate 30% of pool size for immediate availability
preallocSize := messagePoolSize * 30 / 100
globalMessagePool.preallocSize = preallocSize
@@ -95,10 +98,12 @@ func init() {
data: make([]byte, 0, maxFrameSize),
}
}
})
}
// Get retrieves a message from the pool
func (mp *MessagePool) Get() *OptimizedIPCMessage {
initializeMessagePool()
// First try pre-allocated messages for fastest access
mp.mutex.Lock()
if len(mp.preallocated) > 0 {

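Replacing the init() function with a sync.Once-guarded initializeMessagePool keeps the pre-allocation lazy, avoids the init-function lint (gochecknoinits), and remains safe when several goroutines call Get concurrently. A reduced sketch of the same idea; the item type and pool sizes here are illustrative, not the real OptimizedIPCMessage values:

package main

import (
	"fmt"
	"sync"
)

type item struct{ data []byte }

var (
	itemPool     = make(chan *item, 64)
	poolInitOnce sync.Once
)

// initItemPool pre-allocates a fraction of the pool exactly once, no matter
// how many goroutines race to call it.
func initItemPool() {
	poolInitOnce.Do(func() {
		prealloc := cap(itemPool) * 30 / 100
		for i := 0; i < prealloc; i++ {
			itemPool <- &item{data: make([]byte, 0, 1500)}
		}
	})
}

// getItem lazily initializes the pool, then falls back to a fresh allocation
// when no pre-allocated item is available.
func getItem() *item {
	initItemPool()
	select {
	case it := <-itemPool:
		return it
	default:
		return &item{data: make([]byte, 0, 1500)}
	}
}

func main() {
	it := getItem()
	fmt.Println(cap(it.data))
}
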
View File

@@ -102,7 +102,6 @@ var globalOutputMessagePool = NewOutputMessagePool(outputMessagePoolSize)
type AudioServer struct {
// Atomic fields must be first for proper alignment on ARM
bufferSize int64 // Current buffer size (atomic)
processingTime int64 // Average processing time in nanoseconds (atomic)
droppedFrames int64 // Dropped frames counter (atomic)
totalFrames int64 // Total frames counter (atomic)
@@ -216,7 +215,10 @@ func (s *AudioServer) startProcessorGoroutine() {
case msg := <-s.messageChan:
// Process message (currently just frame sending)
if msg.Type == OutputMessageTypeOpusFrame {
s.sendFrameToClient(msg.Data)
if err := s.sendFrameToClient(msg.Data); err != nil {
// Count the failed send as a dropped frame and continue processing
atomic.AddInt64(&s.droppedFrames, 1)
}
}
case <-s.stopChan:
return

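The comment kept at the top of AudioServer ("Atomic fields must be first for proper alignment on ARM") reflects a real Go constraint: on 32-bit platforms the int64 fields passed to sync/atomic must be 8-byte aligned, and placing them first in the struct is the conventional way to guarantee it. A reduced sketch of that layout with illustrative field names:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters keeps all atomically accessed int64 fields first so they start at
// offset 0 and stay 8-byte aligned even on 32-bit ARM builds.
type counters struct {
	droppedFrames int64 // atomic
	totalFrames   int64 // atomic

	// Non-atomic fields can follow in any order.
	name string
}

func main() {
	c := &counters{name: "audio-server"}
	atomic.AddInt64(&c.totalFrames, 1)
	atomic.AddInt64(&c.droppedFrames, 1)
	fmt.Println(atomic.LoadInt64(&c.totalFrames), atomic.LoadInt64(&c.droppedFrames))
}
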
View File

@@ -192,7 +192,7 @@ func (s *OutputStreamer) processingLoop() {
}
}()
for _ = range s.processingChan {
for range s.processingChan {
// Process frame (currently just receiving, but can be extended)
if _, err := s.client.ReceiveFrame(); err != nil {
if s.client.IsConnected() {