mirror of https://github.com/jetkvm/kvm.git
feat(audio): centralize audio configuration and improve debugging
- Add debug utilities with development-only logging
- Create centralized audio configuration constants
- Implement audio quality service for managing presets
- Replace console logging with debug utilities
- Update audio metrics with unified structure
- Improve microphone error handling and state management
parent e8d12bae4b · commit 8fb0b9f9c6
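The diff below imports a new debug module (`@/utils/debug`, exposing devLog, devInfo, devWarn, devError, and devOnly) whose own file is not part of this view. As a hedged sketch only — the actual implementation in the commit may differ — a development-only logging wrapper along these lines would satisfy every call site shown here (the `import.meta.env.DEV` gate is an assumption based on the project's Vite frontend):

```typescript
// Hypothetical sketch of the debug utilities; the real file is not shown in this diff.
// Assumes a Vite-style import.meta.env.DEV flag as the development gate.
const isDevelopment: boolean = import.meta.env.DEV;

export const devLog = (...args: unknown[]): void => {
  if (isDevelopment) console.log(...args);
};

export const devInfo = (...args: unknown[]): void => {
  if (isDevelopment) console.info(...args);
};

export const devWarn = (...args: unknown[]): void => {
  if (isDevelopment) console.warn(...args);
};

export const devError = (...args: unknown[]): void => {
  if (isDevelopment) console.error(...args);
};

// Runs a callback only in development builds (used below to gate a stats dump).
export const devOnly = (fn: () => void): void => {
  if (isDevelopment) fn();
};
```

In production builds these become no-ops, so the verbose microphone and WebRTC state dumps seen later in this diff cost nothing for end users.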
@@ -301,8 +301,45 @@ var (
 	micConnectionDropsValue int64
 )
 
+// UnifiedAudioMetrics provides a common structure for both input and output audio streams
+type UnifiedAudioMetrics struct {
+	FramesReceived  int64         `json:"frames_received"`
+	FramesDropped   int64         `json:"frames_dropped"`
+	FramesSent      int64         `json:"frames_sent,omitempty"`
+	BytesProcessed  int64         `json:"bytes_processed"`
+	ConnectionDrops int64         `json:"connection_drops"`
+	LastFrameTime   time.Time     `json:"last_frame_time"`
+	AverageLatency  time.Duration `json:"average_latency"`
+}
+
+// convertAudioMetricsToUnified converts AudioMetrics to UnifiedAudioMetrics
+func convertAudioMetricsToUnified(metrics AudioMetrics) UnifiedAudioMetrics {
+	return UnifiedAudioMetrics{
+		FramesReceived:  metrics.FramesReceived,
+		FramesDropped:   metrics.FramesDropped,
+		FramesSent:      0, // AudioMetrics doesn't have FramesSent
+		BytesProcessed:  metrics.BytesProcessed,
+		ConnectionDrops: metrics.ConnectionDrops,
+		LastFrameTime:   metrics.LastFrameTime,
+		AverageLatency:  metrics.AverageLatency,
+	}
+}
+
+// convertAudioInputMetricsToUnified converts AudioInputMetrics to UnifiedAudioMetrics
+func convertAudioInputMetricsToUnified(metrics AudioInputMetrics) UnifiedAudioMetrics {
+	return UnifiedAudioMetrics{
+		FramesReceived:  0, // AudioInputMetrics doesn't have FramesReceived
+		FramesDropped:   metrics.FramesDropped,
+		FramesSent:      metrics.FramesSent,
+		BytesProcessed:  metrics.BytesProcessed,
+		ConnectionDrops: metrics.ConnectionDrops,
+		LastFrameTime:   metrics.LastFrameTime,
+		AverageLatency:  metrics.AverageLatency,
+	}
+}
+
 // UpdateAudioMetrics updates Prometheus metrics with current audio data
-func UpdateAudioMetrics(metrics AudioMetrics) {
+func UpdateAudioMetrics(metrics UnifiedAudioMetrics) {
 	oldReceived := atomic.SwapInt64(&audioFramesReceivedValue, metrics.FramesReceived)
 	if metrics.FramesReceived > oldReceived {
 		audioFramesReceivedTotal.Add(float64(metrics.FramesReceived - oldReceived))
@@ -333,7 +370,7 @@ func UpdateAudioMetrics(metrics AudioMetrics) {
 }
 
 // UpdateMicrophoneMetrics updates Prometheus metrics with current microphone data
-func UpdateMicrophoneMetrics(metrics AudioInputMetrics) {
+func UpdateMicrophoneMetrics(metrics UnifiedAudioMetrics) {
 	oldSent := atomic.SwapInt64(&micFramesSentValue, metrics.FramesSent)
 	if metrics.FramesSent > oldSent {
 		microphoneFramesSentTotal.Add(float64(metrics.FramesSent - oldSent))
@@ -457,11 +494,11 @@ func StartMetricsUpdater() {
 	for range ticker.C {
 		// Update audio output metrics
 		audioMetrics := GetAudioMetrics()
-		UpdateAudioMetrics(audioMetrics)
+		UpdateAudioMetrics(convertAudioMetricsToUnified(audioMetrics))
 
 		// Update microphone input metrics
 		micMetrics := GetAudioInputMetrics()
-		UpdateMicrophoneMetrics(micMetrics)
+		UpdateMicrophoneMetrics(convertAudioInputMetricsToUnified(micMetrics))
 
 		// Update microphone subprocess process metrics
 		if inputSupervisor := GetAudioInputIPCSupervisor(); inputSupervisor != nil {
@@ -9,6 +9,8 @@ import { useMicrophone } from "@/hooks/useMicrophone";
 import { useAudioLevel } from "@/hooks/useAudioLevel";
 import { useAudioEvents } from "@/hooks/useAudioEvents";
 import api from "@/api";
+import { AUDIO_CONFIG } from "@/config/constants";
+import audioQualityService from "@/services/audioQualityService";
 
 interface AudioMetrics {
   frames_received: number;
@@ -44,12 +46,8 @@ interface AudioConfig {
   FrameSize: string;
 }
 
-const qualityLabels = {
-  0: "Low",
-  1: "Medium",
-  2: "High",
-  3: "Ultra"
-};
+// Quality labels will be managed by the audio quality service
+const getQualityLabels = () => audioQualityService.getQualityLabels();
 
 // Format percentage values to 2 decimal places
 function formatPercentage(value: number | null | undefined): string {
@@ -246,22 +244,15 @@ export default function AudioMetricsDashboard() {
 
   const loadAudioConfig = async () => {
     try {
-      // Load config
-      const configResp = await api.GET("/audio/quality");
-      if (configResp.ok) {
-        const configData = await configResp.json();
-        setConfig(configData.current);
-      }
-
-      // Load microphone config
-      try {
-        const micConfigResp = await api.GET("/microphone/quality");
-        if (micConfigResp.ok) {
-          const micConfigData = await micConfigResp.json();
-          setMicrophoneConfig(micConfigData.current);
-        }
-      } catch {
-        // Microphone config not available
-      }
+      // Use centralized audio quality service
+      const { audio, microphone } = await audioQualityService.loadAllConfigurations();
+      if (audio) {
+        setConfig(audio.current);
+      }
+
+      if (microphone) {
+        setMicrophoneConfig(microphone.current);
+      }
     } catch (error) {
       console.error("Failed to load audio config:", error);
@@ -397,7 +388,7 @@ export default function AudioMetricsDashboard() {
 
   const getDropRate = () => {
     if (!metrics || metrics.frames_received === 0) return 0;
-    return ((metrics.frames_dropped / metrics.frames_received) * 100);
+    return ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER);
   };
 
 
@@ -449,7 +440,7 @@ export default function AudioMetricsDashboard() {
             <div className="flex justify-between">
               <span className="text-slate-500 dark:text-slate-400">Quality:</span>
               <span className={cx("font-medium", getQualityColor(config.Quality))}>
-                {qualityLabels[config.Quality as keyof typeof qualityLabels]}
+                {getQualityLabels()[config.Quality]}
               </span>
             </div>
             <div className="flex justify-between">
@@ -486,7 +477,7 @@ export default function AudioMetricsDashboard() {
             <div className="flex justify-between">
               <span className="text-slate-500 dark:text-slate-400">Quality:</span>
               <span className={cx("font-medium", getQualityColor(microphoneConfig.Quality))}>
-                {qualityLabels[microphoneConfig.Quality as keyof typeof qualityLabels]}
+                {getQualityLabels()[microphoneConfig.Quality]}
              </span>
             </div>
             <div className="flex justify-between">
@@ -668,26 +659,26 @@ export default function AudioMetricsDashboard() {
               </span>
               <span className={cx(
                 "font-bold",
-                getDropRate() > 5
+                getDropRate() > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
                   ? "text-red-600 dark:text-red-400"
-                  : getDropRate() > 1
+                  : getDropRate() > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
                     ? "text-yellow-600 dark:text-yellow-400"
                     : "text-green-600 dark:text-green-400"
               )}>
-                {getDropRate().toFixed(2)}%
+                {getDropRate().toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES)}%
               </span>
             </div>
             <div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-600">
               <div
                 className={cx(
                   "h-2 rounded-full transition-all duration-300",
-                  getDropRate() > 5
+                  getDropRate() > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
                     ? "bg-red-500"
-                    : getDropRate() > 1
+                    : getDropRate() > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
                       ? "bg-yellow-500"
                       : "bg-green-500"
                 )}
-                style={{ width: `${Math.min(getDropRate(), 100)}%` }}
+                style={{ width: `${Math.min(getDropRate(), AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE)}%` }}
               />
             </div>
           </div>
@@ -734,27 +725,27 @@ export default function AudioMetricsDashboard() {
               </span>
               <span className={cx(
                 "font-bold",
-                (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 5
+                (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
                   ? "text-red-600 dark:text-red-400"
-                  : (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 1
+                  : (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
                     ? "text-yellow-600 dark:text-yellow-400"
                     : "text-green-600 dark:text-green-400"
               )}>
-                {microphoneMetrics.frames_sent > 0 ? ((microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100).toFixed(2) : "0.00"}%
+                {microphoneMetrics.frames_sent > 0 ? ((microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER).toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES) : "0.00"}%
               </span>
             </div>
             <div className="mt-1 h-2 w-full rounded-full bg-slate-200 dark:bg-slate-600">
               <div
                 className={cx(
                   "h-2 rounded-full transition-all duration-300",
-                  (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 5
+                  (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
                     ? "bg-red-500"
-                    : (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0) > 1
+                    : (microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
                       ? "bg-yellow-500"
                       : "bg-green-500"
                 )}
                 style={{
-                  width: `${Math.min(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * 100 : 0, 100)}%`
+                  width: `${Math.min(microphoneMetrics.frames_sent > 0 ? (microphoneMetrics.frames_dropped / microphoneMetrics.frames_sent) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER : 0, AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE)}%`
                 }}
               />
             </div>
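The dashboard above (and the popover below) now delegate quality-preset loading to a shared `audioQualityService`, whose implementation is not included in this view. A plausible minimal shape, inferred purely from the call sites — `loadAllConfigurations()` returning `{ audio, microphone }` with nullable entries, and `getQualityLabels()` — might look like this (the class name, error handling, and types are assumptions):

```typescript
// Hypothetical sketch of the audio quality service, inferred from its usage in this diff.
import api from "@/api";
import { AUDIO_CONFIG } from "@/config/constants";

interface QualityResponse {
  current: unknown; // the active AudioConfig as returned by the backend
}

class AudioQualityService {
  getQualityLabels(): Record<number, string> {
    return { ...AUDIO_CONFIG.QUALITY_LABELS };
  }

  // Loads audio and microphone quality configs in parallel; either may be null on failure,
  // which is why the components guard with `if (audio)` / `if (microphone)`.
  async loadAllConfigurations(): Promise<{
    audio: QualityResponse | null;
    microphone: QualityResponse | null;
  }> {
    const [audio, microphone] = await Promise.all([
      this.fetchQuality("/audio/quality"),
      this.fetchQuality("/microphone/quality"),
    ]);
    return { audio, microphone };
  }

  private async fetchQuality(path: string): Promise<QualityResponse | null> {
    try {
      const resp = await api.GET(path);
      return resp.ok ? await resp.json() : null;
    } catch {
      return null; // endpoint unavailable (e.g. microphone quality not supported)
    }
  }
}

export default new AudioQualityService();
```

This keeps the parallel `Promise.all` loading that the popover previously did inline, while giving both components one source of truth for labels and presets.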
@@ -11,6 +11,8 @@ import { useAudioLevel } from "@/hooks/useAudioLevel";
 import { useAudioEvents } from "@/hooks/useAudioEvents";
 import api from "@/api";
 import notifications from "@/notifications";
+import { AUDIO_CONFIG } from "@/config/constants";
+import audioQualityService from "@/services/audioQualityService";
 
 // Type for microphone error
 interface MicrophoneError {
@@ -41,12 +43,8 @@ interface AudioConfig {
   FrameSize: string;
 }
 
-const qualityLabels = {
-  0: "Low (32kbps)",
-  1: "Medium (64kbps)",
-  2: "High (128kbps)",
-  3: "Ultra (256kbps)"
-};
+// Quality labels will be managed by the audio quality service
+const getQualityLabels = () => audioQualityService.getQualityLabels();
 
 interface AudioControlPopoverProps {
   microphone: MicrophoneHookReturn;
@@ -138,20 +136,15 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
 
   const loadAudioConfigurations = async () => {
     try {
-      // Parallel loading for better performance
-      const [qualityResp, micQualityResp] = await Promise.all([
-        api.GET("/audio/quality"),
-        api.GET("/microphone/quality")
-      ]);
-
-      if (qualityResp.ok) {
-        const qualityData = await qualityResp.json();
-        setCurrentConfig(qualityData.current);
-      }
-
-      if (micQualityResp.ok) {
-        const micQualityData = await micQualityResp.json();
-        setCurrentMicrophoneConfig(micQualityData.current);
-      }
+      // Use centralized audio quality service
+      const { audio, microphone } = await audioQualityService.loadAllConfigurations();
+
+      if (audio) {
+        setCurrentConfig(audio.current);
+      }
+
+      if (microphone) {
+        setCurrentMicrophoneConfig(microphone.current);
+      }
 
       setConfigsLoaded(true);
@@ -511,7 +504,7 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
           </div>
 
           <div className="grid grid-cols-2 gap-2">
-            {Object.entries(qualityLabels).map(([quality, label]) => (
+            {Object.entries(getQualityLabels()).map(([quality, label]) => (
               <button
                 key={`mic-${quality}`}
                 onClick={() => handleMicrophoneQualityChange(parseInt(quality))}
@@ -552,7 +545,7 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
           </div>
 
           <div className="grid grid-cols-2 gap-2">
-            {Object.entries(qualityLabels).map(([quality, label]) => (
+            {Object.entries(getQualityLabels()).map(([quality, label]) => (
               <button
                 key={quality}
                 onClick={() => handleQualityChange(parseInt(quality))}
@@ -704,13 +697,13 @@ export default function AudioControlPopover({ microphone, open }: AudioControlPo
               <div className="text-xs text-slate-500 dark:text-slate-400">Drop Rate</div>
               <div className={cx(
                 "font-mono text-sm",
-                ((metrics.frames_dropped / metrics.frames_received) * 100) > 5
+                ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER) > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD
                   ? "text-red-600 dark:text-red-400"
-                  : ((metrics.frames_dropped / metrics.frames_received) * 100) > 1
+                  : ((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER) > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD
                     ? "text-yellow-600 dark:text-yellow-400"
                     : "text-green-600 dark:text-green-400"
               )}>
-                {((metrics.frames_dropped / metrics.frames_received) * 100).toFixed(2)}%
+                {((metrics.frames_dropped / metrics.frames_received) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER).toFixed(AUDIO_CONFIG.PERCENTAGE_DECIMAL_PLACES)}%
               </div>
             </div>
           )}
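The drop-rate expression `(frames_dropped / frames_sent) * PERCENTAGE_MULTIPLIER` is still repeated inline several times in the JSX above. A small helper — hypothetical, not part of this commit — would make the threshold comparisons read more clearly and is a natural follow-up to the constants extraction:

```typescript
// Hypothetical helper, not in the commit: centralizes the repeated drop-rate math.
import { AUDIO_CONFIG } from "@/config/constants";

export function computeDropRate(dropped: number, total: number): number {
  if (total <= 0) return 0;
  return (dropped / total) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER;
}

// Maps a drop rate to the severity tiers the dashboard colors by
// (red above 5%, yellow above 1%, green otherwise).
export function dropRateSeverity(rate: number): "critical" | "warning" | "ok" {
  if (rate > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD) return "critical";
  if (rate > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD) return "warning";
  return "ok";
}
```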
@@ -0,0 +1,167 @@
+// Centralized configuration constants
+
+// Network and API Configuration
+export const NETWORK_CONFIG = {
+  WEBSOCKET_RECONNECT_INTERVAL: 3000,
+  LONG_PRESS_DURATION: 3000,
+  ERROR_MESSAGE_TIMEOUT: 3000,
+  AUDIO_TEST_DURATION: 5000,
+  BACKEND_RETRY_DELAY: 500,
+  RESET_DELAY: 200,
+  STATE_CHECK_DELAY: 100,
+  VERIFICATION_DELAY: 1000,
+} as const;
+
+// Default URLs and Endpoints
+export const DEFAULT_URLS = {
+  JETKVM_PROD_API: "https://api.jetkvm.com",
+  JETKVM_PROD_APP: "https://app.jetkvm.com",
+  JETKVM_DOCS_TROUBLESHOOTING: "https://jetkvm.com/docs/getting-started/troubleshooting",
+  JETKVM_DOCS_REMOTE_ACCESS: "https://jetkvm.com/docs/networking/remote-access",
+  JETKVM_DOCS_LOCAL_ACCESS_RESET: "https://jetkvm.com/docs/networking/local-access#reset-password",
+  JETKVM_GITHUB: "https://github.com/jetkvm",
+  CRONTAB_GURU: "https://crontab.guru/examples.html",
+} as const;
+
+// Sample ISO URLs for mounting
+export const SAMPLE_ISOS = {
+  UBUNTU_24_04: {
+    name: "Ubuntu 24.04.2 Desktop",
+    url: "https://releases.ubuntu.com/24.04.2/ubuntu-24.04.2-desktop-amd64.iso",
+  },
+  DEBIAN_13: {
+    name: "Debian 13.0.0 (Testing)",
+    url: "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-13.0.0-amd64-netinst.iso",
+  },
+  DEBIAN_12: {
+    name: "Debian 12.11.0 (Stable)",
+    url: "https://cdimage.debian.org/mirror/cdimage/archive/12.11.0/amd64/iso-cd/debian-12.11.0-amd64-netinst.iso",
+  },
+  FEDORA_41: {
+    name: "Fedora 41 Workstation",
+    url: "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Workstation/x86_64/iso/Fedora-Workstation-Live-x86_64-41-1.4.iso",
+  },
+  OPENSUSE_LEAP: {
+    name: "openSUSE Leap 15.6",
+    url: "https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-x86_64-Media.iso",
+  },
+  OPENSUSE_TUMBLEWEED: {
+    name: "openSUSE Tumbleweed",
+    url: "https://download.opensuse.org/tumbleweed/iso/openSUSE-Tumbleweed-NET-x86_64-Current.iso",
+  },
+  ARCH_LINUX: {
+    name: "Arch Linux",
+    url: "https://archlinux.doridian.net/iso/2025.02.01/archlinux-2025.02.01-x86_64.iso",
+  },
+  NETBOOT_XYZ: {
+    name: "netboot.xyz",
+    url: "https://boot.netboot.xyz/ipxe/netboot.xyz.iso",
+  },
+} as const;
+
+// Security and Access Configuration
+export const SECURITY_CONFIG = {
+  LOCALHOST_ONLY_IP: "127.0.0.1",
+  LOCALHOST_HOSTNAME: "localhost",
+  HTTPS_PROTOCOL: "https:",
+} as const;
+
+// Default Hardware Configuration
+export const HARDWARE_CONFIG = {
+  DEFAULT_OFF_AFTER: 50000,
+  SAMPLE_EDID: "00FFFFFFFFFFFF00047265058A3F6101101E0104A53420783FC125A8554EA0260D5054BFEF80714F8140818081C081008B009500B300283C80A070B023403020360006442100001A000000FD00304C575716010A202020202020000000FC0042323436574C0A202020202020000000FF0054384E4545303033383532320A01F802031CF14F90020304050607011112131415161F2309070783010000011D8018711C1620582C250006442100009E011D007251D01E206E28550006442100001E8C0AD08A20E02D10103E9600064421000018C344806E70B028401720A80406442100001E00000000000000000000000000000000000000000000000000000096",
+} as const;
+
+// Audio Configuration
+export const AUDIO_CONFIG = {
+  // Audio Level Analysis
+  LEVEL_UPDATE_INTERVAL: 100, // ms - throttle audio level updates for performance
+  FFT_SIZE: 128, // reduced from 256 for better performance
+  SMOOTHING_TIME_CONSTANT: 0.8,
+  RELEVANT_FREQUENCY_BINS: 32, // focus on lower frequencies for voice
+  RMS_SCALING_FACTOR: 180, // for converting RMS to percentage
+  MAX_LEVEL_PERCENTAGE: 100,
+
+  // Microphone Configuration
+  SAMPLE_RATE: 48000, // Hz - high quality audio sampling
+  CHANNEL_COUNT: 1, // mono for microphone input
+  OPERATION_DEBOUNCE_MS: 1000, // debounce microphone operations
+  SYNC_DEBOUNCE_MS: 1000, // debounce state synchronization
+  AUDIO_TEST_TIMEOUT: 100, // ms - timeout for audio testing
+
+  // Audio Output Quality Bitrates (matching backend config_constants.go)
+  OUTPUT_QUALITY_BITRATES: {
+    LOW: 32, // AudioQualityLowOutputBitrate
+    MEDIUM: 64, // AudioQualityMediumOutputBitrate
+    HIGH: 128, // AudioQualityHighOutputBitrate
+    ULTRA: 192, // AudioQualityUltraOutputBitrate
+  } as const,
+
+  // Audio Input Quality Bitrates (matching backend config_constants.go)
+  INPUT_QUALITY_BITRATES: {
+    LOW: 16, // AudioQualityLowInputBitrate
+    MEDIUM: 32, // AudioQualityMediumInputBitrate
+    HIGH: 64, // AudioQualityHighInputBitrate
+    ULTRA: 96, // AudioQualityUltraInputBitrate
+  } as const,
+
+  // Sample Rates (matching backend config_constants.go)
+  QUALITY_SAMPLE_RATES: {
+    LOW: 22050, // AudioQualityLowSampleRate
+    MEDIUM: 44100, // AudioQualityMediumSampleRate
+    HIGH: 48000, // Default SampleRate
+    ULTRA: 48000, // Default SampleRate
+  } as const,
+
+  // Microphone Sample Rates
+  MIC_QUALITY_SAMPLE_RATES: {
+    LOW: 16000, // AudioQualityMicLowSampleRate
+    MEDIUM: 44100, // AudioQualityMediumSampleRate
+    HIGH: 48000, // Default SampleRate
+    ULTRA: 48000, // Default SampleRate
+  } as const,
+
+  // Channels (matching backend config_constants.go)
+  QUALITY_CHANNELS: {
+    LOW: 1, // AudioQualityLowChannels (mono)
+    MEDIUM: 2, // AudioQualityMediumChannels (stereo)
+    HIGH: 2, // AudioQualityHighChannels (stereo)
+    ULTRA: 2, // AudioQualityUltraChannels (stereo)
+  } as const,
+
+  // Frame Sizes in milliseconds (matching backend config_constants.go)
+  QUALITY_FRAME_SIZES: {
+    LOW: 40, // AudioQualityLowFrameSize (40ms)
+    MEDIUM: 20, // AudioQualityMediumFrameSize (20ms)
+    HIGH: 20, // AudioQualityHighFrameSize (20ms)
+    ULTRA: 10, // AudioQualityUltraFrameSize (10ms)
+  } as const,
+
+  // Updated Quality Labels with correct output bitrates
+  QUALITY_LABELS: {
+    0: "Low (32 kbps)",
+    1: "Medium (64 kbps)",
+    2: "High (128 kbps)",
+    3: "Ultra (192 kbps)",
+  } as const,
+
+  // Legacy support - keeping for backward compatibility
+  QUALITY_BITRATES: {
+    LOW: 32,
+    MEDIUM: 64,
+    HIGH: 128,
+    ULTRA: 192, // Updated to match backend
+  },
+
+  // Audio Analysis
+  ANALYSIS_FFT_SIZE: 256, // for detailed audio analysis
+  ANALYSIS_UPDATE_INTERVAL: 100, // ms - 10fps for audio level updates
+  LEVEL_SCALING_FACTOR: 255, // for RMS to percentage conversion
+
+  // Audio Metrics Thresholds
+  DROP_RATE_WARNING_THRESHOLD: 1, // percentage - yellow warning
+  DROP_RATE_CRITICAL_THRESHOLD: 5, // percentage - red critical
+  PERCENTAGE_MULTIPLIER: 100, // for converting ratios to percentages
+  PERCENTAGE_DECIMAL_PLACES: 2, // decimal places for percentage display
+} as const;
+
+// Placeholder URLs
+export const PLACEHOLDERS = {
+  ISO_URL: "https://example.com/image.iso",
+  PROXY_URL: "http://proxy.example.com:8080/",
+  API_URL: "https://api.example.com",
+  APP_URL: "https://app.example.com",
+} as const;
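As a quick illustration of how a consumer resolves a preset end to end against the tables above (illustrative values only; everything here is read straight from the new constants file):

```typescript
import { AUDIO_CONFIG } from "@/config/constants";

// Resolving quality level 2 against the preset tables defined above:
const label = AUDIO_CONFIG.QUALITY_LABELS[2];               // "High (128 kbps)"
const bitrate = AUDIO_CONFIG.OUTPUT_QUALITY_BITRATES.HIGH;  // 128 (kbps)
const sampleRate = AUDIO_CONFIG.QUALITY_SAMPLE_RATES.HIGH;  // 48000 (Hz)
const frameSize = AUDIO_CONFIG.QUALITY_FRAME_SIZES.HIGH;    // 20 (ms)

// A warning/critical drop-rate check, as the dashboard does:
// 12 dropped of 1000 received -> (12 / 1000) * 100 = 1.2%
const dropRate = (12 / 1000) * AUDIO_CONFIG.PERCENTAGE_MULTIPLIER;
const isWarning = dropRate > AUDIO_CONFIG.DROP_RATE_WARNING_THRESHOLD;   // true (1.2 > 1)
const isCritical = dropRate > AUDIO_CONFIG.DROP_RATE_CRITICAL_THRESHOLD; // false (1.2 < 5)
```

Note the Ultra output bitrate is 192 kbps here (matching the backend), which is why the popover's old hard-coded "Ultra (256kbps)" label was dropped in favor of `QUALITY_LABELS`.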
@@ -7,6 +7,8 @@ import {
 	MAX_KEYS_PER_STEP,
 } from "@/constants/macros";
 
+import { devWarn } from '../utils/debug';
+
 // Define the JsonRpc types for better type checking
 interface JsonRpcResponse {
 	jsonrpc: string;
@@ -782,7 +784,7 @@ export const useNetworkStateStore = create<NetworkState>((set, get) => ({
   setDhcpLeaseExpiry: (expiry: Date) => {
     const lease = get().dhcp_lease;
     if (!lease) {
-      console.warn("No lease found");
+      devWarn("No lease found");
       return;
     }
 
@@ -2,6 +2,7 @@ import { useNavigate, useParams, NavigateOptions } from "react-router-dom";
 import { useCallback, useMemo } from "react";
 
 import { isOnDevice } from "../main";
+import { devError } from '../utils/debug';
 
 /**
  * Generates the correct path based on whether the app is running on device or in cloud mode
@@ -21,7 +22,7 @@ export function getDeviceUiPath(path: string, deviceId?: string): string {
     return normalizedPath;
   } else {
     if (!deviceId) {
-      console.error("No device ID provided when generating path in cloud mode");
+      devError("No device ID provided when generating path in cloud mode");
       throw new Error("Device ID is required for cloud mode path generation");
     }
     return `/devices/${deviceId}${normalizedPath}`;
@@ -1,5 +1,7 @@
 import { useState, useEffect, useCallback } from 'react';
 
+import { devError } from '../utils/debug';
+
 export interface AudioDevice {
   deviceId: string;
   label: string;
@@ -66,7 +68,7 @@ export function useAudioDevices(): UseAudioDevicesReturn {
       // Audio devices enumerated
 
     } catch (err) {
-      console.error('Failed to enumerate audio devices:', err);
+      devError('Failed to enumerate audio devices:', err);
       setError(err instanceof Error ? err.message : 'Failed to access audio devices');
     } finally {
       setIsLoading(false);
@@ -1,6 +1,9 @@
 import { useCallback, useEffect, useRef, useState } from 'react';
 import useWebSocket, { ReadyState } from 'react-use-websocket';
 
+import { devError, devWarn } from '../utils/debug';
+import { NETWORK_CONFIG } from '../config/constants';
+
 // Audio event types matching the backend
 export type AudioEventType =
   | 'audio-mute-changed'
@@ -121,7 +124,7 @@ export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedD
   } = useWebSocket(getWebSocketUrl(), {
     shouldReconnect: () => true,
     reconnectAttempts: 10,
-    reconnectInterval: 3000,
+    reconnectInterval: NETWORK_CONFIG.WEBSOCKET_RECONNECT_INTERVAL,
     share: true, // Share the WebSocket connection across multiple hooks
     onOpen: () => {
       // WebSocket connected
@@ -137,7 +140,7 @@ export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedD
       globalSubscriptionState.connectionId = null;
     },
     onError: (event) => {
-      console.error('[AudioEvents] WebSocket error:', event);
+      devError('[AudioEvents] WebSocket error:', event);
     },
   });
 
@@ -270,7 +273,7 @@ export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedD
     } catch (error) {
       // Ignore parsing errors for non-JSON messages (like "pong")
       if (lastMessage.data !== 'pong') {
-        console.warn('[AudioEvents] Failed to parse WebSocket message:', error);
+        devWarn('[AudioEvents] Failed to parse WebSocket message:', error);
       }
     }
   }
@@ -1,5 +1,7 @@
 import { useEffect, useRef, useState } from 'react';
 
+import { AUDIO_CONFIG } from '@/config/constants';
+
 interface AudioLevelHookResult {
   audioLevel: number; // 0-100 percentage
   isAnalyzing: boolean;
@@ -7,14 +9,14 @@ interface AudioLevelHookResult {
 
 interface AudioLevelOptions {
   enabled?: boolean; // Allow external control of analysis
-  updateInterval?: number; // Throttle updates (default: 100ms for 10fps instead of 60fps)
+  updateInterval?: number; // Throttle updates (default from AUDIO_CONFIG)
 }
 
 export const useAudioLevel = (
   stream: MediaStream | null,
   options: AudioLevelOptions = {}
 ): AudioLevelHookResult => {
-  const { enabled = true, updateInterval = 100 } = options;
+  const { enabled = true, updateInterval = AUDIO_CONFIG.LEVEL_UPDATE_INTERVAL } = options;
 
   const [audioLevel, setAudioLevel] = useState(0);
   const [isAnalyzing, setIsAnalyzing] = useState(false);
@@ -59,8 +61,8 @@ export const useAudioLevel = (
       const source = audioContext.createMediaStreamSource(stream);
 
       // Configure analyser - use smaller FFT for better performance
-      analyser.fftSize = 128; // Reduced from 256 for better performance
-      analyser.smoothingTimeConstant = 0.8;
+      analyser.fftSize = AUDIO_CONFIG.FFT_SIZE;
+      analyser.smoothingTimeConstant = AUDIO_CONFIG.SMOOTHING_TIME_CONSTANT;
 
       // Connect nodes
       source.connect(analyser);
@@ -87,7 +89,7 @@ export const useAudioLevel = (
 
       // Optimized RMS calculation - process only relevant frequency bands
       let sum = 0;
-      const relevantBins = Math.min(dataArray.length, 32); // Focus on lower frequencies for voice
+      const relevantBins = Math.min(dataArray.length, AUDIO_CONFIG.RELEVANT_FREQUENCY_BINS);
       for (let i = 0; i < relevantBins; i++) {
         const value = dataArray[i];
         sum += value * value;
@@ -95,7 +97,7 @@ export const useAudioLevel = (
       const rms = Math.sqrt(sum / relevantBins);
 
       // Convert to percentage (0-100) with better scaling
-      const level = Math.min(100, Math.max(0, (rms / 180) * 100)); // Adjusted scaling for better sensitivity
+      const level = Math.min(AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE, Math.max(0, (rms / AUDIO_CONFIG.RMS_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE));
       setAudioLevel(Math.round(level));
     };
 
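To make the scaling concrete: with the constants above, a frequency-bin RMS of 90 maps to min(100, (90 / 180) * 100) = 50%. The same math as a standalone sketch, assuming only the constants file introduced in this commit:

```typescript
import { AUDIO_CONFIG } from "@/config/constants";

// Computes a 0-100 level from raw analyser byte data, mirroring the hook's math:
// RMS over the first RELEVANT_FREQUENCY_BINS bins, scaled by RMS_SCALING_FACTOR.
function computeLevel(dataArray: Uint8Array): number {
  let sum = 0;
  const relevantBins = Math.min(dataArray.length, AUDIO_CONFIG.RELEVANT_FREQUENCY_BINS);
  for (let i = 0; i < relevantBins; i++) {
    sum += dataArray[i] * dataArray[i];
  }
  const rms = Math.sqrt(sum / relevantBins);
  return Math.round(
    Math.min(
      AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE,
      Math.max(0, (rms / AUDIO_CONFIG.RMS_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE),
    ),
  );
}

// Example: 32 bins all at 90 -> RMS 90 -> (90 / 180) * 100 = 50.
computeLevel(new Uint8Array(32).fill(90)); // 50
```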
@@ -2,6 +2,8 @@ import { useCallback, useEffect } from "react";
 
 import { useRTCStore } from "@/hooks/stores";
 
+import { devError } from '../utils/debug';
+
 export interface JsonRpcRequest {
   jsonrpc: string;
   method: string;
@@ -61,7 +63,7 @@ export function useJsonRpc(onRequest?: (payload: JsonRpcRequest) => void) {
     return;
   }
 
-  if ("error" in payload) console.error(payload.error);
+  if ("error" in payload) devError(payload.error);
   if (!payload.id) return;
 
   const callback = callbackStore.get(payload.id);
@ -2,6 +2,8 @@ import { useCallback, useEffect, useRef, useState } from "react";
|
||||||
|
|
||||||
import { useRTCStore } from "@/hooks/stores";
|
import { useRTCStore } from "@/hooks/stores";
|
||||||
import api from "@/api";
|
import api from "@/api";
|
||||||
|
import { devLog, devInfo, devWarn, devError, devOnly } from "@/utils/debug";
|
||||||
|
import { NETWORK_CONFIG, AUDIO_CONFIG } from "@/config/constants";
|
||||||
|
|
||||||
export interface MicrophoneError {
|
export interface MicrophoneError {
|
||||||
type: 'permission' | 'device' | 'network' | 'unknown';
|
type: 'permission' | 'device' | 'network' | 'unknown';
|
||||||
|
@ -31,15 +33,14 @@ export function useMicrophone() {
|
||||||
// Add debouncing refs to prevent rapid operations
|
// Add debouncing refs to prevent rapid operations
|
||||||
const lastOperationRef = useRef<number>(0);
|
const lastOperationRef = useRef<number>(0);
|
||||||
const operationTimeoutRef = useRef<number | null>(null);
|
const operationTimeoutRef = useRef<number | null>(null);
|
||||||
const OPERATION_DEBOUNCE_MS = 1000; // 1 second debounce
|
|
||||||
|
|
||||||
// Debounced operation wrapper
|
// Debounced operation wrapper
|
||||||
const debouncedOperation = useCallback((operation: () => Promise<void>, operationType: string) => {
|
const debouncedOperation = useCallback((operation: () => Promise<void>, operationType: string) => {
|
||||||
const now = Date.now();
|
const now = Date.now();
|
||||||
const timeSinceLastOp = now - lastOperationRef.current;
|
const timeSinceLastOp = now - lastOperationRef.current;
|
||||||
|
|
||||||
if (timeSinceLastOp < OPERATION_DEBOUNCE_MS) {
|
if (timeSinceLastOp < AUDIO_CONFIG.OPERATION_DEBOUNCE_MS) {
|
||||||
console.log(`Debouncing ${operationType} operation - too soon (${timeSinceLastOp}ms since last)`);
|
devLog(`Debouncing ${operationType} operation - too soon (${timeSinceLastOp}ms since last)`);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -51,7 +52,7 @@ export function useMicrophone() {
|
||||||
|
|
||||||
lastOperationRef.current = now;
|
lastOperationRef.current = now;
|
||||||
operation().catch(error => {
|
operation().catch(error => {
|
||||||
console.error(`Debounced ${operationType} operation failed:`, error);
|
devError(`Debounced ${operationType} operation failed:`, error);
|
||||||
});
|
});
|
||||||
}, []);
|
}, []);
|
||||||
|
|
||||||
|
@ -72,7 +73,7 @@ export function useMicrophone() {
|
||||||
try {
|
try {
|
||||||
await microphoneSender.replaceTrack(null);
|
await microphoneSender.replaceTrack(null);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.warn("Failed to replace track with null:", error);
|
devWarn("Failed to replace track with null:", error);
|
||||||
// Fallback to removing the track
|
// Fallback to removing the track
|
||||||
peerConnection.removeTrack(microphoneSender);
|
peerConnection.removeTrack(microphoneSender);
|
||||||
}
|
}
|
||||||
|
@ -110,14 +111,14 @@ export function useMicrophone() {
|
||||||
} : "No peer connection",
|
} : "No peer connection",
|
||||||
streamMatch: refStream === microphoneStream
|
streamMatch: refStream === microphoneStream
|
||||||
};
|
};
|
||||||
console.log("Microphone Debug State:", state);
|
devLog("Microphone Debug State:", state);
|
||||||
|
|
||||||
// Also check if streams are active
|
// Also check if streams are active
|
||||||
if (refStream) {
|
if (refStream) {
|
||||||
console.log("Ref stream active tracks:", refStream.getAudioTracks().filter(t => t.readyState === 'live').length);
|
devLog("Ref stream active tracks:", refStream.getAudioTracks().filter(t => t.readyState === 'live').length);
|
||||||
}
|
}
|
||||||
if (microphoneStream && microphoneStream !== refStream) {
|
if (microphoneStream && microphoneStream !== refStream) {
|
||||||
console.log("Store stream active tracks:", microphoneStream.getAudioTracks().filter(t => t.readyState === 'live').length);
|
devLog("Store stream active tracks:", microphoneStream.getAudioTracks().filter(t => t.readyState === 'live').length);
|
||||||
}
|
}
|
||||||
|
|
||||||
return state;
|
return state;
|
||||||
|
@ -137,15 +138,15 @@ export function useMicrophone() {
|
||||||
const syncMicrophoneState = useCallback(async () => {
|
const syncMicrophoneState = useCallback(async () => {
|
||||||
// Debounce sync calls to prevent race conditions
|
// Debounce sync calls to prevent race conditions
|
||||||
const now = Date.now();
|
const now = Date.now();
|
||||||
if (now - lastSyncRef.current < 1000) { // Increased debounce time
|
if (now - lastSyncRef.current < AUDIO_CONFIG.SYNC_DEBOUNCE_MS) {
|
||||||
console.log("Skipping sync - too frequent");
|
devLog("Skipping sync - too frequent");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
lastSyncRef.current = now;
|
lastSyncRef.current = now;
|
||||||
|
|
||||||
// Don't sync if we're in the middle of starting the microphone
|
// Don't sync if we're in the middle of starting the microphone
|
||||||
if (isStartingRef.current) {
|
if (isStartingRef.current) {
|
||||||
console.log("Skipping sync - microphone is starting");
|
devLog("Skipping sync - microphone is starting");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -157,27 +158,27 @@ export function useMicrophone() {
|
||||||
|
|
||||||
// Only sync if there's a significant state difference and we're not in a transition
|
// Only sync if there's a significant state difference and we're not in a transition
|
||||||
if (backendRunning !== isMicrophoneActive) {
|
if (backendRunning !== isMicrophoneActive) {
|
||||||
console.info(`Syncing microphone state: backend=${backendRunning}, frontend=${isMicrophoneActive}`);
|
devInfo(`Syncing microphone state: backend=${backendRunning}, frontend=${isMicrophoneActive}`);
|
||||||
|
|
||||||
// If backend is running but frontend thinks it's not, just update frontend state
|
// If backend is running but frontend thinks it's not, just update frontend state
|
||||||
if (backendRunning && !isMicrophoneActive) {
|
if (backendRunning && !isMicrophoneActive) {
|
||||||
console.log("Backend running, updating frontend state to active");
|
devLog("Backend running, updating frontend state to active");
|
||||||
setMicrophoneActive(true);
|
setMicrophoneActive(true);
|
||||||
}
|
}
|
||||||
// If backend is not running but frontend thinks it is, clean up and update state
|
// If backend is not running but frontend thinks it is, clean up and update state
|
||||||
else if (!backendRunning && isMicrophoneActive) {
|
else if (!backendRunning && isMicrophoneActive) {
|
||||||
console.log("Backend not running, cleaning up frontend state");
|
devLog("Backend not running, cleaning up frontend state");
|
||||||
setMicrophoneActive(false);
|
setMicrophoneActive(false);
|
||||||
// Only clean up stream if we actually have one
|
// Only clean up stream if we actually have one
|
||||||
if (microphoneStreamRef.current) {
|
if (microphoneStreamRef.current) {
|
||||||
console.log("Cleaning up orphaned stream");
|
devLog("Cleaning up orphaned stream");
|
||||||
await stopMicrophoneStream();
|
await stopMicrophoneStream();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.warn("Failed to sync microphone state:", error);
|
devWarn("Failed to sync microphone state:", error);
|
||||||
}
|
}
|
||||||
}, [isMicrophoneActive, setMicrophoneActive, stopMicrophoneStream]);
|
}, [isMicrophoneActive, setMicrophoneActive, stopMicrophoneStream]);
|
||||||
|
|
||||||
|
@ -185,7 +186,7 @@ export function useMicrophone() {
|
||||||
const startMicrophone = useCallback(async (deviceId?: string): Promise<{ success: boolean; error?: MicrophoneError }> => {
|
const startMicrophone = useCallback(async (deviceId?: string): Promise<{ success: boolean; error?: MicrophoneError }> => {
|
||||||
// Prevent multiple simultaneous start operations
|
// Prevent multiple simultaneous start operations
|
||||||
if (isStarting || isStopping || isToggling) {
|
if (isStarting || isStopping || isToggling) {
|
||||||
console.log("Microphone operation already in progress, skipping start");
|
devLog("Microphone operation already in progress, skipping start");
|
||||||
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
|
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -198,8 +199,8 @@ export function useMicrophone() {
|
||||||
echoCancellation: true,
|
echoCancellation: true,
|
||||||
noiseSuppression: true,
|
noiseSuppression: true,
|
||||||
autoGainControl: true,
|
autoGainControl: true,
|
||||||
sampleRate: 48000,
|
sampleRate: AUDIO_CONFIG.SAMPLE_RATE,
|
||||||
channelCount: 1,
|
channelCount: AUDIO_CONFIG.CHANNEL_COUNT,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Add device ID if specified
|
// Add device ID if specified
|
||||||
|
@ -207,7 +208,7 @@ export function useMicrophone() {
|
||||||
audioConstraints.deviceId = { exact: deviceId };
|
audioConstraints.deviceId = { exact: deviceId };
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log("Requesting microphone with constraints:", audioConstraints);
|
devLog("Requesting microphone with constraints:", audioConstraints);
|
||||||
const stream = await navigator.mediaDevices.getUserMedia({
|
const stream = await navigator.mediaDevices.getUserMedia({
|
||||||
audio: audioConstraints
|
audio: audioConstraints
|
||||||
});
|
});
|
||||||
|
@ -219,14 +220,14 @@ export function useMicrophone() {
|
||||||
setMicrophoneStream(stream);
|
setMicrophoneStream(stream);
|
||||||
|
|
||||||
// Verify the stream was stored correctly
|
// Verify the stream was stored correctly
|
||||||
console.log("Stream storage verification:", {
|
devLog("Stream storage verification:", {
|
||||||
refSet: !!microphoneStreamRef.current,
|
refSet: !!microphoneStreamRef.current,
|
||||||
refId: microphoneStreamRef.current?.id,
|
refId: microphoneStreamRef.current?.id,
|
||||||
storeWillBeSet: true // Store update is async
|
storeWillBeSet: true // Store update is async
|
||||||
});
|
});
|
||||||
|
|
||||||
// Add audio track to peer connection if available
|
// Add audio track to peer connection if available
|
||||||
console.log("Peer connection state:", peerConnection ? {
|
devLog("Peer connection state:", peerConnection ? {
|
||||||
connectionState: peerConnection.connectionState,
|
connectionState: peerConnection.connectionState,
|
||||||
iceConnectionState: peerConnection.iceConnectionState,
|
iceConnectionState: peerConnection.iceConnectionState,
|
||||||
signalingState: peerConnection.signalingState
|
signalingState: peerConnection.signalingState
|
||||||
|
@ -234,11 +235,11 @@ export function useMicrophone() {
|
||||||
|
|
||||||
if (peerConnection && stream.getAudioTracks().length > 0) {
|
if (peerConnection && stream.getAudioTracks().length > 0) {
|
||||||
const audioTrack = stream.getAudioTracks()[0];
|
const audioTrack = stream.getAudioTracks()[0];
|
||||||
console.log("Starting microphone with audio track:", audioTrack.id, "kind:", audioTrack.kind);
|
devLog("Starting microphone with audio track:", audioTrack.id, "kind:", audioTrack.kind);
|
||||||
|
|
||||||
// Find the audio transceiver (should already exist with sendrecv direction)
|
// Find the audio transceiver (should already exist with sendrecv direction)
|
||||||
const transceivers = peerConnection.getTransceivers();
|
const transceivers = peerConnection.getTransceivers();
|
||||||
console.log("Available transceivers:", transceivers.map(t => ({
|
devLog("Available transceivers:", transceivers.map(t => ({
|
||||||
direction: t.direction,
|
direction: t.direction,
|
||||||
mid: t.mid,
|
mid: t.mid,
|
||||||
senderTrack: t.sender.track?.kind,
|
senderTrack: t.sender.track?.kind,
|
||||||
|
@ -264,7 +265,7 @@ export function useMicrophone() {
|
||||||
return false;
|
return false;
|
||||||
});
|
});
|
||||||
|
|
||||||
console.log("Found audio transceiver:", audioTransceiver ? {
|
devLog("Found audio transceiver:", audioTransceiver ? {
|
||||||
direction: audioTransceiver.direction,
|
direction: audioTransceiver.direction,
|
||||||
mid: audioTransceiver.mid,
|
mid: audioTransceiver.mid,
|
||||||
senderTrack: audioTransceiver.sender.track?.kind,
|
senderTrack: audioTransceiver.sender.track?.kind,
|
||||||
|
@ -276,10 +277,10 @@ export function useMicrophone() {
|
||||||
// Use the existing audio transceiver's sender
|
// Use the existing audio transceiver's sender
|
||||||
await audioTransceiver.sender.replaceTrack(audioTrack);
|
await audioTransceiver.sender.replaceTrack(audioTrack);
|
||||||
sender = audioTransceiver.sender;
|
sender = audioTransceiver.sender;
|
||||||
console.log("Replaced audio track on existing transceiver");
|
devLog("Replaced audio track on existing transceiver");
|
||||||
|
|
||||||
// Verify the track was set correctly
|
// Verify the track was set correctly
|
||||||
console.log("Transceiver after track replacement:", {
|
devLog("Transceiver after track replacement:", {
|
||||||
direction: audioTransceiver.direction,
|
direction: audioTransceiver.direction,
|
||||||
senderTrack: audioTransceiver.sender.track?.id,
|
senderTrack: audioTransceiver.sender.track?.id,
|
||||||
senderTrackKind: audioTransceiver.sender.track?.kind,
|
senderTrackKind: audioTransceiver.sender.track?.kind,
|
||||||
|
@ -289,11 +290,11 @@ export function useMicrophone() {
|
||||||
} else {
|
} else {
|
||||||
// Fallback: add new track if no transceiver found
|
// Fallback: add new track if no transceiver found
|
||||||
sender = peerConnection.addTrack(audioTrack, stream);
|
sender = peerConnection.addTrack(audioTrack, stream);
|
||||||
console.log("Added new audio track to peer connection");
|
devLog("Added new audio track to peer connection");
|
||||||
|
|
||||||
// Find the transceiver that was created for this track
|
// Find the transceiver that was created for this track
|
||||||
const newTransceiver = peerConnection.getTransceivers().find(t => t.sender === sender);
|
const newTransceiver = peerConnection.getTransceivers().find(t => t.sender === sender);
|
||||||
console.log("New transceiver created:", newTransceiver ? {
|
devLog("New transceiver created:", newTransceiver ? {
|
||||||
direction: newTransceiver.direction,
|
direction: newTransceiver.direction,
|
||||||
senderTrack: newTransceiver.sender.track?.id,
|
senderTrack: newTransceiver.sender.track?.id,
|
||||||
senderTrackKind: newTransceiver.sender.track?.kind
|
senderTrackKind: newTransceiver.sender.track?.kind
|
||||||
|
@ -301,7 +302,7 @@ export function useMicrophone() {
|
||||||
}
|
}
|
||||||
|
|
||||||
setMicrophoneSender(sender);
|
setMicrophoneSender(sender);
|
||||||
console.log("Microphone sender set:", {
|
devLog("Microphone sender set:", {
|
||||||
senderId: sender,
|
senderId: sender,
|
||||||
track: sender.track?.id,
|
track: sender.track?.id,
|
||||||
trackKind: sender.track?.kind,
|
trackKind: sender.track?.kind,
|
||||||
|
@ -310,28 +311,30 @@ export function useMicrophone() {
|
||||||
});
|
});
|
||||||
|
|
||||||
// Check sender stats to verify audio is being transmitted
|
// Check sender stats to verify audio is being transmitted
|
||||||
setTimeout(async () => {
|
devOnly(() => {
|
||||||
try {
|
setTimeout(async () => {
|
||||||
const stats = await sender.getStats();
|
try {
|
||||||
console.log("Sender stats after 2 seconds:");
|
const stats = await sender.getStats();
|
||||||
stats.forEach((report, id) => {
|
devLog("Sender stats after 2 seconds:");
|
||||||
if (report.type === 'outbound-rtp' && report.kind === 'audio') {
|
stats.forEach((report, id) => {
|
||||||
console.log("Outbound audio RTP stats:", {
|
if (report.type === 'outbound-rtp' && report.kind === 'audio') {
|
||||||
id,
|
devLog("Outbound audio RTP stats:", {
|
||||||
packetsSent: report.packetsSent,
|
id,
|
||||||
bytesSent: report.bytesSent,
|
packetsSent: report.packetsSent,
|
||||||
timestamp: report.timestamp
|
bytesSent: report.bytesSent,
|
||||||
});
|
timestamp: report.timestamp
|
||||||
}
|
});
|
||||||
});
|
}
|
||||||
} catch (error) {
|
});
|
||||||
console.error("Failed to get sender stats:", error);
|
} catch (error) {
|
||||||
}
|
devError("Failed to get sender stats:", error);
|
||||||
}, 2000);
|
}
|
||||||
|
}, 2000);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
     // Notify backend that microphone is started
-    console.log("Notifying backend about microphone start...");
+    devLog("Notifying backend about microphone start...");

     // Retry logic for backend failures
     let backendSuccess = false;
@@ -341,12 +344,12 @@ export function useMicrophone() {
       try {
         // If this is a retry, first try to reset the backend microphone state
         if (attempt > 1) {
-          console.log(`Backend start attempt ${attempt}, first trying to reset backend state...`);
+          devLog(`Backend start attempt ${attempt}, first trying to reset backend state...`);
           try {
             // Try the new reset endpoint first
             const resetResp = await api.POST("/microphone/reset", {});
             if (resetResp.ok) {
-              console.log("Backend reset successful");
+              devLog("Backend reset successful");
             } else {
               // Fallback to stop
               await api.POST("/microphone/stop", {});
@@ -354,59 +357,59 @@ export function useMicrophone() {
             // Wait a bit for the backend to reset
             await new Promise(resolve => setTimeout(resolve, 200));
           } catch (resetError) {
-            console.warn("Failed to reset backend state:", resetError);
+            devWarn("Failed to reset backend state:", resetError);
           }
         }

         const backendResp = await api.POST("/microphone/start", {});
-        console.log(`Backend response status (attempt ${attempt}):`, backendResp.status, "ok:", backendResp.ok);
+        devLog(`Backend response status (attempt ${attempt}):`, backendResp.status, "ok:", backendResp.ok);

         if (!backendResp.ok) {
           lastError = `Backend returned status ${backendResp.status}`;
-          console.error(`Backend microphone start failed with status: ${backendResp.status} (attempt ${attempt})`);
+          devError(`Backend microphone start failed with status: ${backendResp.status} (attempt ${attempt})`);

           // For 500 errors, try again after a short delay
           if (backendResp.status === 500 && attempt < 3) {
-            console.log(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
+            devLog(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
             await new Promise(resolve => setTimeout(resolve, 500));
             continue;
           }
         } else {
           // Success!
           const responseData = await backendResp.json();
-          console.log("Backend response data:", responseData);
+          devLog("Backend response data:", responseData);
           if (responseData.status === "already running") {
-            console.info("Backend microphone was already running");
+            devInfo("Backend microphone was already running");

             // If we're on the first attempt and backend says "already running",
             // but frontend thinks it's not active, this might be a stuck state
             if (attempt === 1 && !isMicrophoneActive) {
-              console.warn("Backend reports 'already running' but frontend is not active - possible stuck state");
-              console.log("Attempting to reset backend state and retry...");
+              devWarn("Backend reports 'already running' but frontend is not active - possible stuck state");
+              devLog("Attempting to reset backend state and retry...");

               try {
                 const resetResp = await api.POST("/microphone/reset", {});
                 if (resetResp.ok) {
-                  console.log("Backend reset successful, retrying start...");
+                  devLog("Backend reset successful, retrying start...");
                   await new Promise(resolve => setTimeout(resolve, 200));
                   continue; // Retry the start
                 }
               } catch (resetError) {
-                console.warn("Failed to reset stuck backend state:", resetError);
+                devWarn("Failed to reset stuck backend state:", resetError);
               }
             }
           }
-          console.log("Backend microphone start successful");
+          devLog("Backend microphone start successful");
           backendSuccess = true;
           break;
         }
       } catch (error) {
         lastError = error instanceof Error ? error : String(error);
-        console.error(`Backend microphone start threw error (attempt ${attempt}):`, error);
+        devError(`Backend microphone start threw error (attempt ${attempt}):`, error);

         // For network errors, try again after a short delay
         if (attempt < 3) {
-          console.log(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
+          devLog(`Retrying backend start in 500ms (attempt ${attempt + 1}/3)...`);
           await new Promise(resolve => setTimeout(resolve, 500));
           continue;
         }
@@ -415,7 +418,7 @@ export function useMicrophone() {

     // If all backend attempts failed, cleanup and return error
     if (!backendSuccess) {
-      console.error("All backend start attempts failed, cleaning up stream");
+      devError("All backend start attempts failed, cleaning up stream");
       await stopMicrophoneStream();
       isStartingRef.current = false;
       setIsStarting(false);
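The retry loop above makes up to three attempts against /microphone/start: attempts after the first reset the backend state first, HTTP 500s and thrown network errors are retried after a 500 ms delay, and any other failure ends the loop. A minimal standalone sketch of that shape; the `post` parameter stands in for the project's `api` client, and the endpoints are the ones used in the diff:

// Sketch only: `post` is a stand-in for the api client used above.
async function startWithRetry(post: (path: string) => Promise<Response>): Promise<boolean> {
  for (let attempt = 1; attempt <= 3; attempt++) {
    try {
      if (attempt > 1) {
        // Later attempts clear possible stuck backend state first.
        await post("/microphone/reset");
        await new Promise(resolve => setTimeout(resolve, 200));
      }
      const resp = await post("/microphone/start");
      if (resp.ok) return true;
      if (resp.status !== 500) return false; // only 500s are worth retrying
    } catch {
      // thrown network errors fall through to the shared retry delay
    }
    await new Promise(resolve => setTimeout(resolve, 500));
  }
  return false;
}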
@@ -432,7 +435,7 @@ export function useMicrophone() {
     setMicrophoneActive(true);
     setMicrophoneMuted(false);

-    console.log("Microphone state set to active. Verifying state:", {
+    devLog("Microphone state set to active. Verifying state:", {
       streamInRef: !!microphoneStreamRef.current,
       streamInStore: !!microphoneStream,
       isActive: true,
@@ -441,15 +444,17 @@ export function useMicrophone() {

     // Don't sync immediately after starting - it causes race conditions
     // The sync will happen naturally through other triggers
-    setTimeout(() => {
-      // Just verify state after a delay for debugging
-      console.log("State check after delay:", {
-        streamInRef: !!microphoneStreamRef.current,
-        streamInStore: !!microphoneStream,
-        isActive: isMicrophoneActive,
-        isMuted: isMicrophoneMuted
-      });
-    }, 100);
+    devOnly(() => {
+      setTimeout(() => {
+        // Just verify state after a delay for debugging
+        devLog("State check after delay:", {
+          streamInRef: !!microphoneStreamRef.current,
+          streamInStore: !!microphoneStream,
+          isActive: isMicrophoneActive,
+          isMuted: isMicrophoneMuted
+        });
+      }, AUDIO_CONFIG.AUDIO_TEST_TIMEOUT);
+    });

     // Clear the starting flag
     isStartingRef.current = false;
@@ -493,12 +498,12 @@ export function useMicrophone() {
   // Reset backend microphone state
   const resetBackendMicrophoneState = useCallback(async (): Promise<boolean> => {
     try {
-      console.log("Resetting backend microphone state...");
+      devLog("Resetting backend microphone state...");
       const response = await api.POST("/microphone/reset", {});

       if (response.ok) {
         const data = await response.json();
-        console.log("Backend microphone reset successful:", data);
+        devLog("Backend microphone reset successful:", data);

         // Update frontend state to match backend
         setMicrophoneActive(false);
@@ -506,7 +511,7 @@ export function useMicrophone() {

         // Clean up any orphaned streams
         if (microphoneStreamRef.current) {
-          console.log("Cleaning up orphaned stream after reset");
+          devLog("Cleaning up orphaned stream after reset");
           await stopMicrophoneStream();
         }

@@ -518,19 +523,19 @@ export function useMicrophone() {

         return true;
       } else {
-        console.error("Backend microphone reset failed:", response.status);
+        devError("Backend microphone reset failed:", response.status);
         return false;
       }
     } catch (error) {
-      console.warn("Failed to reset backend microphone state:", error);
+      devWarn("Failed to reset backend microphone state:", error);
       // Fallback to old method
       try {
-        console.log("Trying fallback reset method...");
+        devLog("Trying fallback reset method...");
         await api.POST("/microphone/stop", {});
         await new Promise(resolve => setTimeout(resolve, 300));
         return true;
       } catch (fallbackError) {
-        console.error("Fallback reset also failed:", fallbackError);
+        devError("Fallback reset also failed:", fallbackError);
         return false;
       }
     }
@@ -540,7 +545,7 @@ export function useMicrophone() {
   const stopMicrophone = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
     // Prevent multiple simultaneous stop operations
     if (isStarting || isStopping || isToggling) {
-      console.log("Microphone operation already in progress, skipping stop");
+      devLog("Microphone operation already in progress, skipping stop");
       return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
     }

@@ -552,9 +557,9 @@ export function useMicrophone() {
     // Then notify backend that microphone is stopped
     try {
       await api.POST("/microphone/stop", {});
-      console.log("Backend notified about microphone stop");
+      devLog("Backend notified about microphone stop");
     } catch (error) {
-      console.warn("Failed to notify backend about microphone stop:", error);
+      devWarn("Failed to notify backend about microphone stop:", error);
     }

     // Update frontend state immediately
@@ -567,7 +572,7 @@ export function useMicrophone() {
       setIsStopping(false);
       return { success: true };
     } catch (error) {
-      console.error("Failed to stop microphone:", error);
+      devError("Failed to stop microphone:", error);
       setIsStopping(false);
       return {
         success: false,
@@ -583,7 +588,7 @@ export function useMicrophone() {
   const toggleMicrophoneMute = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
     // Prevent multiple simultaneous toggle operations
     if (isStarting || isStopping || isToggling) {
-      console.log("Microphone operation already in progress, skipping toggle");
+      devLog("Microphone operation already in progress, skipping toggle");
       return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
     }

@@ -592,7 +597,7 @@ export function useMicrophone() {
     // Use the ref instead of store value to avoid race conditions
     const currentStream = microphoneStreamRef.current || microphoneStream;

-    console.log("Toggle microphone mute - current state:", {
+    devLog("Toggle microphone mute - current state:", {
       hasRefStream: !!microphoneStreamRef.current,
       hasStoreStream: !!microphoneStream,
       isActive: isMicrophoneActive,
@@ -610,7 +615,7 @@ export function useMicrophone() {
         streamId: currentStream?.id,
         audioTracks: currentStream?.getAudioTracks().length || 0
       };
-      console.warn("Microphone mute failed: stream or active state missing", errorDetails);
+      devWarn("Microphone mute failed: stream or active state missing", errorDetails);

       // Provide more specific error message
       let errorMessage = 'Microphone is not active';
@@ -647,7 +652,7 @@ export function useMicrophone() {
     // Mute/unmute the audio track
     audioTracks.forEach(track => {
       track.enabled = !newMutedState;
-      console.log(`Audio track ${track.id} enabled: ${track.enabled}`);
+      devLog(`Audio track ${track.id} enabled: ${track.enabled}`);
     });

     setMicrophoneMuted(newMutedState);
@@ -656,13 +661,13 @@ export function useMicrophone() {
     try {
       await api.POST("/microphone/mute", { muted: newMutedState });
     } catch (error) {
-      console.warn("Failed to notify backend about microphone mute:", error);
+      devWarn("Failed to notify backend about microphone mute:", error);
     }

     setIsToggling(false);
     return { success: true };
   } catch (error) {
-    console.error("Failed to toggle microphone mute:", error);
+    devError("Failed to toggle microphone mute:", error);
     setIsToggling(false);
     return {
       success: false,
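Note that the mute path above toggles `track.enabled` rather than stopping tracks: a disabled audio track keeps the WebRTC sender attached and transmits silence, so unmuting is instant and needs no renegotiation. The core of that pattern, as a sketch:

// Sketch: mute by disabling tracks instead of stopping them, so the
// RTCRtpSender keeps its track and unmute is immediate.
function setStreamMuted(stream: MediaStream, muted: boolean): void {
  for (const track of stream.getAudioTracks()) {
    track.enabled = !muted; // disabled tracks send silence, not nothing
  }
}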
@@ -677,7 +682,7 @@ export function useMicrophone() {
   // Function to check WebRTC audio transmission stats
   const checkAudioTransmissionStats = useCallback(async () => {
     if (!microphoneSender) {
-      console.log("No microphone sender available");
+      devLog("No microphone sender available");
       return null;
     }

@@ -707,38 +712,38 @@ export function useMicrophone() {
         }
       });

-      console.log("Audio transmission stats:", audioStats);
+      devLog("Audio transmission stats:", audioStats);
       return audioStats;
     } catch (error) {
-      console.error("Failed to get audio transmission stats:", error);
+      devError("Failed to get audio transmission stats:", error);
       return null;
     }
   }, [microphoneSender]);

   // Comprehensive test function to diagnose microphone issues
   const testMicrophoneAudio = useCallback(async () => {
-    console.log("=== MICROPHONE AUDIO TEST ===");
+    devLog("=== MICROPHONE AUDIO TEST ===");

     // 1. Check if we have a stream
     const stream = microphoneStreamRef.current;
     if (!stream) {
-      console.log("❌ No microphone stream available");
+      devLog("❌ No microphone stream available");
       return;
     }

-    console.log("✅ Microphone stream exists:", stream.id);
+    devLog("✅ Microphone stream exists:", stream.id);

     // 2. Check audio tracks
     const audioTracks = stream.getAudioTracks();
-    console.log("Audio tracks:", audioTracks.length);
+    devLog("Audio tracks:", audioTracks.length);

     if (audioTracks.length === 0) {
-      console.log("❌ No audio tracks in stream");
+      devLog("❌ No audio tracks in stream");
       return;
     }

     const track = audioTracks[0];
-    console.log("✅ Audio track details:", {
+    devLog("✅ Audio track details:", {
       id: track.id,
       label: track.label,
       enabled: track.enabled,
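checkAudioTransmissionStats above walks the sender's stats report. A compact sketch of that technique using the standard RTCRtpSender.getStats() API; the fields pulled out here (packetsSent and bytesSent from the "outbound-rtp" entry) are illustrative, not necessarily the exact set the hook collects:

// Sketch: summarize outbound audio stats from a sender's RTCStatsReport.
async function getOutboundAudioStats(sender: RTCRtpSender) {
  const report = await sender.getStats();
  let stats: { packetsSent?: number; bytesSent?: number } = {};
  report.forEach(entry => {
    // "outbound-rtp" entries describe what this peer is transmitting.
    if (entry.type === "outbound-rtp" && entry.kind === "audio") {
      stats = { packetsSent: entry.packetsSent, bytesSent: entry.bytesSent };
    }
  });
  return stats;
}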
@@ -752,13 +757,13 @@ export function useMicrophone() {
       const analyser = audioContext.createAnalyser();
       const source = audioContext.createMediaStreamSource(stream);

-      analyser.fftSize = 256;
+      analyser.fftSize = AUDIO_CONFIG.ANALYSIS_FFT_SIZE;
       source.connect(analyser);

       const dataArray = new Uint8Array(analyser.frequencyBinCount);

-      console.log("🎤 Testing audio level detection for 5 seconds...");
-      console.log("Please speak into your microphone now!");
+      devLog("🎤 Testing audio level detection for 5 seconds...");
+      devLog("Please speak into your microphone now!");

       let maxLevel = 0;
       let sampleCount = 0;
@@ -771,39 +776,39 @@ export function useMicrophone() {
           sum += value * value;
         }
         const rms = Math.sqrt(sum / dataArray.length);
-        const level = Math.min(100, (rms / 255) * 100);
+        const level = Math.min(AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE, (rms / AUDIO_CONFIG.LEVEL_SCALING_FACTOR) * AUDIO_CONFIG.MAX_LEVEL_PERCENTAGE);

         maxLevel = Math.max(maxLevel, level);
         sampleCount++;

         if (sampleCount % 10 === 0) { // Log every 10th sample
-          console.log(`Audio level: ${level.toFixed(1)}% (max so far: ${maxLevel.toFixed(1)}%)`);
+          devLog(`Audio level: ${level.toFixed(1)}% (max so far: ${maxLevel.toFixed(1)}%)`);
         }
-      }, 100);
+      }, AUDIO_CONFIG.ANALYSIS_UPDATE_INTERVAL);

       setTimeout(() => {
         clearInterval(testInterval);
         source.disconnect();
         audioContext.close();

-        console.log("🎤 Audio test completed!");
-        console.log(`Maximum audio level detected: ${maxLevel.toFixed(1)}%`);
+        devLog("🎤 Audio test completed!");
+        devLog(`Maximum audio level detected: ${maxLevel.toFixed(1)}%`);

         if (maxLevel > 5) {
-          console.log("✅ Microphone is detecting audio!");
+          devLog("✅ Microphone is detecting audio!");
         } else {
-          console.log("❌ No significant audio detected. Check microphone permissions and hardware.");
+          devLog("❌ No significant audio detected. Check microphone permissions and hardware.");
         }
-      }, 5000);
+      }, NETWORK_CONFIG.AUDIO_TEST_DURATION);

     } catch (error) {
-      console.error("❌ Failed to test audio level:", error);
+      devError("❌ Failed to test audio level:", error);
     }

     // 4. Check WebRTC sender
     if (microphoneSender) {
-      console.log("✅ WebRTC sender exists");
-      console.log("Sender track:", {
+      devLog("✅ WebRTC sender exists");
+      devLog("Sender track:", {
         id: microphoneSender.track?.id,
         kind: microphoneSender.track?.kind,
         enabled: microphoneSender.track?.enabled,
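The level calculation above treats each byte-valued analyser bin (0-255) as an amplitude, takes the root mean square, and rescales it to a 0-100% level; the diff only replaces the literals 255 and 100 with the named constants LEVEL_SCALING_FACTOR and MAX_LEVEL_PERCENTAGE. The same computation in isolation:

// Sketch of the RMS level math used in the test loop above.
function levelFromBins(bins: Uint8Array): number {
  let sum = 0;
  for (let i = 0; i < bins.length; i++) {
    sum += bins[i] * bins[i]; // square each 0-255 bin value
  }
  const rms = Math.sqrt(sum / bins.length);
  return Math.min(100, (rms / 255) * 100); // normalize to a 0-100% level
}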
@@ -812,45 +817,45 @@ export function useMicrophone() {

       // Check if sender track matches stream track
       if (microphoneSender.track === track) {
-        console.log("✅ Sender track matches stream track");
+        devLog("✅ Sender track matches stream track");
       } else {
-        console.log("❌ Sender track does NOT match stream track");
+        devLog("❌ Sender track does NOT match stream track");
       }
     } else {
-      console.log("❌ No WebRTC sender available");
+      devLog("❌ No WebRTC sender available");
     }

     // 5. Check peer connection
     if (peerConnection) {
-      console.log("✅ Peer connection exists");
-      console.log("Connection state:", peerConnection.connectionState);
-      console.log("ICE connection state:", peerConnection.iceConnectionState);
+      devLog("✅ Peer connection exists");
+      devLog("Connection state:", peerConnection.connectionState);
+      devLog("ICE connection state:", peerConnection.iceConnectionState);

       const transceivers = peerConnection.getTransceivers();
       const audioTransceivers = transceivers.filter(t =>
         t.sender.track?.kind === 'audio' || t.receiver.track?.kind === 'audio'
       );

-      console.log("Audio transceivers:", audioTransceivers.map(t => ({
+      devLog("Audio transceivers:", audioTransceivers.map(t => ({
         direction: t.direction,
         senderTrack: t.sender.track?.id,
         receiverTrack: t.receiver.track?.id
       })));
     } else {
-      console.log("❌ No peer connection available");
+      devLog("❌ No peer connection available");
     }

   }, [microphoneSender, peerConnection]);

   const startMicrophoneDebounced = useCallback((deviceId?: string) => {
     debouncedOperation(async () => {
-      await startMicrophone(deviceId).catch(console.error);
+      await startMicrophone(deviceId).catch(devError);
     }, "start");
   }, [startMicrophone, debouncedOperation]);

   const stopMicrophoneDebounced = useCallback(() => {
     debouncedOperation(async () => {
-      await stopMicrophone().catch(console.error);
+      await stopMicrophone().catch(devError);
     }, "stop");
   }, [stopMicrophone, debouncedOperation]);

@@ -919,10 +924,10 @@ export function useMicrophone() {
     // Clean up stream directly without depending on the callback
     const stream = microphoneStreamRef.current;
     if (stream) {
-      console.log("Cleanup: stopping microphone stream on unmount");
+      devLog("Cleanup: stopping microphone stream on unmount");
       stream.getAudioTracks().forEach(track => {
         track.stop();
-        console.log(`Cleanup: stopped audio track ${track.id}`);
+        devLog(`Cleanup: stopped audio track ${track.id}`);
       });
       microphoneStreamRef.current = null;
     }
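debouncedOperation itself is not shown in this commit; its call sites suggest a helper that coalesces rapid start/stop requests so only the last one runs. A plausible sketch under that assumption, with the 100 ms window and the single shared timer both being guesses:

// Hypothetical sketch of a debouncedOperation helper; not the project's code.
function makeDebouncedOperation(delayMs = 100) {
  let timer: ReturnType<typeof setTimeout> | undefined;
  return (operation: () => Promise<void>, _label: string): void => {
    if (timer !== undefined) {
      clearTimeout(timer); // a newer "start"/"stop" supersedes the pending one
    }
    timer = setTimeout(() => {
      timer = undefined;
      void operation(); // `_label` would typically feed logging/diagnostics
    }, delayMs);
  };
}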
@@ -1,5 +1,7 @@
 import { useCallback, useEffect, useState } from "react";

+import { devError } from '../utils/debug';
+
 import { JsonRpcResponse, useJsonRpc } from "./useJsonRpc";
 import { useAudioEvents } from "./useAudioEvents";

@@ -25,7 +27,7 @@ export function useUsbDeviceConfig() {
     setLoading(false);

     if ("error" in resp) {
-      console.error("Failed to load USB devices:", resp.error);
+      devError("Failed to load USB devices:", resp.error);
       setError(resp.error.data || "Unknown error");
       setUsbDeviceConfig(null);
     } else {
@@ -0,0 +1,142 @@
+import api from '@/api';
+
+interface AudioConfig {
+  Quality: number;
+  Bitrate: number;
+  SampleRate: number;
+  Channels: number;
+  FrameSize: string;
+}
+
+type QualityPresets = Record<number, AudioConfig>;
+
+interface AudioQualityResponse {
+  current: AudioConfig;
+  presets: QualityPresets;
+}
+
+class AudioQualityService {
+  private audioPresets: QualityPresets | null = null;
+  private microphonePresets: QualityPresets | null = null;
+  private qualityLabels: Record<number, string> = {
+    0: 'Low',
+    1: 'Medium',
+    2: 'High',
+    3: 'Ultra'
+  };
+
+  /**
+   * Fetch audio quality presets from the backend
+   */
+  async fetchAudioQualityPresets(): Promise<AudioQualityResponse | null> {
+    try {
+      const response = await api.GET('/audio/quality');
+      if (response.ok) {
+        const data = await response.json();
+        this.audioPresets = data.presets;
+        this.updateQualityLabels(data.presets);
+        return data;
+      }
+    } catch (error) {
+      console.error('Failed to fetch audio quality presets:', error);
+    }
+    return null;
+  }
+
+  /**
+   * Fetch microphone quality presets from the backend
+   */
+  async fetchMicrophoneQualityPresets(): Promise<AudioQualityResponse | null> {
+    try {
+      const response = await api.GET('/microphone/quality');
+      if (response.ok) {
+        const data = await response.json();
+        this.microphonePresets = data.presets;
+        return data;
+      }
+    } catch (error) {
+      console.error('Failed to fetch microphone quality presets:', error);
+    }
+    return null;
+  }
+
+  /**
+   * Update quality labels with actual bitrates from presets
+   */
+  private updateQualityLabels(presets: QualityPresets): void {
+    const newQualityLabels: Record<number, string> = {};
+    Object.entries(presets).forEach(([qualityNum, preset]) => {
+      const quality = parseInt(qualityNum);
+      const qualityNames = ['Low', 'Medium', 'High', 'Ultra'];
+      const name = qualityNames[quality] || `Quality ${quality}`;
+      newQualityLabels[quality] = `${name} (${preset.Bitrate}kbps)`;
+    });
+    this.qualityLabels = newQualityLabels;
+  }
+
+  /**
+   * Get quality labels with bitrates
+   */
+  getQualityLabels(): Record<number, string> {
+    return this.qualityLabels;
+  }
+
+  /**
+   * Get cached audio presets
+   */
+  getAudioPresets(): QualityPresets | null {
+    return this.audioPresets;
+  }
+
+  /**
+   * Get cached microphone presets
+   */
+  getMicrophonePresets(): QualityPresets | null {
+    return this.microphonePresets;
+  }
+
+  /**
+   * Set audio quality
+   */
+  async setAudioQuality(quality: number): Promise<boolean> {
+    try {
+      const response = await api.POST('/audio/quality', { quality });
+      return response.ok;
+    } catch (error) {
+      console.error('Failed to set audio quality:', error);
+      return false;
+    }
+  }
+
+  /**
+   * Set microphone quality
+   */
+  async setMicrophoneQuality(quality: number): Promise<boolean> {
+    try {
+      const response = await api.POST('/microphone/quality', { quality });
+      return response.ok;
+    } catch (error) {
+      console.error('Failed to set microphone quality:', error);
+      return false;
+    }
+  }
+
+  /**
+   * Load both audio and microphone configurations
+   */
+  async loadAllConfigurations(): Promise<{
+    audio: AudioQualityResponse | null;
+    microphone: AudioQualityResponse | null;
+  }> {
+    const [audio, microphone] = await Promise.all([
+      this.fetchAudioQualityPresets(),
+      this.fetchMicrophoneQualityPresets()
+    ]);
+
+    return { audio, microphone };
+  }
+}
+
+// Export a singleton instance
+export const audioQualityService = new AudioQualityService();
+export default audioQualityService;
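A possible consumer of the singleton; the import paths and the chosen quality index are illustrative (indices 0-3 map to Low/Medium/High/Ultra per the preset map above):

// Assumed usage, not part of the commit; import paths are illustrative.
import audioQualityService from '@/services/audioQualityService';
import { devLog } from '@/utils/debug';

async function applyHighQuality(): Promise<void> {
  await audioQualityService.loadAllConfigurations();
  devLog('Available qualities:', audioQualityService.getQualityLabels());
  const ok = await audioQualityService.setAudioQuality(2); // 2 = High
  devLog('Switched to High quality:', ok);
}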
@@ -0,0 +1,64 @@
+/**
+ * Debug utilities for development mode logging
+ */
+
+// Check if we're in development mode
+const isDevelopment = import.meta.env.DEV || import.meta.env.MODE === 'development';
+
+/**
+ * Development-only console.log wrapper
+ * Only logs in development mode, silent in production
+ */
+export const devLog = (...args: unknown[]): void => {
+  if (isDevelopment) {
+    console.log(...args);
+  }
+};
+
+/**
+ * Development-only console.info wrapper
+ * Only logs in development mode, silent in production
+ */
+export const devInfo = (...args: unknown[]): void => {
+  if (isDevelopment) {
+    console.info(...args);
+  }
+};
+
+/**
+ * Development-only console.warn wrapper
+ * Only logs in development mode, silent in production
+ */
+export const devWarn = (...args: unknown[]): void => {
+  if (isDevelopment) {
+    console.warn(...args);
+  }
+};
+
+/**
+ * Development-only console.error wrapper
+ * Always logs errors, but with dev prefix in development
+ */
+export const devError = (...args: unknown[]): void => {
+  if (isDevelopment) {
+    console.error('[DEV]', ...args);
+  } else {
+    console.error(...args);
+  }
+};
+
+/**
+ * Development-only debug function wrapper
+ * Only executes the function in development mode
+ */
+export const devOnly = <T>(fn: () => T): T | undefined => {
+  if (isDevelopment) {
+    return fn();
+  }
+  return undefined;
+};
+
+/**
+ * Check if we're in development mode
+ */
+export const isDevMode = (): boolean => isDevelopment;
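Typical usage of these helpers (the relative import path depends on the calling module): devLog, devInfo, and devWarn are silent outside development builds, devError always reaches the console, and devOnly skips debug-only work entirely in production.

// Assumed usage; the import path varies with the calling module's location.
import { devLog, devError, devOnly } from '../utils/debug';

function handleFrame(frame: ArrayBuffer): void {
  devLog('frame received, bytes:', frame.byteLength); // dev builds only
  devOnly(() => {
    // Potentially expensive diagnostics run only in development.
    devLog('first bytes:', Array.from(new Uint8Array(frame).slice(0, 4)));
  });
  if (frame.byteLength === 0) {
    devError('empty audio frame'); // logged in production as well
  }
}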