Mirror of https://github.com/jetkvm/kvm.git
Compare commits: 7397408052 ... 96227f1e8e (1 commit: 96227f1e8e)

audio.go (31 lines changed)
@@ -21,7 +21,7 @@ var (
    activeConnections atomic.Int32
    audioLogger zerolog.Logger
    currentAudioTrack *webrtc.TrackLocalStaticSample
    currentInputTrack atomic.Pointer[string]
    inputTrackHandling atomic.Bool
    audioOutputEnabled atomic.Bool
    audioInputEnabled atomic.Bool
)
@@ -29,8 +29,7 @@ var (
func initAudio() {
    audioLogger = logging.GetDefaultLogger().With().Str("component", "audio-manager").Logger()

    ensureConfigLoaded()
    audioOutputEnabled.Store(config.AudioOutputEnabled)
    audioOutputEnabled.Store(true)
    audioInputEnabled.Store(true)

    audioLogger.Debug().Msg("Audio subsystem initialized")
@@ -152,10 +151,14 @@ func setAudioTrack(audioTrack *webrtc.TrackLocalStaticSample) {
}

func setPendingInputTrack(track *webrtc.TrackRemote) {
    trackID := track.ID()
    currentInputTrack.Store(&trackID)
    audioMutex.Lock()
    defer audioMutex.Unlock()

    // Start input track handler only once per WebRTC session
    if inputTrackHandling.CompareAndSwap(false, true) {
        go handleInputTrackForSession(track)
    }
}

// SetAudioOutputEnabled enables or disables audio output
func SetAudioOutputEnabled(enabled bool) error {
@@ -198,32 +201,22 @@ func SetAudioInputEnabled(enabled bool) error {
// handleInputTrackForSession runs for the entire WebRTC session lifetime
// It continuously reads from the track and sends to whatever relay is currently active
func handleInputTrackForSession(track *webrtc.TrackRemote) {
    myTrackID := track.ID()
    defer inputTrackHandling.Store(false)

    audioLogger.Debug().
        Str("codec", track.Codec().MimeType).
        Str("track_id", myTrackID).
        Str("track_id", track.ID()).
        Msg("starting session-lifetime track handler")

    for {
        // Check if we've been superseded by a new track
        currentTrackID := currentInputTrack.Load()
        if currentTrackID != nil && *currentTrackID != myTrackID {
            audioLogger.Debug().
                Str("my_track_id", myTrackID).
                Str("current_track_id", *currentTrackID).
                Msg("audio track handler exiting - superseded by new track")
            return
        }

        // Read RTP packet (must always read to keep track alive)
        rtpPacket, _, err := track.ReadRTP()
        if err != nil {
            if err == io.EOF {
                audioLogger.Debug().Str("track_id", myTrackID).Msg("audio track ended")
                audioLogger.Debug().Msg("audio track ended")
                return
            }
            audioLogger.Warn().Err(err).Str("track_id", myTrackID).Msg("failed to read RTP packet")
            audioLogger.Warn().Err(err).Msg("failed to read RTP packet")
            continue
        }

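A note on the audio.go hunks above: setPendingInputTrack and handleInputTrackForSession cooperate through two atomics. inputTrackHandling (an atomic.Bool) gates the goroutine so at most one handler runs per WebRTC session, and currentInputTrack (an atomic.Pointer[string]) lets a newly registered track tell the running handler to exit. Below is a minimal, self-contained Go sketch of that supersede pattern; the names (startHandler, handle, currentID) are illustrative and not from this repository.

// Minimal sketch of the supersede pattern: an atomic.Bool gates a single
// handler goroutine, and an atomic.Pointer[string] holds the ID the handler
// should currently serve. Names are illustrative, not jetkvm's.
package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

var (
    currentID atomic.Pointer[string] // ID of the track that should be handled
    handling  atomic.Bool            // true while a handler goroutine is running
)

// startHandler registers id as the current track and launches the handler
// only if none is running yet (CompareAndSwap is the once-per-session gate).
func startHandler(id string, wg *sync.WaitGroup) {
    currentID.Store(&id)
    if handling.CompareAndSwap(false, true) {
        wg.Add(1)
        go func() {
            defer wg.Done()
            defer handling.Store(false)
            handle(id)
        }()
    }
}

// handle loops until it notices that a newer track ID has superseded it.
func handle(myID string) {
    for {
        if cur := currentID.Load(); cur != nil && *cur != myID {
            fmt.Printf("handler %s exiting: superseded by %s\n", myID, *cur)
            return
        }
        // A real handler would block on track.ReadRTP() here.
        time.Sleep(10 * time.Millisecond)
    }
}

func main() {
    var wg sync.WaitGroup
    startHandler("track-a", &wg)
    time.Sleep(50 * time.Millisecond)
    startHandler("track-b", &wg) // supersedes track-a; the old handler exits
    wg.Wait()
}
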
@@ -107,8 +107,6 @@ type Config struct {
    DefaultLogLevel string `json:"default_log_level"`
    VideoSleepAfterSec int `json:"video_sleep_after_sec"`
    VideoQualityFactor float64 `json:"video_quality_factor"`
    AudioInputAutoEnable bool `json:"audio_input_auto_enable"`
    AudioOutputEnabled bool `json:"audio_output_enabled"`
}

func (c *Config) GetDisplayRotation() uint16 {

@@ -182,8 +180,6 @@ func getDefaultConfig() Config {
        }(),
        DefaultLogLevel: "INFO",
        VideoQualityFactor: 1.0,
        AudioInputAutoEnable: false,
        AudioOutputEnabled: true,
    }
}

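A note on the Config hunks: the two audio fields are dropped from both the struct and getDefaultConfig(). Assuming the loader uses a plain json.Unmarshal without DisallowUnknownFields, stale audio_input_auto_enable / audio_output_enabled keys in an existing config file are simply ignored on load, so no migration step should be needed. A small sketch of that behavior; the trimmed Config below is illustrative, not the repository's full struct.

// Sketch: encoding/json ignores unknown keys by default, so a config file
// written before this change still loads after the audio fields are removed.
// The trimmed Config here is illustrative, not the repository's full struct.
package main

import (
    "encoding/json"
    "fmt"
)

type Config struct {
    DefaultLogLevel    string  `json:"default_log_level"`
    VideoQualityFactor float64 `json:"video_quality_factor"`
    // audio_output_enabled / audio_input_auto_enable no longer exist here.
}

func main() {
    // An "old" on-disk config that still carries the removed audio keys.
    old := []byte(`{
        "default_log_level": "INFO",
        "video_quality_factor": 1.0,
        "audio_input_auto_enable": true,
        "audio_output_enabled": false
    }`)

    var c Config
    if err := json.Unmarshal(old, &c); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", c) // the stale audio keys are dropped silently
}
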
jsonrpc.go (21 lines changed)

@@ -946,16 +946,10 @@ func rpcSetUsbDeviceState(device string, enabled bool) error {
}

func rpcGetAudioOutputEnabled() (bool, error) {
    ensureConfigLoaded()
    return config.AudioOutputEnabled, nil
    return audioOutputEnabled.Load(), nil
}

func rpcSetAudioOutputEnabled(enabled bool) error {
    ensureConfigLoaded()
    config.AudioOutputEnabled = enabled
    if err := SaveConfig(); err != nil {
        return err
    }
    return SetAudioOutputEnabled(enabled)
}

@@ -967,17 +961,6 @@ func rpcSetAudioInputEnabled(enabled bool) error {
    return SetAudioInputEnabled(enabled)
}

func rpcGetAudioInputAutoEnable() (bool, error) {
    ensureConfigLoaded()
    return config.AudioInputAutoEnable, nil
}

func rpcSetAudioInputAutoEnable(enabled bool) error {
    ensureConfigLoaded()
    config.AudioInputAutoEnable = enabled
    return SaveConfig()
}

func rpcSetCloudUrl(apiUrl string, appUrl string) error {
    currentCloudURL := config.CloudURL
    config.CloudURL = apiUrl

@@ -1300,8 +1283,6 @@ var rpcHandlers = map[string]RPCHandler{
    "setAudioOutputEnabled": {Func: rpcSetAudioOutputEnabled, Params: []string{"enabled"}},
    "getAudioInputEnabled": {Func: rpcGetAudioInputEnabled},
    "setAudioInputEnabled": {Func: rpcSetAudioInputEnabled, Params: []string{"enabled"}},
    "getAudioInputAutoEnable": {Func: rpcGetAudioInputAutoEnable},
    "setAudioInputAutoEnable": {Func: rpcSetAudioInputAutoEnable, Params: []string{"enabled"}},
    "setCloudUrl": {Func: rpcSetCloudUrl, Params: []string{"apiUrl", "appUrl"}},
    "getKeyboardLayout": {Func: rpcGetKeyboardLayout},
    "setKeyboardLayout": {Func: rpcSetKeyboardLayout, Params: []string{"layout"}},

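A note on the jsonrpc.go hunks: judging by the hunk counts, rpcGetAudioOutputEnabled now returns the in-memory atomic, rpcSetAudioOutputEnabled reduces to SetAudioOutputEnabled(enabled) with no SaveConfig(), and the two AudioInputAutoEnable RPCs plus their handler-map entries are removed. The practical effect is that these switches reset to the defaults set in initAudio() on every restart. Below is a distilled sketch of that pattern; applyAudioOutput is an assumed stand-in for what SetAudioOutputEnabled presumably does, not the repository's code.

// Sketch of the in-memory flag pattern the jsonrpc.go hunks move to: the
// enabled state lives in an atomic.Bool and is not persisted, so it returns
// to its initAudio() default after a restart. rpcGet/rpcSet mirror the names
// in the diff; applyAudioOutput is an assumed stand-in, not the repo's code.
package main

import (
    "fmt"
    "sync/atomic"
)

var audioOutputEnabled atomic.Bool

func rpcGetAudioOutputEnabled() (bool, error) {
    return audioOutputEnabled.Load(), nil
}

func rpcSetAudioOutputEnabled(enabled bool) error {
    return applyAudioOutput(enabled) // no config write, no SaveConfig()
}

// applyAudioOutput stands in for SetAudioOutputEnabled, which presumably
// stores the flag and starts or stops the audio pipeline accordingly.
func applyAudioOutput(enabled bool) error {
    audioOutputEnabled.Store(enabled)
    fmt.Println("audio output enabled:", enabled)
    return nil
}

func main() {
    audioOutputEnabled.Store(true) // initAudio() default after this change
    _ = rpcSetAudioOutputEnabled(false)
    on, _ := rpcGetAudioOutputEnabled()
    fmt.Println("reported:", on) // false, but only until the next restart
}
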
@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Kunne ikke deaktivere lydindgang: {error}",
  "audio_input_failed_enable": "Kunne ikke aktivere lydindgang: {error}",
  "audio_input_title": "Lydindgang (Mikrofon)",
  "audio_input_auto_enable_disabled": "Automatisk aktivering af mikrofon deaktiveret",
  "audio_input_auto_enable_enabled": "Automatisk aktivering af mikrofon aktiveret",
  "audio_output_description": "Aktiver lyd fra mål til højttalere",
  "audio_output_disabled": "Lydudgang deaktiveret",
  "audio_output_enabled": "Lydudgang aktiveret",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Fehler beim Deaktivieren des Audioeingangs: {error}",
  "audio_input_failed_enable": "Fehler beim Aktivieren des Audioeingangs: {error}",
  "audio_input_title": "Audioeingang (Mikrofon)",
  "audio_input_auto_enable_disabled": "Automatische Mikrofonaktivierung deaktiviert",
  "audio_input_auto_enable_enabled": "Automatische Mikrofonaktivierung aktiviert",
  "audio_output_description": "Audio vom Ziel zu Lautsprechern aktivieren",
  "audio_output_disabled": "Audioausgang deaktiviert",
  "audio_output_enabled": "Audioausgang aktiviert",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Failed to disable audio input: {error}",
  "audio_input_failed_enable": "Failed to enable audio input: {error}",
  "audio_input_title": "Audio Input (Microphone)",
  "audio_input_auto_enable_disabled": "Auto-enable microphone disabled",
  "audio_input_auto_enable_enabled": "Auto-enable microphone enabled",
  "audio_output_description": "Enable audio from target to speakers",
  "audio_output_disabled": "Audio output disabled",
  "audio_output_enabled": "Audio output enabled",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Error al desactivar la entrada de audio: {error}",
  "audio_input_failed_enable": "Error al activar la entrada de audio: {error}",
  "audio_input_title": "Entrada de audio (Micrófono)",
  "audio_input_auto_enable_disabled": "Habilitación automática de micrófono desactivada",
  "audio_input_auto_enable_enabled": "Habilitación automática de micrófono activada",
  "audio_output_description": "Habilitar audio del objetivo a los altavoces",
  "audio_output_disabled": "Salida de audio desactivada",
  "audio_output_enabled": "Salida de audio activada",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Échec de la désactivation de l'entrée audio : {error}",
  "audio_input_failed_enable": "Échec de l'activation de l'entrée audio : {error}",
  "audio_input_title": "Entrée audio (Microphone)",
  "audio_input_auto_enable_disabled": "Activation automatique du microphone désactivée",
  "audio_input_auto_enable_enabled": "Activation automatique du microphone activée",
  "audio_output_description": "Activer l'audio de la cible vers les haut-parleurs",
  "audio_output_disabled": "Sortie audio désactivée",
  "audio_output_enabled": "Sortie audio activée",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Impossibile disabilitare l'ingresso audio: {error}",
  "audio_input_failed_enable": "Impossibile abilitare l'ingresso audio: {error}",
  "audio_input_title": "Ingresso audio (Microfono)",
  "audio_input_auto_enable_disabled": "Abilitazione automatica microfono disabilitata",
  "audio_input_auto_enable_enabled": "Abilitazione automatica microfono abilitata",
  "audio_output_description": "Abilita l'audio dal target agli altoparlanti",
  "audio_output_disabled": "Uscita audio disabilitata",
  "audio_output_enabled": "Uscita audio abilitata",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Kunne ikke deaktivere lydinngang: {error}",
  "audio_input_failed_enable": "Kunne ikke aktivere lydinngang: {error}",
  "audio_input_title": "Lydinngang (Mikrofon)",
  "audio_input_auto_enable_disabled": "Automatisk aktivering av mikrofon deaktivert",
  "audio_input_auto_enable_enabled": "Automatisk aktivering av mikrofon aktivert",
  "audio_output_description": "Aktiver lyd fra mål til høyttalere",
  "audio_output_disabled": "Lydutgang deaktivert",
  "audio_output_enabled": "Lydutgang aktivert",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "Det gick inte att inaktivera ljudingången: {error}",
  "audio_input_failed_enable": "Det gick inte att aktivera ljudingången: {error}",
  "audio_input_title": "Ljudingång (Mikrofon)",
  "audio_input_auto_enable_disabled": "Automatisk aktivering av mikrofon inaktiverad",
  "audio_input_auto_enable_enabled": "Automatisk aktivering av mikrofon aktiverad",
  "audio_output_description": "Aktivera ljud från mål till högtalare",
  "audio_output_disabled": "Ljudutgång inaktiverad",
  "audio_output_enabled": "Ljudutgång aktiverad",

@@ -57,8 +57,6 @@
  "audio_input_failed_disable": "禁用音频输入失败:{error}",
  "audio_input_failed_enable": "启用音频输入失败:{error}",
  "audio_input_title": "音频输入(麦克风)",
  "audio_input_auto_enable_disabled": "自动启用麦克风已禁用",
  "audio_input_auto_enable_enabled": "自动启用麦克风已启用",
  "audio_output_description": "启用从目标设备到扬声器的音频",
  "audio_output_disabled": "音频输出已禁用",
  "audio_output_enabled": "音频输出已启用",

@@ -16,7 +16,6 @@ export default function AudioPopover() {
  const [audioOutputEnabled, setAudioOutputEnabled] = useState<boolean>(true);
  const [usbAudioEnabled, setUsbAudioEnabled] = useState<boolean>(false);
  const [loading, setLoading] = useState(false);
  const [micLoading, setMicLoading] = useState(false);
  const isHttps = isSecureContext();

  useEffect(() => {

@@ -55,21 +54,6 @@ export default function AudioPopover() {
    });
  }, [send]);

  const handleMicrophoneToggle = useCallback((enabled: boolean) => {
    setMicLoading(true);
    send("setAudioInputEnabled", { enabled }, (resp: JsonRpcResponse) => {
      setMicLoading(false);
      if ("error" in resp) {
        const errorMsg = enabled
          ? m.audio_input_failed_enable({ error: String(resp.error.data || m.unknown_error()) })
          : m.audio_input_failed_disable({ error: String(resp.error.data || m.unknown_error()) });
        notifications.error(errorMsg);
      } else {
        setMicrophoneEnabled(enabled);
      }
    });
  }, [send, setMicrophoneEnabled]);

  return (
    <GridCard>
      <div className="space-y-4 p-4 py-3">

@@ -96,7 +80,6 @@ export default function AudioPopover() {
        <div className="h-px w-full bg-slate-800/10 dark:bg-slate-300/20" />

        <SettingsItem
          loading={micLoading}
          title={m.audio_microphone_title()}
          description={m.audio_microphone_description()}
          badge={!isHttps ? m.audio_https_only() : undefined}

@@ -106,7 +89,7 @@ export default function AudioPopover() {
          <Checkbox
            checked={microphoneEnabled}
            disabled={!isHttps}
            onChange={(e) => handleMicrophoneToggle(e.target.checked)}
            onChange={(e) => setMicrophoneEnabled(e.target.checked)}
          />
        </SettingsItem>
      </>

@@ -382,8 +382,6 @@ export interface SettingsState {
  setMicrophoneEnabled: (enabled: boolean) => void;
  audioInputAutoEnable: boolean;
  setAudioInputAutoEnable: (enabled: boolean) => void;

  resetMicrophoneState: () => void;
}

export const useSettingsStore = create(

@@ -432,14 +430,13 @@ export const useSettingsStore = create(
      videoContrast: 1.0,
      setVideoContrast: (value: number) => set({ videoContrast: value }),

      // Audio settings with defaults
      audioOutputEnabled: true,
      setAudioOutputEnabled: (enabled: boolean) => set({ audioOutputEnabled: enabled }),
      microphoneEnabled: false,
      setMicrophoneEnabled: (enabled: boolean) => set({ microphoneEnabled: enabled }),
      audioInputAutoEnable: false,
      setAudioInputAutoEnable: (enabled: boolean) => set({ audioInputAutoEnable: enabled }),

      resetMicrophoneState: () => set({ microphoneEnabled: false }),
    }),
    {
      name: "settings",

@@ -16,15 +16,20 @@ export default function SettingsAudioRoute() {

  useEffect(() => {
    send("getAudioOutputEnabled", {}, (resp: JsonRpcResponse) => {
      if ("error" in resp) return;
      if ("error" in resp) {
        return;
      }
      settings.setAudioOutputEnabled(resp.result as boolean);
    });

    send("getAudioInputAutoEnable", {}, (resp: JsonRpcResponse) => {
      if ("error" in resp) return;
    send("getAudioInputEnabled", {}, (resp: JsonRpcResponse) => {
      if ("error" in resp) {
        return;
      }
      settings.setAudioInputAutoEnable(resp.result as boolean);
    });
  }, [send, settings]);
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [send]);

  const handleAudioOutputEnabledChange = (enabled: boolean) => {
    send("setAudioOutputEnabled", { enabled }, (resp: JsonRpcResponse) => {

@@ -42,15 +47,16 @@ export default function SettingsAudioRoute() {
  };

  const handleAudioInputAutoEnableChange = (enabled: boolean) => {
    send("setAudioInputAutoEnable", { enabled }, (resp: JsonRpcResponse) => {
    send("setAudioInputEnabled", { enabled }, (resp: JsonRpcResponse) => {
      if ("error" in resp) {
        notifications.error(String(resp.error.data || m.unknown_error()));
        const errorMsg = enabled
          ? m.audio_input_failed_enable({ error: String(resp.error.data || m.unknown_error()) })
          : m.audio_input_failed_disable({ error: String(resp.error.data || m.unknown_error()) });
        notifications.error(errorMsg);
        return;
      }
      settings.setAudioInputAutoEnable(enabled);
      const successMsg = enabled
        ? m.audio_input_auto_enable_enabled()
        : m.audio_input_auto_enable_disabled();
      const successMsg = enabled ? m.audio_input_enabled() : m.audio_input_disabled();
      notifications.success(successMsg);
    });
  };

@@ -538,6 +538,11 @@ export default function KvmIdRoute() {
      const audioTrans = pc.addTransceiver("audio", { direction: "sendrecv" });
      setAudioTransceiver(audioTrans);

      // Enable microphone if auto-enable is on (only works over HTTPS or localhost)
      if (audioInputAutoEnable && isSecureContext()) {
        setMicrophoneEnabled(true);
      }

      const rpcDataChannel = pc.createDataChannel("rpc");
      rpcDataChannel.onclose = () => console.log("rpcDataChannel has closed");
      rpcDataChannel.onerror = (ev: Event) => console.error(`Error on DataChannel '${rpcDataChannel.label}': ${ev}`);

@@ -601,11 +606,14 @@ export default function KvmIdRoute() {
    }
  }, [peerConnectionState, cleanupAndStopReconnecting]);

  // Handle dynamic microphone enable/disable
  useEffect(() => {
    if (!audioTransceiver || !peerConnection) return;

    if (microphoneEnabled) {
      navigator.mediaDevices?.getUserMedia({
      // Request microphone access
      if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({
          audio: {
            echoCancellation: true,
            noiseSuppression: true,

@@ -616,24 +624,29 @@ export default function KvmIdRoute() {
          const audioTrack = stream.getAudioTracks()[0];
          if (audioTrack && audioTransceiver.sender) {
            audioTransceiver.sender.replaceTrack(audioTrack);
            console.log("Microphone enabled");
          }
        }).catch(() => {
        }).catch((err) => {
          console.warn("Microphone access denied or unavailable:", err.message);
          setMicrophoneEnabled(false);
        });
      }
    } else {
      // Disable microphone by removing the track
      if (audioTransceiver.sender.track) {
        audioTransceiver.sender.track.stop();
        audioTransceiver.sender.replaceTrack(null);
        console.log("Microphone disabled");
      }
    }
  }, [microphoneEnabled, audioTransceiver, peerConnection]);
  }, [microphoneEnabled, audioTransceiver, peerConnection, setMicrophoneEnabled]);

  // Auto-enable microphone when setting is loaded from backend
  useEffect(() => {
    if (!audioTransceiver || !peerConnection || !audioInputAutoEnable || microphoneEnabled) return;
    if (isSecureContext()) {
    if (audioInputAutoEnable && audioTransceiver && peerConnection && !microphoneEnabled && isSecureContext()) {
      setMicrophoneEnabled(true);
    }
  }, [audioInputAutoEnable, audioTransceiver, peerConnection, microphoneEnabled]);
  }, [audioInputAutoEnable, audioTransceiver, peerConnection, microphoneEnabled, setMicrophoneEnabled]);

  // Cleanup effect
  const { clearInboundRtpStats, clearCandidatePairStats } = useRTCStore();

@@ -793,6 +806,15 @@ export default function KvmIdRoute() {

  const { send } = useJsonRpc(onJsonRpcRequest);

  // Load audio input auto-enable setting from backend on mount
  useEffect(() => {
    send("getAudioInputEnabled", {}, (resp: JsonRpcResponse) => {
      if ("error" in resp) {
        return;
      }
      setAudioInputAutoEnable(resp.result as boolean);
    });
  }, [send, setAudioInputAutoEnable]);

  useEffect(() => {
    if (rpcDataChannel?.readyState !== "open") return;

@@ -805,15 +827,6 @@ export default function KvmIdRoute() {
    });
  }, [rpcDataChannel?.readyState, send, setHdmiState]);

  // Load audio input auto-enable preference from backend
  useEffect(() => {
    if (rpcDataChannel?.readyState !== "open") return;
    send("getAudioInputAutoEnable", {}, (resp: JsonRpcResponse) => {
      if ("error" in resp) return;
      setAudioInputAutoEnable(resp.result as boolean);
    });
  }, [rpcDataChannel?.readyState, send, setAudioInputAutoEnable]);

  const [needLedState, setNeedLedState] = useState(true);

  // request keyboard led state from the device

@@ -16,11 +16,8 @@ import { DeviceStatus } from "@routes/welcome-local";
import { DEVICE_API } from "@/ui.config";
import api from "@/api";
import { m } from "@localizations/messages.js";
import { useSettingsStore } from "@/hooks/stores";

const loader: LoaderFunction = async () => {
  useSettingsStore.getState().resetMicrophoneState();

  const res = await api
    .GET(`${DEVICE_API}/device/status`)
    .then(res => res.json() as Promise<DeviceStatus>);

@@ -1,19 +1,13 @@
import { useEffect } from "react";
import { useLocation, useSearchParams } from "react-router";

import { m } from "@localizations/messages.js";
import AuthLayout from "@components/AuthLayout";
import { useSettingsStore } from "@/hooks/stores";

export default function LoginRoute() {
  const [sq] = useSearchParams();
  const location = useLocation();
  const deviceId = sq.get("deviceId") || location.state?.deviceId;

  useEffect(() => {
    useSettingsStore.getState().resetMicrophoneState();
  }, []);

  if (deviceId) {
    return (
      <AuthLayout