mirror of https://github.com/jetkvm/kvm.git
Refactor: Simplify / rewrite Audio
This commit is contained in:
parent
67447e4e5e
commit
7872ddc8fc
|
|
@ -1,10 +1,6 @@
|
|||
{
|
||||
"name": "JetKVM",
|
||||
"image": "mcr.microsoft.com/devcontainers/go:1.25-trixie",
|
||||
"runArgs": [
|
||||
"--platform=linux/amd64"
|
||||
],
|
||||
"onCreateCommand": ".devcontainer/install-deps.sh",
|
||||
"features": {
|
||||
"ghcr.io/devcontainers/features/node:1": {
|
||||
// Should match what is defined in ui/package.json
|
||||
|
|
@ -14,6 +10,7 @@
|
|||
"mounts": [
|
||||
"source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,type=bind,consistency=cached"
|
||||
],
|
||||
"onCreateCommand": ".devcontainer/install-deps.sh",
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": [
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ jobs:
|
|||
cache: "npm"
|
||||
cache-dependency-path: "**/package-lock.json"
|
||||
- name: Set up Golang
|
||||
uses: actions/setup-go@v6.0.0
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: "^1.25.1"
|
||||
- name: Build frontend
|
||||
|
|
|
|||
|
|
@ -24,73 +24,14 @@ jobs:
|
|||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: oldstable
|
||||
- name: Setup build environment variables
|
||||
id: build-env
|
||||
run: |
|
||||
# Extract versions from Makefile
|
||||
ALSA_VERSION=$(grep '^ALSA_VERSION' Makefile | cut -d'=' -f2 | tr -d ' ')
|
||||
OPUS_VERSION=$(grep '^OPUS_VERSION' Makefile | cut -d'=' -f2 | tr -d ' ')
|
||||
|
||||
# Define buildkit path
|
||||
BUILDKIT_PATH="/opt/jetkvm-native-buildkit"
|
||||
BUILDKIT_FLAVOR="arm-rockchip830-linux-uclibcgnueabihf"
|
||||
|
||||
# Set environment variables
|
||||
echo "ALSA_VERSION=$ALSA_VERSION" >> $GITHUB_ENV
|
||||
echo "OPUS_VERSION=$OPUS_VERSION" >> $GITHUB_ENV
|
||||
echo "BUILDKIT_PATH=$BUILDKIT_PATH" >> $GITHUB_ENV
|
||||
echo "BUILDKIT_FLAVOR=$BUILDKIT_FLAVOR" >> $GITHUB_ENV
|
||||
|
||||
# Set outputs for use in other steps
|
||||
echo "alsa_version=$ALSA_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "opus_version=$OPUS_VERSION" >> $GITHUB_OUTPUT
|
||||
echo "buildkit_path=$BUILDKIT_PATH" >> $GITHUB_OUTPUT
|
||||
echo "buildkit_flavor=$BUILDKIT_FLAVOR" >> $GITHUB_OUTPUT
|
||||
|
||||
# Set resolved cache path
|
||||
CACHE_PATH="/opt/jetkvm-audio-libs"
|
||||
echo "CACHE_PATH=$CACHE_PATH" >> $GITHUB_ENV
|
||||
echo "cache_path=$CACHE_PATH" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "Extracted ALSA version: $ALSA_VERSION"
|
||||
echo "Extracted Opus version: $OPUS_VERSION"
|
||||
echo "Buildkit path: $BUILDKIT_PATH"
|
||||
echo "Cache path: $CACHE_PATH"
|
||||
- name: Restore audio dependencies cache
|
||||
id: cache-audio-deps
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: ${{ steps.build-env.outputs.cache_path }}
|
||||
key: audio-deps-${{ runner.os }}-alsa-${{ steps.build-env.outputs.alsa_version }}-opus-${{ steps.build-env.outputs.opus_version }}-buildkit
|
||||
- name: Setup development environment
|
||||
if: steps.cache-audio-deps.outputs.cache-hit != 'true'
|
||||
run: make dev_env
|
||||
env:
|
||||
ALSA_VERSION: ${{ env.ALSA_VERSION }}
|
||||
OPUS_VERSION: ${{ env.OPUS_VERSION }}
|
||||
- name: Create empty resource directory
|
||||
run: |
|
||||
mkdir -p static && touch static/.gitkeep
|
||||
- name: Save audio dependencies cache
|
||||
if: always() && steps.cache-audio-deps.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: ${{ steps.build-env.outputs.cache_path }}
|
||||
key: ${{ steps.cache-audio-deps.outputs.cache-primary-key }}
|
||||
- name: Lint
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
args: --verbose
|
||||
version: v2.1
|
||||
env:
|
||||
CGO_ENABLED: 1
|
||||
GOOS: linux
|
||||
GOARCH: arm
|
||||
GOARM: 7
|
||||
CC: ${{ steps.build-env.outputs.buildkit_path }}/bin/${{ steps.build-env.outputs.buildkit_flavor }}-gcc
|
||||
PKG_CONFIG_PATH: ${{ steps.build-env.outputs.cache_path }}/alsa-lib-${{ steps.build-env.outputs.alsa_version }}/utils:${{ steps.build-env.outputs.cache_path }}/opus-${{ steps.build-env.outputs.opus_version }}
|
||||
CGO_CFLAGS: "-O3 -mfpu=neon -mtune=cortex-a7 -mfloat-abi=hard -ftree-vectorize -ffast-math -funroll-loops -mvectorize-with-neon-quad -marm -D__ARM_NEON -I${{ steps.build-env.outputs.cache_path }}/alsa-lib-${{ steps.build-env.outputs.alsa_version }}/include -I${{ steps.build-env.outputs.cache_path }}/opus-${{ steps.build-env.outputs.opus_version }}/include -I${{ steps.build-env.outputs.cache_path }}/opus-${{ steps.build-env.outputs.opus_version }}/celt"
|
||||
CGO_LDFLAGS: "-L${{ steps.build-env.outputs.cache_path }}/alsa-lib-${{ steps.build-env.outputs.alsa_version }}/src/.libs -lasound -L${{ steps.build-env.outputs.cache_path }}/opus-${{ steps.build-env.outputs.opus_version }}/.libs -lopus -lm -ldl -static"
|
||||
|
|
|
|||
|
|
@ -104,7 +104,7 @@ jobs:
|
|||
EOF
|
||||
ssh jkci "cat /tmp/device-tests.json" > device-tests.json
|
||||
- name: Set up Golang
|
||||
uses: actions/setup-go@v5.5.0
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: "1.24.4"
|
||||
- name: Golang Test Report
|
||||
|
|
|
|||
|
|
@ -1,13 +1,7 @@
|
|||
bin/*
|
||||
static/*
|
||||
.vscode/
|
||||
tmp/
|
||||
.devcontainer/devcontainer-lock.json
|
||||
.idea
|
||||
.DS_Store
|
||||
*.log
|
||||
*.tmp
|
||||
*.code-workspace
|
||||
|
||||
.cache
|
||||
.vite
|
||||
|
|
@ -20,10 +14,3 @@ node_modules
|
|||
#internal/native/include
|
||||
#internal/native/lib
|
||||
internal/audio/bin/
|
||||
|
||||
# backup files
|
||||
*.bak
|
||||
|
||||
# core dumps
|
||||
core
|
||||
core.*
|
||||
|
|
|
|||
258
DEVELOPMENT.md
258
DEVELOPMENT.md
|
|
@ -11,43 +11,21 @@
|
|||
|
||||
</div>
|
||||
|
||||
|
||||
# JetKVM Development Guide
|
||||
|
||||
|
||||
Welcome to JetKVM development! This guide will help you get started quickly, whether you're fixing bugs, adding features, or just exploring the codebase.
|
||||
|
||||
## Get Started
|
||||
|
||||
|
||||
### Prerequisites
|
||||
- **A JetKVM device** (for full development)
|
||||
- **[Go 1.24.4+](https://go.dev/doc/install)** and **[Node.js 22.15.0](https://nodejs.org/en/download/)**
|
||||
- **[Git](https://git-scm.com/downloads)** for version control
|
||||
- **[SSH access](https://jetkvm.com/docs/advanced-usage/developing#developer-mode)** to your JetKVM device
|
||||
- **Audio build dependencies:**
|
||||
- **New:** The audio system uses a dual-subprocess architecture with CGO, ALSA, and Opus integration. The audio dependencies are automatically installed by the devcontainer or can be manually built using `.devcontainer/install_audio_deps.sh`.
|
||||
|
||||
|
||||
### Development Environment
|
||||
|
||||
**Recommended:** Development is best done on **Linux** or **macOS**.
|
||||
|
||||
#### Apple Silicon (M1/M2/M3) Mac Users
|
||||
|
||||
If you are developing on an Apple Silicon Mac, you should use a devcontainer to ensure compatibility with the JetKVM build environment (which targets linux/amd64 and ARM). There are two main options:
|
||||
|
||||
- **VS Code Dev Containers**: Open the project in VS Code and use the built-in Dev Containers support. The configuration in `.devcontainer/devcontainer.json` is set to use `linux/amd64` platform.
|
||||
- **Devpod**: [Devpod](https://devpod.sh/) is a fast, open-source tool for running devcontainers anywhere. If you use Devpod, go to **Settings → Experimental → Additional Environmental Variables** and add:
|
||||
- `DOCKER_DEFAULT_PLATFORM=linux/amd64`
|
||||
This ensures all builds run in the correct architecture.
|
||||
- **devcontainer CLI**: You can also use the [devcontainer CLI](https://github.com/devcontainers/cli) to launch the devcontainer from the terminal.
|
||||
|
||||
**Important:** If you're switching from an ARM64 devcontainer or updating the platform settings, you'll need to rebuild the devcontainer completely:
|
||||
- In VS Code: Run "Dev Containers: Rebuild Container" from the command palette
|
||||
- With devcontainer CLI: Use `devcontainer up --build`
|
||||
|
||||
This approach ensures compatibility with all shell scripts, build tools, and cross-compilation steps used in the project.
|
||||
**Recommended:** Development is best done on **Linux** or **macOS**.
|
||||
|
||||
If you're using Windows, we strongly recommend using **WSL (Windows Subsystem for Linux)** for the best development experience:
|
||||
- [Install WSL on Windows](https://docs.microsoft.com/en-us/windows/wsl/install)
|
||||
|
|
@ -55,7 +33,6 @@ If you're using Windows, we strongly recommend using **WSL (Windows Subsystem fo
|
|||
|
||||
This ensures compatibility with shell scripts and build tools used in the project.
|
||||
|
||||
|
||||
### Project Setup
|
||||
|
||||
1. **Clone the repository:**
|
||||
|
|
@ -69,25 +46,16 @@ This ensures compatibility with shell scripts and build tools used in the projec
|
|||
go version && node --version
|
||||
```
|
||||
|
||||
3. **Set up the cross-compiler and audio dependencies:**
|
||||
```bash
|
||||
make dev_env
|
||||
# This will install audio dependencies using .devcontainer/install_audio_deps.sh
|
||||
# It will build ALSA/Opus static libs in /opt/jetkvm-audio-libs using the buildkit from /opt/jetkvm-native-buildkit
|
||||
#
|
||||
# **Note:** This is required for the audio subprocess architecture. If you skip this step, builds will not succeed.
|
||||
```
|
||||
3. **Find your JetKVM IP address** (check your router or device screen)
|
||||
|
||||
4. **Find your JetKVM IP address** (check your router or device screen)
|
||||
|
||||
5. **Deploy and test:**
|
||||
4. **Deploy and test:**
|
||||
```bash
|
||||
./dev_deploy.sh -r 192.168.1.100 # Replace with your device IP
|
||||
```
|
||||
|
||||
6. **Open in browser:** `http://192.168.1.100`
|
||||
5. **Open in browser:** `http://192.168.1.100`
|
||||
|
||||
That's it! You're now running your own development version of JetKVM, **with bidirectional audio streaming using the dual-subprocess architecture.**
|
||||
That's it! You're now running your own development version of JetKVM.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -103,15 +71,13 @@ npm install
|
|||
|
||||
Now edit files in `ui/src/` and see changes live in your browser!
|
||||
|
||||
|
||||
### Modify the backend (including audio)
|
||||
### Modify the backend
|
||||
|
||||
```bash
|
||||
# Edit Go files (config.go, web.go, internal/audio, etc.)
|
||||
# Edit Go files (config.go, web.go, etc.)
|
||||
./dev_deploy.sh -r 192.168.1.100 --skip-ui-build
|
||||
```
|
||||
|
||||
|
||||
### Run tests
|
||||
|
||||
```bash
|
||||
|
|
@ -127,32 +93,46 @@ tail -f /var/log/jetkvm.log
|
|||
|
||||
---
|
||||
|
||||
|
||||
## Project Layout
|
||||
|
||||
```
|
||||
/kvm/
|
||||
├── main.go # App entry point
|
||||
├── config.go # Settings & configuration
|
||||
├── web.go # API endpoints
|
||||
├── ui/ # React frontend
|
||||
│ ├── src/routes/ # Pages (login, settings, etc.)
|
||||
│ └── src/components/ # UI components
|
||||
├── internal/ # Internal Go packages
|
||||
│ └── audio/ # Audio Processing Layer (CGO, ALSA, Opus)
|
||||
│ ├── native/ # CGO / Native code glue layer
|
||||
│ ├── native/cgo/ # C files for the native library (HDMI, Touchscreen, etc.)
|
||||
│ ├── native/eez/ # EEZ Studio Project files (for Touchscreen)
|
||||
│ ├── hidrpc/ # HIDRPC implementation for HID devices (keyboard, mouse, etc.)
|
||||
│ ├── logging/ # Logging implementation
|
||||
│ ├── usbgadget/ # USB gadget
|
||||
│ └── websecurity/ # TLS certificate management
|
||||
└── resource # netboot iso and other resources
|
||||
├── main.go # App entry point
|
||||
├── config.go # Settings & configuration
|
||||
├── display.go # Device UI control
|
||||
├── web.go # API endpoints
|
||||
├── cmd/ # Command line main
|
||||
├── internal/ # Internal Go packages
|
||||
│ ├── confparser/ # Configuration file implementation
|
||||
│ ├── hidrpc/ # HIDRPC implementation for HID devices (keyboard, mouse, etc.)
|
||||
│ ├── logging/ # Logging implementation
|
||||
│ ├── mdns/ # mDNS implementation
|
||||
│ ├── native/ # CGO / Native code glue layer (on-device hardware)
|
||||
│ │ ├── cgo/ # C files for the native library (HDMI, Touchscreen, etc.)
|
||||
│ │ └── eez/ # EEZ Studio Project files (for Touchscreen)
|
||||
│ ├── network/ # Network implementation
|
||||
│ ├── timesync/ # Time sync/NTP implementation
|
||||
│ ├── tzdata/ # Timezone data and generation
|
||||
│ ├── udhcpc/ # DHCP implementation
|
||||
│ ├── usbgadget/ # USB gadget
|
||||
│ ├── utils/ # SSH handling
|
||||
│ └── websecure/ # TLS certificate management
|
||||
├── resource/ # netboot iso and other resources
|
||||
├── scripts/ # Bash shell scripts for building and deploying
|
||||
└── static/ # (react client build output)
|
||||
└── ui/ # React frontend
|
||||
├── public/ # UI website static images and fonts
|
||||
└── src/ # Client React UI
|
||||
├── assets/ # UI in-page images
|
||||
├── components/ # UI components
|
||||
├── hooks/ # Hooks (stores, RPC handling, virtual devices)
|
||||
├── keyboardLayouts/ # Keyboard layout definitions
|
||||
├── providers/ # Feature flags
|
||||
└── routes/ # Pages (login, settings, etc.)
|
||||
```
|
||||
|
||||
**Key files for beginners:**
|
||||
|
||||
- `internal/audio/` - [NEW] Dual-subprocess audio architecture (CGO, ALSA, Opus)
|
||||
- `web.go` - Add new API endpoints here
|
||||
- `config.go` - Add new settings here
|
||||
- `ui/src/routes/` - Add new pages here
|
||||
|
|
@ -187,7 +167,7 @@ Please click the `Build` button in EEZ Studio then run `./dev_deploy.sh -r <YOUR
|
|||
|
||||
### Quick Backend Changes
|
||||
|
||||
*Best for: API, backend, or audio logic changes (including audio subprocess architecture)*
|
||||
*Best for: API or backend logic changes*
|
||||
|
||||
```bash
|
||||
# Skip frontend build for faster deployment
|
||||
|
|
@ -244,100 +224,6 @@ systemctl restart jetkvm
|
|||
cd ui && npm run lint
|
||||
```
|
||||
|
||||
### Essential Makefile Targets
|
||||
|
||||
The project includes several essential Makefile targets for development environment setup, building, and code quality:
|
||||
|
||||
#### Development Environment Setup
|
||||
|
||||
```bash
|
||||
# Set up complete development environment (recommended first step)
|
||||
make dev_env
|
||||
# This runs build_audio_deps + installs Go tools
|
||||
# - Uses buildkit from /opt/jetkvm-native-buildkit for cross-compilation
|
||||
# - Builds ALSA and Opus static libraries for ARM in /opt/jetkvm-audio-libs
|
||||
# - Installs goimports and other Go development tools
|
||||
|
||||
# Build only the audio dependencies
|
||||
make build_audio_deps
|
||||
```
|
||||
|
||||
#### Building
|
||||
|
||||
```bash
|
||||
# Build development version with debug symbols
|
||||
make build_dev
|
||||
# Builds jetkvm_app with version like 0.4.7-dev20241222
|
||||
# Requires: make dev_env (for buildkit and audio dependencies)
|
||||
|
||||
# Build release version (production)
|
||||
make build_release
|
||||
# Builds optimized release version
|
||||
# Requires: make dev_env and frontend build
|
||||
|
||||
# Build test binaries for device testing
|
||||
make build_dev_test
|
||||
# Creates device-tests.tar.gz with all test binaries
|
||||
```
|
||||
|
||||
#### Code Quality and Linting
|
||||
|
||||
```bash
|
||||
# Run both Go and UI linting
|
||||
make lint
|
||||
|
||||
# Run both Go and UI linting with auto-fix
|
||||
make lint-fix
|
||||
|
||||
# Run only Go linting
|
||||
make lint-go
|
||||
|
||||
# Run only Go linting with auto-fix
|
||||
make lint-go-fix
|
||||
|
||||
# Run only UI linting
|
||||
make lint-ui
|
||||
|
||||
# Run only UI linting with auto-fix
|
||||
make lint-ui-fix
|
||||
```
|
||||
|
||||
**Note:** The Go linting targets (`lint-go`, `lint-go-fix`, and the combined `lint`/`lint-fix` targets) require audio dependencies. Run `make dev_env` first if you haven't already.
|
||||
|
||||
### Development Deployment Script
|
||||
|
||||
The `dev_deploy.sh` script is the primary tool for deploying your development changes to a JetKVM device:
|
||||
|
||||
```bash
|
||||
# Basic deployment (builds and deploys everything)
|
||||
./dev_deploy.sh -r 192.168.1.100
|
||||
|
||||
# Skip UI build for faster backend-only deployment
|
||||
./dev_deploy.sh -r 192.168.1.100 --skip-ui-build
|
||||
|
||||
# Run Go tests on the device after deployment
|
||||
./dev_deploy.sh -r 192.168.1.100 --run-go-tests
|
||||
|
||||
# Deploy with release build and install
|
||||
./dev_deploy.sh -r 192.168.1.100 -i
|
||||
|
||||
# View all available options
|
||||
./dev_deploy.sh --help
|
||||
```
|
||||
|
||||
**Key features:**
|
||||
- Automatically builds the Go backend with proper cross-compilation
|
||||
- Optionally builds the React frontend (unless `--skip-ui-build`)
|
||||
- Deploys binaries to the device via SSH/SCP
|
||||
- Restarts the JetKVM service
|
||||
- Can run tests on the device
|
||||
- Supports custom SSH user and various deployment options
|
||||
|
||||
**Requirements:**
|
||||
- SSH access to your JetKVM device
|
||||
- `make dev_env` must be run first (for buildkit and audio dependencies)
|
||||
- Device IP address or hostname
|
||||
|
||||
### API Testing
|
||||
|
||||
```bash
|
||||
|
|
@ -349,8 +235,7 @@ curl -X POST http://<IP>/auth/password-local \
|
|||
|
||||
---
|
||||
|
||||
|
||||
### Common Issues & Solutions
|
||||
## Common Issues & Solutions
|
||||
|
||||
### "Build failed" or "Permission denied"
|
||||
|
||||
|
|
@ -362,8 +247,6 @@ ssh root@<IP> chmod +x /userdata/jetkvm/bin/jetkvm_app_debug
|
|||
go clean -modcache
|
||||
go mod tidy
|
||||
make build_dev
|
||||
# If you see errors about missing ALSA/Opus or toolchain, run:
|
||||
make dev_env # Required for audio subprocess architecture
|
||||
```
|
||||
|
||||
### "Can't connect to device"
|
||||
|
|
@ -376,15 +259,6 @@ ping <IP>
|
|||
ssh root@<IP> echo "Connection OK"
|
||||
```
|
||||
|
||||
|
||||
### "Audio not working"
|
||||
|
||||
```bash
|
||||
# Make sure you have run:
|
||||
make dev_env
|
||||
# # If you see errors about ALSA/Opus, check logs and re-run: make build_audio_deps
|
||||
```
|
||||
|
||||
### "Frontend not updating"
|
||||
|
||||
```bash
|
||||
|
|
@ -395,31 +269,69 @@ rm -rf node_modules
|
|||
npm install
|
||||
```
|
||||
|
||||
### "Device UI Fails to Build"
|
||||
|
||||
If while trying to build you run into an error message similar to :
|
||||
```plaintext
|
||||
In file included from /workspaces/kvm/internal/native/cgo/ctrl.c:15:
|
||||
/workspaces/kvm/internal/native/cgo/ui_index.h:4:10: fatal error: ui/ui.h: No such file or directory
|
||||
#include "ui/ui.h"
|
||||
^~~~~~~~~
|
||||
compilation terminated.
|
||||
```
|
||||
This means that your system didn't create the directory-link to from _./internal/native/cgo/ui_ to ./internal/native/eez/src/ui when the repository was checked out. You can verify this is the case if _./internal/native/cgo/ui_ appears as a plain text file with only the textual contents:
|
||||
```plaintext
|
||||
../eez/src/ui
|
||||
```
|
||||
|
||||
If this happens to you need to [enable git creation of symbolic links](https://stackoverflow.com/a/59761201/2076) either globally or for the KVM repository:
|
||||
```bash
|
||||
# Globally enable git to create symlinks
|
||||
git config --global core.symlinks true
|
||||
git restore internal/native/cgo/ui
|
||||
```
|
||||
```bash
|
||||
# Enable git to create symlinks only in this project
|
||||
git config core.symlinks true
|
||||
git restore internal/native/cgo/ui
|
||||
```
|
||||
|
||||
Or if you want to manually create the symlink use:
|
||||
```bash
|
||||
# linux
|
||||
cd internal/native/cgo
|
||||
rm ui
|
||||
ln -s ../eez/src/ui ui
|
||||
```
|
||||
```dos
|
||||
rem Windows
|
||||
cd internal/native/cgo
|
||||
del ui
|
||||
mklink /d ui ..\eez\src\ui
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
|
||||
|
||||
### Adding a New Feature
|
||||
|
||||
1. **Backend:** Add API endpoint in `web.go` or extend audio in `internal/audio/`
|
||||
1. **Backend:** Add API endpoint in `web.go`
|
||||
2. **Config:** Add settings in `config.go`
|
||||
3. **Frontend:** Add UI in `ui/src/routes/`
|
||||
4. **Test:** Deploy and test with `./dev_deploy.sh`
|
||||
|
||||
|
||||
### Code Style
|
||||
|
||||
- **Go:** Follow standard Go conventions
|
||||
- **TypeScript:** Use TypeScript for type safety
|
||||
- **React:** Keep components small and reusable
|
||||
- **Audio/CGO:** Keep C/Go integration minimal, robust, and well-documented. Use zerolog for all logging.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
# Enable debug logging
|
||||
export LOG_TRACE_SCOPES="jetkvm,cloud,websocket,native,jsonrpc,audio"
|
||||
export LOG_TRACE_SCOPES="jetkvm,cloud,websocket,native,jsonrpc"
|
||||
|
||||
# Frontend development
|
||||
export JETKVM_PROXY_URL="ws://<IP>"
|
||||
|
|
@ -471,7 +383,7 @@ curl http://api:$JETKVM_PASSWORD@YOUR_DEVICE_IP/developer/pprof/
|
|||
|
||||
```bash
|
||||
# Enable trace logging (useful for debugging)
|
||||
export LOG_TRACE_SCOPES="jetkvm,cloud,websocket,native,jsonrpc,audio"
|
||||
export LOG_TRACE_SCOPES="jetkvm,cloud,websocket,native,jsonrpc"
|
||||
|
||||
# For frontend development
|
||||
export JETKVM_PROXY_URL="ws://<JETKVM_IP>"
|
||||
|
|
|
|||
20
Makefile
20
Makefile
|
|
@ -40,8 +40,12 @@ export GOARCH := arm
|
|||
export GOARM := 7
|
||||
export CC := $(BUILDKIT_PATH)/bin/$(BUILDKIT_FLAVOR)-gcc
|
||||
export CGO_ENABLED := 1
|
||||
export CGO_CFLAGS := $(OPTIM_CFLAGS) -I$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/include -I$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/sysroot/usr/include -I$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/celt
|
||||
export CGO_LDFLAGS := -L$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/lib -L$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/sysroot/usr/lib -lrockit -lrockchip_mpp -lrga -lpthread -L$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/src/.libs -lasound -L$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/.libs -lopus -lm -ldl
|
||||
export CGO_CFLAGS := $(OPTIM_CFLAGS) -I$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/include -I$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/sysroot/usr/include
|
||||
export CGO_LDFLAGS := -L$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/lib -L$(BUILDKIT_PATH)/$(BUILDKIT_FLAVOR)/sysroot/usr/lib -lrockit -lrockchip_mpp -lrga -lpthread -lm -ldl
|
||||
|
||||
# Audio-specific flags (only used for audio C binaries, NOT for main Go app)
|
||||
AUDIO_CFLAGS := $(CGO_CFLAGS) -I$(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/include -I$(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/celt
|
||||
AUDIO_LDFLAGS := $(AUDIO_LIBS_DIR)/alsa-lib-$(ALSA_VERSION)/src/.libs/libasound.a $(AUDIO_LIBS_DIR)/opus-$(OPUS_VERSION)/.libs/libopus.a -lm -ldl -lpthread
|
||||
|
||||
PROMETHEUS_TAG := github.com/prometheus/common/version
|
||||
KVM_PKG_NAME := github.com/jetkvm/kvm
|
||||
|
|
@ -93,15 +97,15 @@ build_audio_output: build_audio_deps
|
|||
@if [ "$(SKIP_AUDIO_BINARIES_IF_EXISTS)" = "1" ] && [ -f "$(BIN_DIR)/jetkvm_audio_output" ]; then \
|
||||
echo "jetkvm_audio_output already exists, skipping build..."; \
|
||||
else \
|
||||
echo "Building audio output binary..."; \
|
||||
echo "Building audio output binary (100% static)..."; \
|
||||
mkdir -p $(BIN_DIR); \
|
||||
$(CC) $(CGO_CFLAGS) \
|
||||
$(CC) $(AUDIO_CFLAGS) -static \
|
||||
-o $(BIN_DIR)/jetkvm_audio_output \
|
||||
internal/audio/c/jetkvm_audio_output.c \
|
||||
internal/audio/c/ipc_protocol.c \
|
||||
internal/audio/c/audio_common.c \
|
||||
internal/audio/c/audio.c \
|
||||
$(CGO_LDFLAGS); \
|
||||
$(AUDIO_LDFLAGS); \
|
||||
fi
|
||||
|
||||
# Build audio input C binary (IPC → Opus decode → ALSA playback)
|
||||
|
|
@ -109,15 +113,15 @@ build_audio_input: build_audio_deps
|
|||
@if [ "$(SKIP_AUDIO_BINARIES_IF_EXISTS)" = "1" ] && [ -f "$(BIN_DIR)/jetkvm_audio_input" ]; then \
|
||||
echo "jetkvm_audio_input already exists, skipping build..."; \
|
||||
else \
|
||||
echo "Building audio input binary..."; \
|
||||
echo "Building audio input binary (100% static)..."; \
|
||||
mkdir -p $(BIN_DIR); \
|
||||
$(CC) $(CGO_CFLAGS) \
|
||||
$(CC) $(AUDIO_CFLAGS) -static \
|
||||
-o $(BIN_DIR)/jetkvm_audio_input \
|
||||
internal/audio/c/jetkvm_audio_input.c \
|
||||
internal/audio/c/ipc_protocol.c \
|
||||
internal/audio/c/audio_common.c \
|
||||
internal/audio/c/audio.c \
|
||||
$(CGO_LDFLAGS); \
|
||||
$(AUDIO_LDFLAGS); \
|
||||
fi
|
||||
|
||||
# Build both audio binaries and copy to embed location
|
||||
|
|
|
|||
22
README.md
22
README.md
|
|
@ -11,20 +11,13 @@
|
|||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
JetKVM is a high-performance, open-source KVM over IP (Keyboard, Video, Mouse, Audio) solution designed for efficient remote management of computers, servers, and workstations. Whether you're dealing with boot failures, installing a new operating system, adjusting BIOS settings, or simply taking control of a machine from afar, JetKVM provides the tools to get it done effectively.
|
||||
|
||||
|
||||
|
||||
|
||||
JetKVM is a high-performance, open-source KVM over IP (Keyboard, Video, Mouse) solution designed for efficient remote management of computers, servers, and workstations. Whether you're dealing with boot failures, installing a new operating system, adjusting BIOS settings, or simply taking control of a machine from afar, JetKVM provides the tools to get it done effectively.
|
||||
|
||||
## Features
|
||||
|
||||
- **Ultra-low Latency** - 1080p@60FPS video with 30-60ms latency using H.264 encoding. Smooth mouse, keyboard, and audio for responsive remote control.
|
||||
- **First-Class Audio Support** - JetKVM supports bidirectional, low-latency audio streaming using a dual-subprocess architecture with ALSA and Opus integration via CGO. Features both audio output (PC→Browser) and audio input (Browser→PC) with dedicated subprocesses for optimal performance and isolation.
|
||||
- **Ultra-low Latency** - 1080p@60FPS video with 30-60ms latency using H.264 encoding. Smooth mouse and keyboard interaction for responsive remote control.
|
||||
- **Free & Optional Remote Access** - Remote management via JetKVM Cloud using WebRTC.
|
||||
- **Open-source software** - Written in Golang (with CGO for audio) on Linux. Easily customizable through SSH access to the JetKVM device.
|
||||
- **Open-source software** - Written in Golang on Linux. Easily customizable through SSH access to the JetKVM device.
|
||||
|
||||
## Contributing
|
||||
|
||||
|
|
@ -40,19 +33,18 @@ If you've found an issue and want to report it, please check our [Issues](https:
|
|||
|
||||
# Development
|
||||
|
||||
JetKVM is written in Go & TypeScript, with some C for low-level integration
|
||||
JetKVM is written in Go & TypeScript. with some bits and pieces written in C. An intermediate level of Go & TypeScript knowledge is recommended for comfortable programming.
|
||||
|
||||
The project contains two main parts: the backend software (Go, CGO) that runs on the KVM device, and the frontend software (React/TypeScript) that is served by the KVM device and the cloud.
|
||||
The project contains two main parts, the backend software that runs on the KVM device and the frontend software that is served by the KVM device, and also the cloud.
|
||||
|
||||
For comprehensive development information, including setup, testing, debugging, and contribution guidelines, see **[DEVELOPMENT.md](DEVELOPMENT.md)**.
|
||||
|
||||
For quick device development, use the `./dev_deploy.sh` script. It will build the frontend and backend and deploy them to the local KVM device. Run `./dev_deploy.sh --help` for more information.
|
||||
|
||||
|
||||
## Backend
|
||||
|
||||
The backend is written in Go and is responsible for KVM device management, audio/video streaming, the cloud API, and the cloud web. **Audio uses dedicated subprocesses for both output and input streams, with CGO-based ALSA and Opus processing, IPC communication via Unix sockets, and comprehensive process supervision for reliability.**
|
||||
The backend is written in Go and is responsible for the KVM device management, the cloud API and the cloud web.
|
||||
|
||||
## Frontend
|
||||
|
||||
The frontend is written in React and TypeScript and is served by the KVM device. It has three build targets: `device`, `development`, and `production`. Development is used for the cloud version on your local machine, device is used for building the frontend for the KVM device, and production is used for building the frontend for the cloud.
|
||||
The frontend is written in React and TypeScript and is served by the KVM device. It has three build targets: `device`, `development` and `production`. Development is used for development of the cloud version on your local machine, device is used for building the frontend for the KVM device and production is used for building the frontend for the cloud.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,351 @@
|
|||
package kvm
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/audio"
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/pion/webrtc/v4"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
const (
|
||||
socketPathOutput = "/var/run/audio_output.sock"
|
||||
socketPathInput = "/var/run/audio_input.sock"
|
||||
)
|
||||
|
||||
var (
|
||||
audioMutex sync.Mutex
|
||||
outputSupervisor *audio.Supervisor
|
||||
inputSupervisor *audio.Supervisor
|
||||
outputClient *audio.IPCClient
|
||||
inputClient *audio.IPCClient
|
||||
outputRelay *audio.OutputRelay
|
||||
inputRelay *audio.InputRelay
|
||||
audioInitialized bool
|
||||
activeConnections atomic.Int32
|
||||
audioLogger zerolog.Logger
|
||||
currentAudioTrack *webrtc.TrackLocalStaticSample
|
||||
inputTrackHandling atomic.Bool
|
||||
useUSBForAudioOutput bool
|
||||
audioOutputEnabled atomic.Bool
|
||||
audioInputEnabled atomic.Bool
|
||||
)
|
||||
|
||||
func initAudio() {
|
||||
audioLogger = logging.GetDefaultLogger().With().Str("component", "audio-manager").Logger()
|
||||
|
||||
if err := audio.ExtractEmbeddedBinaries(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to extract audio binaries")
|
||||
return
|
||||
}
|
||||
|
||||
// Load audio output source from config
|
||||
ensureConfigLoaded()
|
||||
useUSBForAudioOutput = config.AudioOutputSource == "usb"
|
||||
|
||||
// Enable both by default
|
||||
audioOutputEnabled.Store(true)
|
||||
audioInputEnabled.Store(true)
|
||||
|
||||
audioLogger.Debug().
|
||||
Str("source", config.AudioOutputSource).
|
||||
Msg("Audio subsystem initialized")
|
||||
audioInitialized = true
|
||||
}
|
||||
|
||||
// startAudioSubprocesses starts audio subprocesses and relays (skips already running ones)
|
||||
func startAudioSubprocesses() error {
|
||||
audioMutex.Lock()
|
||||
defer audioMutex.Unlock()
|
||||
|
||||
if !audioInitialized {
|
||||
audioLogger.Warn().Msg("Audio not initialized, skipping subprocess start")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start output subprocess if not running and enabled
|
||||
if outputSupervisor == nil && audioOutputEnabled.Load() {
|
||||
alsaDevice := "hw:0,0" // HDMI
|
||||
if useUSBForAudioOutput {
|
||||
alsaDevice = "hw:1,0" // USB
|
||||
}
|
||||
|
||||
outputSupervisor = audio.NewSupervisor(
|
||||
"audio-output",
|
||||
audio.GetAudioOutputBinaryPath(),
|
||||
socketPathOutput,
|
||||
[]string{
|
||||
"ALSA_CAPTURE_DEVICE=" + alsaDevice,
|
||||
"OPUS_BITRATE=128000",
|
||||
"OPUS_COMPLEXITY=2",
|
||||
},
|
||||
)
|
||||
|
||||
if err := outputSupervisor.Start(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to start audio output supervisor")
|
||||
outputSupervisor = nil
|
||||
return err
|
||||
}
|
||||
|
||||
outputClient = audio.NewIPCClient("audio-output", socketPathOutput, 0x4A4B4F55)
|
||||
|
||||
if currentAudioTrack != nil {
|
||||
outputRelay = audio.NewOutputRelay(outputClient, currentAudioTrack)
|
||||
if err := outputRelay.Start(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to start audio output relay")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Start input subprocess if not running, USB audio enabled, and input enabled
|
||||
ensureConfigLoaded()
|
||||
if inputSupervisor == nil && audioInputEnabled.Load() && config.UsbDevices != nil && config.UsbDevices.Audio {
|
||||
inputSupervisor = audio.NewSupervisor(
|
||||
"audio-input",
|
||||
audio.GetAudioInputBinaryPath(),
|
||||
socketPathInput,
|
||||
[]string{
|
||||
"ALSA_PLAYBACK_DEVICE=hw:1,0",
|
||||
"OPUS_BITRATE=128000",
|
||||
},
|
||||
)
|
||||
|
||||
if err := inputSupervisor.Start(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to start input supervisor")
|
||||
inputSupervisor = nil
|
||||
return err
|
||||
}
|
||||
|
||||
inputClient = audio.NewIPCClient("audio-input", socketPathInput, 0x4A4B4D49)
|
||||
inputRelay = audio.NewInputRelay(inputClient)
|
||||
if err := inputRelay.Start(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to start input relay")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopOutputSubprocessLocked stops output subprocess (assumes mutex is held)
|
||||
func stopOutputSubprocessLocked() {
|
||||
if outputRelay != nil {
|
||||
outputRelay.Stop()
|
||||
outputRelay = nil
|
||||
}
|
||||
if outputClient != nil {
|
||||
outputClient.Disconnect()
|
||||
outputClient = nil
|
||||
}
|
||||
if outputSupervisor != nil {
|
||||
outputSupervisor.Stop()
|
||||
outputSupervisor = nil
|
||||
}
|
||||
}
|
||||
|
||||
// stopInputSubprocessLocked stops input subprocess (assumes mutex is held)
|
||||
func stopInputSubprocessLocked() {
|
||||
if inputRelay != nil {
|
||||
inputRelay.Stop()
|
||||
inputRelay = nil
|
||||
}
|
||||
if inputClient != nil {
|
||||
inputClient.Disconnect()
|
||||
inputClient = nil
|
||||
}
|
||||
if inputSupervisor != nil {
|
||||
inputSupervisor.Stop()
|
||||
inputSupervisor = nil
|
||||
}
|
||||
}
|
||||
|
||||
// stopAudioSubprocessesLocked stops all audio subprocesses (assumes mutex is held)
|
||||
func stopAudioSubprocessesLocked() {
|
||||
stopOutputSubprocessLocked()
|
||||
stopInputSubprocessLocked()
|
||||
}
|
||||
|
||||
// stopAudioSubprocesses stops all audio subprocesses
|
||||
func stopAudioSubprocesses() {
|
||||
audioMutex.Lock()
|
||||
defer audioMutex.Unlock()
|
||||
stopAudioSubprocessesLocked()
|
||||
}
|
||||
|
||||
func onWebRTCConnect() {
|
||||
count := activeConnections.Add(1)
|
||||
if count == 1 {
|
||||
if err := startAudioSubprocesses(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to start audio subprocesses")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func onWebRTCDisconnect() {
|
||||
count := activeConnections.Add(-1)
|
||||
if count == 0 {
|
||||
// Stop audio immediately to release HDMI audio device which shares hardware with video device
|
||||
stopAudioSubprocesses()
|
||||
}
|
||||
}
|
||||
|
||||
func setAudioTrack(audioTrack *webrtc.TrackLocalStaticSample) {
|
||||
audioMutex.Lock()
|
||||
defer audioMutex.Unlock()
|
||||
|
||||
currentAudioTrack = audioTrack
|
||||
|
||||
if outputRelay != nil {
|
||||
outputRelay.Stop()
|
||||
outputRelay = nil
|
||||
}
|
||||
|
||||
if outputClient != nil {
|
||||
outputRelay = audio.NewOutputRelay(outputClient, audioTrack)
|
||||
if err := outputRelay.Start(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to start output relay")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAudioOutputSource switches between HDMI and USB audio output
|
||||
func SetAudioOutputSource(useUSB bool) error {
|
||||
audioMutex.Lock()
|
||||
defer audioMutex.Unlock()
|
||||
|
||||
if useUSBForAudioOutput == useUSB {
|
||||
return nil
|
||||
}
|
||||
|
||||
useUSBForAudioOutput = useUSB
|
||||
|
||||
ensureConfigLoaded()
|
||||
if useUSB {
|
||||
config.AudioOutputSource = "usb"
|
||||
} else {
|
||||
config.AudioOutputSource = "hdmi"
|
||||
}
|
||||
if err := SaveConfig(); err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to save config")
|
||||
return err
|
||||
}
|
||||
|
||||
stopOutputSubprocessLocked()
|
||||
|
||||
// Restart if there are active connections
|
||||
if activeConnections.Load() > 0 {
|
||||
audioMutex.Unlock()
|
||||
err := startAudioSubprocesses()
|
||||
audioMutex.Lock()
|
||||
if err != nil {
|
||||
audioLogger.Error().Err(err).Msg("Failed to restart audio output")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setPendingInputTrack(track *webrtc.TrackRemote) {
|
||||
audioMutex.Lock()
|
||||
defer audioMutex.Unlock()
|
||||
|
||||
// Start input track handler only once per WebRTC session
|
||||
if inputTrackHandling.CompareAndSwap(false, true) {
|
||||
go handleInputTrackForSession(track)
|
||||
}
|
||||
}
|
||||
|
||||
// SetAudioOutputEnabled enables or disables audio output
|
||||
func SetAudioOutputEnabled(enabled bool) error {
|
||||
if audioOutputEnabled.Swap(enabled) == enabled {
|
||||
return nil // Already in desired state
|
||||
}
|
||||
|
||||
if enabled {
|
||||
if activeConnections.Load() > 0 {
|
||||
return startAudioSubprocesses()
|
||||
}
|
||||
} else {
|
||||
audioMutex.Lock()
|
||||
stopOutputSubprocessLocked()
|
||||
audioMutex.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAudioInputEnabled enables or disables audio input
|
||||
func SetAudioInputEnabled(enabled bool) error {
|
||||
if audioInputEnabled.Swap(enabled) == enabled {
|
||||
return nil // Already in desired state
|
||||
}
|
||||
|
||||
if enabled {
|
||||
if activeConnections.Load() > 0 {
|
||||
return startAudioSubprocesses()
|
||||
}
|
||||
} else {
|
||||
audioMutex.Lock()
|
||||
stopInputSubprocessLocked()
|
||||
audioMutex.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleInputTrackForSession runs for the entire WebRTC session lifetime
|
||||
// It continuously reads from the track and sends to whatever relay is currently active
|
||||
func handleInputTrackForSession(track *webrtc.TrackRemote) {
|
||||
defer inputTrackHandling.Store(false)
|
||||
|
||||
audioLogger.Debug().
|
||||
Str("codec", track.Codec().MimeType).
|
||||
Str("track_id", track.ID()).
|
||||
Msg("starting session-lifetime track handler")
|
||||
|
||||
for {
|
||||
// Read RTP packet (must always read to keep track alive)
|
||||
rtpPacket, _, err := track.ReadRTP()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
audioLogger.Debug().Msg("audio track ended")
|
||||
return
|
||||
}
|
||||
audioLogger.Warn().Err(err).Msg("failed to read RTP packet")
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract Opus payload
|
||||
opusData := rtpPacket.Payload
|
||||
if len(opusData) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only send if input is enabled
|
||||
if !audioInputEnabled.Load() {
|
||||
continue // Drop frame but keep reading
|
||||
}
|
||||
|
||||
// Get client in single mutex operation (hot path optimization)
|
||||
audioMutex.Lock()
|
||||
client := inputClient
|
||||
audioMutex.Unlock()
|
||||
|
||||
if client == nil {
|
||||
continue // No relay, drop frame but keep reading
|
||||
}
|
||||
|
||||
if !client.IsConnected() {
|
||||
if err := client.Connect(); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err := client.WriteMessage(0, opusData); err != nil {
|
||||
client.Disconnect()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
package kvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/coder/websocket"
|
||||
"github.com/jetkvm/kvm/internal/audio"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
var audioControlService *audio.AudioControlService
|
||||
|
||||
func ensureAudioControlService() *audio.AudioControlService {
|
||||
if audioControlService == nil {
|
||||
sessionProvider := &KVMSessionProvider{}
|
||||
audioControlService = audio.NewAudioControlService(sessionProvider, logger)
|
||||
|
||||
// Set up RPC callback function for the audio package
|
||||
audio.SetRPCCallbacks(
|
||||
func() *audio.AudioControlService { return audioControlService },
|
||||
)
|
||||
}
|
||||
return audioControlService
|
||||
}
|
||||
|
||||
// handleSubscribeAudioEvents handles WebSocket audio event subscription
|
||||
func handleSubscribeAudioEvents(connectionID string, wsCon *websocket.Conn, runCtx context.Context, l *zerolog.Logger) {
|
||||
ensureAudioControlService()
|
||||
audioControlService.SubscribeToAudioEvents(connectionID, wsCon, runCtx, l)
|
||||
}
|
||||
|
||||
// handleUnsubscribeAudioEvents handles WebSocket audio event unsubscription
|
||||
func handleUnsubscribeAudioEvents(connectionID string, l *zerolog.Logger) {
|
||||
ensureAudioControlService()
|
||||
audioControlService.UnsubscribeFromAudioEvents(connectionID, l)
|
||||
}
|
||||
11
cloud.go
11
cloud.go
|
|
@ -20,7 +20,6 @@ import (
|
|||
|
||||
"github.com/coder/websocket"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/jetkvm/kvm/internal/audio"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
|
|
@ -482,16 +481,6 @@ func handleSessionRequest(
|
|||
cancelKeyboardMacro()
|
||||
|
||||
currentSession = session
|
||||
|
||||
// Set up audio relay callback to get current session's audio track
|
||||
// This is needed for audio output to work after enable/disable cycles
|
||||
audio.SetCurrentSessionCallback(func() audio.AudioTrackWriter {
|
||||
if currentSession != nil {
|
||||
return currentSession.AudioTrack
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
_ = wsjson.Write(context.Background(), c, gin.H{"type": "answer", "data": sd})
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -104,6 +104,7 @@ type Config struct {
|
|||
UsbDevices *usbgadget.Devices `json:"usb_devices"`
|
||||
NetworkConfig *network.NetworkConfig `json:"network_config"`
|
||||
DefaultLogLevel string `json:"default_log_level"`
|
||||
AudioOutputSource string `json:"audio_output_source"` // "hdmi" or "usb"
|
||||
}
|
||||
|
||||
func (c *Config) GetDisplayRotation() uint16 {
|
||||
|
|
@ -159,10 +160,10 @@ var defaultConfig = &Config{
|
|||
RelativeMouse: true,
|
||||
Keyboard: true,
|
||||
MassStorage: true,
|
||||
Audio: true,
|
||||
},
|
||||
NetworkConfig: &network.NetworkConfig{},
|
||||
DefaultLogLevel: "INFO",
|
||||
NetworkConfig: &network.NetworkConfig{},
|
||||
DefaultLogLevel: "INFO",
|
||||
AudioOutputSource: "hdmi",
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
|
|||
39
go.mod
39
go.mod
|
|
@ -5,13 +5,14 @@ go 1.24.4
|
|||
require (
|
||||
github.com/Masterminds/semver/v3 v3.4.0
|
||||
github.com/beevik/ntp v1.4.3
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/coder/websocket v1.8.14
|
||||
github.com/coreos/go-oidc/v3 v3.15.0
|
||||
github.com/creack/pty v1.1.24
|
||||
github.com/erikdubbelboer/gspt v0.0.0-20210805194459-ce36a5128377
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/gin-contrib/logger v1.2.6
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/go-co-op/gocron/v2 v2.16.5
|
||||
github.com/go-co-op/gocron/v2 v2.16.6
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/guregu/null/v6 v6.0.0
|
||||
github.com/gwatts/rootcerts v0.0.0-20250901182336-dc5ae18bd79f
|
||||
|
|
@ -19,8 +20,8 @@ require (
|
|||
github.com/pion/mdns/v2 v2.0.7
|
||||
github.com/pion/webrtc/v4 v4.1.4
|
||||
github.com/pojntfx/go-nbd v0.3.2
|
||||
github.com/prometheus/client_golang v1.23.0
|
||||
github.com/prometheus/common v0.66.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/common v0.66.1
|
||||
github.com/prometheus/procfs v0.17.0
|
||||
github.com/psanford/httpreadat v0.1.0
|
||||
github.com/rs/xid v1.6.0
|
||||
|
|
@ -30,37 +31,31 @@ require (
|
|||
github.com/vearutop/statigz v1.5.0
|
||||
github.com/vishvananda/netlink v1.3.1
|
||||
go.bug.st/serial v1.6.4
|
||||
golang.org/x/crypto v0.41.0
|
||||
golang.org/x/net v0.43.0
|
||||
golang.org/x/sys v0.35.0
|
||||
golang.org/x/crypto v0.42.0
|
||||
golang.org/x/net v0.44.0
|
||||
golang.org/x/sys v0.36.0
|
||||
)
|
||||
|
||||
replace github.com/pojntfx/go-nbd v0.3.2 => github.com/chemhack/go-nbd v0.0.0-20241006125820-59e45f5b1e7b
|
||||
|
||||
require (
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bytedance/sonic v1.13.3 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.4 // indirect
|
||||
github.com/bytedance/sonic v1.14.0 // indirect
|
||||
github.com/bytedance/sonic/loader v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/cloudwego/base64x v0.1.6 // indirect
|
||||
github.com/creack/goselect v0.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/erikdubbelboer/gspt v0.0.0-20210805194459-ce36a5128377 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
|
||||
github.com/gin-contrib/sse v1.1.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.4 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.26.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.27.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/jonboulle/clockwork v0.5.0 // indirect
|
||||
github.com/jpillora/overseer v1.1.6 // indirect
|
||||
github.com/jpillora/s3 v1.1.4 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
|
|
@ -90,10 +85,10 @@ require (
|
|||
github.com/ugorji/go/codec v1.3.0 // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
golang.org/x/arch v0.18.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/arch v0.20.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/text v0.28.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
google.golang.org/protobuf v1.36.9 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
|
|||
86
go.sum
86
go.sum
|
|
@ -1,7 +1,5 @@
|
|||
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
|
||||
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/beevik/ntp v1.4.3 h1:PlbTvE5NNy4QHmA4Mg57n7mcFTmr1W1j3gcK7L1lqho=
|
||||
|
|
@ -10,20 +8,18 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bool64/dev v0.2.39 h1:kP8DnMGlWXhGYJEZE/J0l/gVBdbuhoPGL+MJG4QbofE=
|
||||
github.com/bool64/dev v0.2.39/go.mod h1:iJbh1y/HkunEPhgebWRNcs8wfGq7sjvJ6W5iabL8ACg=
|
||||
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
|
||||
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
|
||||
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ=
|
||||
github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA=
|
||||
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
|
||||
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chemhack/go-nbd v0.0.0-20241006125820-59e45f5b1e7b h1:dSbDgy72Y1sjLPWLv7vs0fMFuhMBMViiT9PJZiZWZNs=
|
||||
github.com/chemhack/go-nbd v0.0.0-20241006125820-59e45f5b1e7b/go.mod h1:SehHnbi2e8NiSAKby42Itm8SIoS7b+wAprsfPH3qgYk=
|
||||
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
|
||||
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
|
||||
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg=
|
||||
github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
|
|
@ -46,20 +42,18 @@ github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w
|
|||
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
|
||||
github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ=
|
||||
github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-co-op/gocron/v2 v2.16.5 h1:j228Jxk7bb9CF8LKR3gS+bK3rcjRUINjlVI+ZMp26Ss=
|
||||
github.com/go-co-op/gocron/v2 v2.16.5/go.mod h1:zAfC/GFQ668qHxOVl/D68Jh5Ce7sDqX6TJnSQyRkRBc=
|
||||
github.com/go-co-op/gocron/v2 v2.16.6 h1:zI2Ya9sqvuLcgqJgV79LwoJXM8h20Z/drtB7ATbpRWo=
|
||||
github.com/go-co-op/gocron/v2 v2.16.6/go.mod h1:zAfC/GFQ668qHxOVl/D68Jh5Ce7sDqX6TJnSQyRkRBc=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
|
||||
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
|
||||
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
|
||||
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
|
||||
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
|
|
@ -68,26 +62,18 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
|
|||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/guregu/null/v6 v6.0.0 h1:N14VRS+4di81i1PXRiprbQJ9EM9gqBa0+KVMeS/QSjQ=
|
||||
github.com/guregu/null/v6 v6.0.0/go.mod h1:hrMIhIfrOZeLPZhROSn149tpw2gHkidAqxoXNyeX3iQ=
|
||||
github.com/gwatts/rootcerts v0.0.0-20250901182336-dc5ae18bd79f h1:08t2PbrkDgW2+mwCQ3jhKUBrCM9Bc9SeH5j2Dst3B+0=
|
||||
github.com/gwatts/rootcerts v0.0.0-20250901182336-dc5ae18bd79f/go.mod h1:5Kt9XkWvkGi2OHOq0QsGxebHmhCcqJ8KCbNg/a6+n+g=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/jpillora/overseer v1.1.6 h1:3ygYfNcR3FfOr22miu3vR1iQcXKMHbmULBh98rbkIyo=
|
||||
github.com/jpillora/overseer v1.1.6/go.mod h1:aPXQtxuVb9PVWRWTXpo+LdnC/YXQ0IBLNXqKMJmgk88=
|
||||
github.com/jpillora/s3 v1.1.4 h1:YCCKDWzb/Ye9EBNd83ATRF/8wPEy0xd43Rezb6u6fzc=
|
||||
github.com/jpillora/s3 v1.1.4/go.mod h1:yedE603V+crlFi1Kl/5vZJaBu9pUzE9wvKegU/lF2zs=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
|
|
@ -152,12 +138,12 @@ github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7d
|
|||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
|
||||
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.66.0 h1:K/rJPHrG3+AoQs50r2+0t7zMnMzek2Vbv31OFVsMeVY=
|
||||
github.com/prometheus/common v0.66.0/go.mod h1:Ux6NtV1B4LatamKE63tJBntoxD++xmtI/lK0VtEplN4=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
||||
github.com/psanford/httpreadat v0.1.0 h1:VleW1HS2zO7/4c7c7zNl33fO6oYACSagjJIyMIwZLUE=
|
||||
|
|
@ -170,15 +156,12 @@ github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
|||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/gunit v1.1.3/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ=
|
||||
github.com/sourcegraph/tf-dag v0.2.2-0.20250131204052-3e8ff1477b4f h1:VgoRCP1efSCEZIcF2THLQ46+pIBzzgNiaUBe9wEDwYU=
|
||||
github.com/sourcegraph/tf-dag v0.2.2-0.20250131204052-3e8ff1477b4f/go.mod h1:pzro7BGorij2WgrjEammtrkbo3+xldxo+KaGLGUiD+Q=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
|
|
@ -200,12 +183,14 @@ go.bug.st/serial v1.6.4 h1:7FmqNPgVp3pu2Jz5PoPtbZ9jJO5gnEnZIvnI1lzve8A=
|
|||
go.bug.st/serial v1.6.4/go.mod h1:nofMJxTeNVny/m6+KaafC6vJGj3miwQZ6vW4BZUGJPI=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/arch v0.18.0 h1:WN9poc33zL4AzGxqf8VtpKUnGvMi8O9lhNyBMF/85qc=
|
||||
golang.org/x/arch v0.18.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
|
||||
golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
|
@ -213,20 +198,17 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
|
||||
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
|
||||
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
||||
|
|
|
|||
|
|
@ -1,38 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AudioState holds all audio-related state with a single mutex
|
||||
type AudioState struct {
|
||||
mu sync.RWMutex
|
||||
audioMuted bool
|
||||
microphoneMuted bool
|
||||
}
|
||||
|
||||
var globalAudioState = &AudioState{}
|
||||
|
||||
func SetAudioMuted(muted bool) {
|
||||
globalAudioState.mu.Lock()
|
||||
defer globalAudioState.mu.Unlock()
|
||||
globalAudioState.audioMuted = muted
|
||||
}
|
||||
|
||||
func IsAudioMuted() bool {
|
||||
globalAudioState.mu.RLock()
|
||||
defer globalAudioState.mu.RUnlock()
|
||||
return globalAudioState.audioMuted
|
||||
}
|
||||
|
||||
func SetMicrophoneMuted(muted bool) {
|
||||
globalAudioState.mu.Lock()
|
||||
defer globalAudioState.mu.Unlock()
|
||||
globalAudioState.microphoneMuted = muted
|
||||
}
|
||||
|
||||
func IsMicrophoneMuted() bool {
|
||||
globalAudioState.mu.RLock()
|
||||
defer globalAudioState.mu.RUnlock()
|
||||
return globalAudioState.microphoneMuted
|
||||
}
|
||||
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Bidirectional audio processing optimized for ARM NEON SIMD:
|
||||
* - OUTPUT PATH: TC358743 HDMI audio → Client speakers
|
||||
* Pipeline: ALSA hw:0,0 capture → 2.5x gain → Opus encode (96kbps, FEC enabled)
|
||||
* Pipeline: ALSA hw:0,0 capture → Opus encode (128kbps, FEC enabled)
|
||||
*
|
||||
* - INPUT PATH: Client microphone → Device speakers
|
||||
* Pipeline: Opus decode (with FEC) → ALSA hw:1,0 playback
|
||||
|
|
@ -11,7 +11,6 @@
|
|||
* Key features:
|
||||
* - ARM NEON SIMD optimization for all audio operations
|
||||
* - Opus in-band FEC for packet loss resilience
|
||||
* - Ultra-low CPU usage (~0.5% on RV1106)
|
||||
* - S16_LE @ 48kHz stereo, 20ms frames (960 samples)
|
||||
*/
|
||||
|
||||
|
|
@ -22,63 +21,56 @@
|
|||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <sched.h>
|
||||
#include <time.h>
|
||||
#include <signal.h>
|
||||
|
||||
// ARM NEON SIMD support (always available on JetKVM's ARM Cortex-A7)
|
||||
#include <arm_neon.h>
|
||||
|
||||
// RV1106 (Cortex-A7) has 64-byte cache lines
|
||||
#define CACHE_LINE_SIZE 64
|
||||
#define SIMD_ALIGN __attribute__((aligned(16)))
|
||||
#define CACHE_ALIGN __attribute__((aligned(CACHE_LINE_SIZE)))
|
||||
#define SIMD_PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
|
||||
|
||||
static int trace_logging_enabled = 0;
|
||||
static int simd_initialized = 0;
|
||||
|
||||
static void simd_init_once(void) {
|
||||
if (simd_initialized) return;
|
||||
simd_initialized = 1;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GLOBAL STATE VARIABLES
|
||||
// ============================================================================
|
||||
// Compile-time trace logging - disabled for production (zero overhead)
|
||||
#define TRACE_LOG(...) ((void)0)
|
||||
|
||||
// ALSA device handles
|
||||
static snd_pcm_t *pcm_capture_handle = NULL; // OUTPUT: TC358743 HDMI audio → client
|
||||
static snd_pcm_t *pcm_playback_handle = NULL; // INPUT: Client microphone → device speakers
|
||||
|
||||
// ALSA device names
|
||||
static const char *alsa_capture_device = NULL;
|
||||
static const char *alsa_playback_device = NULL;
|
||||
|
||||
// Opus codec instances
|
||||
static OpusEncoder *encoder = NULL;
|
||||
static OpusDecoder *decoder = NULL;
|
||||
|
||||
// Audio format (S16_LE @ 48kHz stereo)
|
||||
static int sample_rate = 48000;
|
||||
static int channels = 2;
|
||||
static int frame_size = 960; // 20ms frames at 48kHz
|
||||
static uint32_t sample_rate = 48000;
|
||||
static uint8_t channels = 2;
|
||||
static uint16_t frame_size = 960; // 20ms frames at 48kHz
|
||||
|
||||
// Opus encoder settings (optimized for minimal CPU ~0.5% on RV1106)
|
||||
static int opus_bitrate = 96000; // 96 kbps - good quality/bandwidth balance
|
||||
static int opus_complexity = 1; // Complexity 1 - minimal CPU usage
|
||||
static int opus_vbr = 1; // Variable bitrate enabled
|
||||
static int opus_vbr_constraint = 1; // Constrained VBR - predictable bandwidth
|
||||
static int opus_signal_type = -1000; // OPUS_AUTO - automatic signal type detection
|
||||
static int opus_bandwidth = 1103; // OPUS_BANDWIDTH_WIDEBAND (50-8000 Hz)
|
||||
static int opus_dtx = 0; // DTX disabled - no discontinuous transmission
|
||||
static int opus_lsb_depth = 16; // 16-bit depth - matches S16_LE format
|
||||
static uint32_t opus_bitrate = 128000;
|
||||
static uint8_t opus_complexity = 2;
|
||||
static uint16_t max_packet_size = 1500;
|
||||
|
||||
// Network configuration
|
||||
static int max_packet_size = 1500;
|
||||
// Opus encoder constants (hardcoded for production)
|
||||
#define OPUS_VBR 1 // VBR enabled
|
||||
#define OPUS_VBR_CONSTRAINT 0 // Unconstrained VBR (better for low-volume signals)
|
||||
#define OPUS_SIGNAL_TYPE 3002 // OPUS_SIGNAL_MUSIC (better transient handling)
|
||||
#define OPUS_BANDWIDTH 1105 // OPUS_BANDWIDTH_FULLBAND (20kHz, enabled by 128kbps bitrate)
|
||||
#define OPUS_DTX 0 // DTX disabled (prevents audio drops)
|
||||
#define OPUS_LSB_DEPTH 16 // 16-bit depth
|
||||
|
||||
// ALSA retry configuration
|
||||
static int sleep_microseconds = 1000;
|
||||
static int max_attempts_global = 5;
|
||||
static int max_backoff_us_global = 500000;
|
||||
|
||||
// ALSA buffer configuration (not currently used - kept for future optimization)
|
||||
static const int optimized_buffer_size = 1;
|
||||
|
||||
|
||||
// ============================================================================
|
||||
// FUNCTION DECLARATIONS
|
||||
// ============================================================================
|
||||
static uint32_t sleep_microseconds = 1000;
|
||||
static uint32_t sleep_milliseconds = 1; // Precomputed: sleep_microseconds / 1000
|
||||
static uint8_t max_attempts_global = 5;
|
||||
static uint32_t max_backoff_us_global = 500000;
|
||||
|
||||
int jetkvm_audio_capture_init();
|
||||
void jetkvm_audio_capture_close();
|
||||
|
|
@ -88,156 +80,141 @@ int jetkvm_audio_playback_init();
|
|||
void jetkvm_audio_playback_close();
|
||||
int jetkvm_audio_decode_write(void *opus_buf, int opus_size);
|
||||
|
||||
void update_audio_constants(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx, int lsb_depth, int sr, int ch,
|
||||
int fs, int max_pkt, int sleep_us, int max_attempts, int max_backoff);
|
||||
void set_trace_logging(int enabled);
|
||||
int update_opus_encoder_params(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx);
|
||||
void update_audio_constants(uint32_t bitrate, uint8_t complexity,
|
||||
uint32_t sr, uint8_t ch, uint16_t fs, uint16_t max_pkt,
|
||||
uint32_t sleep_us, uint8_t max_attempts, uint32_t max_backoff);
|
||||
void update_audio_decoder_constants(uint32_t sr, uint8_t ch, uint16_t fs, uint16_t max_pkt,
|
||||
uint32_t sleep_us, uint8_t max_attempts, uint32_t max_backoff);
|
||||
int update_opus_encoder_params(uint32_t bitrate, uint8_t complexity);
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION FUNCTIONS
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Sync configuration from Go to C
|
||||
* Sync encoder configuration from Go to C
|
||||
*/
|
||||
void update_audio_constants(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx, int lsb_depth, int sr, int ch,
|
||||
int fs, int max_pkt, int sleep_us, int max_attempts, int max_backoff) {
|
||||
void update_audio_constants(uint32_t bitrate, uint8_t complexity,
|
||||
uint32_t sr, uint8_t ch, uint16_t fs, uint16_t max_pkt,
|
||||
uint32_t sleep_us, uint8_t max_attempts, uint32_t max_backoff) {
|
||||
opus_bitrate = bitrate;
|
||||
opus_complexity = complexity;
|
||||
opus_vbr = vbr;
|
||||
opus_vbr_constraint = vbr_constraint;
|
||||
opus_signal_type = signal_type;
|
||||
opus_bandwidth = bandwidth;
|
||||
opus_dtx = dtx;
|
||||
opus_lsb_depth = lsb_depth;
|
||||
sample_rate = sr;
|
||||
channels = ch;
|
||||
frame_size = fs;
|
||||
max_packet_size = max_pkt;
|
||||
sleep_microseconds = sleep_us;
|
||||
sleep_milliseconds = sleep_us / 1000; // Precompute for snd_pcm_wait
|
||||
max_attempts_global = max_attempts;
|
||||
max_backoff_us_global = max_backoff;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable/disable trace logging (zero overhead when disabled)
|
||||
* Sync decoder configuration from Go to C (no encoder-only params)
|
||||
*/
|
||||
void set_trace_logging(int enabled) {
|
||||
trace_logging_enabled = enabled;
|
||||
void update_audio_decoder_constants(uint32_t sr, uint8_t ch, uint16_t fs, uint16_t max_pkt,
|
||||
uint32_t sleep_us, uint8_t max_attempts, uint32_t max_backoff) {
|
||||
sample_rate = sr;
|
||||
channels = ch;
|
||||
frame_size = fs;
|
||||
max_packet_size = max_pkt;
|
||||
sleep_microseconds = sleep_us;
|
||||
sleep_milliseconds = sleep_us / 1000; // Precompute for snd_pcm_wait
|
||||
max_attempts_global = max_attempts;
|
||||
max_backoff_us_global = max_backoff;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize ALSA device names from environment variables
|
||||
* Must be called before jetkvm_audio_capture_init or jetkvm_audio_playback_init
|
||||
*/
|
||||
static void init_alsa_devices_from_env(void) {
|
||||
if (alsa_capture_device == NULL) {
|
||||
alsa_capture_device = getenv("ALSA_CAPTURE_DEVICE");
|
||||
if (alsa_capture_device == NULL || alsa_capture_device[0] == '\0') {
|
||||
alsa_capture_device = "hw:0,0"; // Default to HDMI
|
||||
}
|
||||
}
|
||||
if (alsa_playback_device == NULL) {
|
||||
alsa_playback_device = getenv("ALSA_PLAYBACK_DEVICE");
|
||||
if (alsa_playback_device == NULL || alsa_playback_device[0] == '\0') {
|
||||
alsa_playback_device = "hw:1,0"; // Default to USB gadget
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// SIMD-OPTIMIZED BUFFER OPERATIONS (ARM NEON)
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Clear audio buffer using NEON (8 samples/iteration)
|
||||
* @param buffer Audio buffer to clear
|
||||
* @param samples Number of samples to zero out
|
||||
* Clear audio buffer using NEON (16 samples/iteration with 2x unrolling)
|
||||
*/
|
||||
static inline void simd_clear_samples_s16(short *buffer, int samples) {
|
||||
simd_init_once();
|
||||
|
||||
int simd_samples = samples & ~7;
|
||||
static inline void simd_clear_samples_s16(short * __restrict__ buffer, uint32_t samples) {
|
||||
const int16x8_t zero = vdupq_n_s16(0);
|
||||
uint32_t i = 0;
|
||||
|
||||
// SIMD path: zero 8 samples per iteration
|
||||
for (int i = 0; i < simd_samples; i += 8) {
|
||||
// Process 16 samples at a time (2x unrolled for better pipeline utilization)
|
||||
uint32_t simd_samples = samples & ~15U;
|
||||
for (; i < simd_samples; i += 16) {
|
||||
vst1q_s16(&buffer[i], zero);
|
||||
vst1q_s16(&buffer[i + 8], zero);
|
||||
}
|
||||
|
||||
// Scalar path: handle remaining samples
|
||||
for (int i = simd_samples; i < samples; i++) {
|
||||
// Handle remaining 8 samples
|
||||
if (i + 8 <= samples) {
|
||||
vst1q_s16(&buffer[i], zero);
|
||||
i += 8;
|
||||
}
|
||||
|
||||
// Scalar: remaining samples
|
||||
for (; i < samples; i++) {
|
||||
buffer[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply gain using NEON Q15 fixed-point math (8 samples/iteration)
|
||||
* Uses vqrdmulhq_s16 for single-instruction saturating rounded multiply-high
|
||||
* @param samples Audio buffer to scale in-place
|
||||
* @param count Number of samples to process
|
||||
* @param volume Gain multiplier (e.g., 2.5 for 2.5x gain)
|
||||
*/
|
||||
static inline void simd_scale_volume_s16(short *samples, int count, float volume) {
|
||||
simd_init_once();
|
||||
|
||||
// Convert float gain to Q14 fixed-point for vqrdmulhq_s16
|
||||
// vqrdmulhq_s16 extracts bits [30:15], so multiply by 16384 (2^14) instead of 32768 (2^15)
|
||||
int16_t vol_fixed = (int16_t)(volume * 16384.0f);
|
||||
int16x8_t vol_vec = vdupq_n_s16(vol_fixed);
|
||||
int simd_count = count & ~7;
|
||||
|
||||
// SIMD path: process 8 samples per iteration
|
||||
for (int i = 0; i < simd_count; i += 8) {
|
||||
int16x8_t samples_vec = vld1q_s16(&samples[i]);
|
||||
int16x8_t result = vqrdmulhq_s16(samples_vec, vol_vec);
|
||||
vst1q_s16(&samples[i], result);
|
||||
}
|
||||
|
||||
// Scalar path: handle remaining samples
|
||||
for (int i = simd_count; i < count; i++) {
|
||||
samples[i] = (short)((samples[i] * vol_fixed) >> 14);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// INITIALIZATION STATE TRACKING
|
||||
// ============================================================================
|
||||
|
||||
static volatile int capture_initializing = 0;
|
||||
static volatile int capture_initialized = 0;
|
||||
static volatile int playback_initializing = 0;
|
||||
static volatile int playback_initialized = 0;
|
||||
static volatile sig_atomic_t capture_initializing = 0;
|
||||
static volatile sig_atomic_t capture_initialized = 0;
|
||||
static volatile sig_atomic_t playback_initializing = 0;
|
||||
static volatile sig_atomic_t playback_initialized = 0;
|
||||
|
||||
/**
|
||||
* Update Opus encoder settings at runtime (does NOT modify FEC settings)
|
||||
* Note: FEC configuration remains unchanged - set at initialization
|
||||
* Update Opus encoder settings at runtime (does NOT modify FEC or hardcoded settings)
|
||||
* @return 0 on success, -1 if not initialized, >0 if some settings failed
|
||||
*/
|
||||
int update_opus_encoder_params(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx) {
|
||||
int update_opus_encoder_params(uint32_t bitrate, uint8_t complexity) {
|
||||
if (!encoder || !capture_initialized) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Update global configuration variables
|
||||
// Update runtime-configurable parameters
|
||||
opus_bitrate = bitrate;
|
||||
opus_complexity = complexity;
|
||||
opus_vbr = vbr;
|
||||
opus_vbr_constraint = vbr_constraint;
|
||||
opus_signal_type = signal_type;
|
||||
opus_bandwidth = bandwidth;
|
||||
opus_dtx = dtx;
|
||||
|
||||
// Apply settings to encoder (FEC settings not modified)
|
||||
// Apply settings to encoder
|
||||
int result = 0;
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_BITRATE(opus_bitrate));
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_COMPLEXITY(opus_complexity));
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_VBR(opus_vbr));
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_VBR_CONSTRAINT(opus_vbr_constraint));
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_SIGNAL(opus_signal_type));
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_BANDWIDTH(opus_bandwidth));
|
||||
result |= opus_encoder_ctl(encoder, OPUS_SET_DTX(opus_dtx));
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ALSA UTILITY FUNCTIONS
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Open ALSA device with exponential backoff retry
|
||||
* @return 0 on success, negative error code on failure
|
||||
*/
|
||||
// Helper: High-precision sleep using nanosleep (better than usleep)
|
||||
static inline void precise_sleep_us(uint32_t microseconds) {
|
||||
struct timespec ts = {
|
||||
.tv_sec = microseconds / 1000000,
|
||||
.tv_nsec = (microseconds % 1000000) * 1000
|
||||
};
|
||||
nanosleep(&ts, NULL);
|
||||
}
|
||||
|
||||
static int safe_alsa_open(snd_pcm_t **handle, const char *device, snd_pcm_stream_t stream) {
|
||||
int attempt = 0;
|
||||
uint8_t attempt = 0;
|
||||
int err;
|
||||
int backoff_us = sleep_microseconds;
|
||||
uint32_t backoff_us = sleep_microseconds;
|
||||
|
||||
while (attempt < max_attempts_global) {
|
||||
err = snd_pcm_open(handle, device, stream, SND_PCM_NONBLOCK);
|
||||
|
|
@ -248,17 +225,18 @@ static int safe_alsa_open(snd_pcm_t **handle, const char *device, snd_pcm_stream
|
|||
|
||||
attempt++;
|
||||
|
||||
// Exponential backoff with bit shift (faster than multiplication)
|
||||
if (err == -EBUSY || err == -EAGAIN) {
|
||||
usleep(backoff_us);
|
||||
backoff_us = (backoff_us * 2 < max_backoff_us_global) ? backoff_us * 2 : max_backoff_us_global;
|
||||
precise_sleep_us(backoff_us);
|
||||
backoff_us = (backoff_us << 1 < max_backoff_us_global) ? (backoff_us << 1) : max_backoff_us_global;
|
||||
} else if (err == -ENODEV || err == -ENOENT) {
|
||||
usleep(backoff_us * 2);
|
||||
backoff_us = (backoff_us * 2 < max_backoff_us_global) ? backoff_us * 2 : max_backoff_us_global;
|
||||
precise_sleep_us(backoff_us << 1);
|
||||
backoff_us = (backoff_us << 1 < max_backoff_us_global) ? (backoff_us << 1) : max_backoff_us_global;
|
||||
} else if (err == -EPERM || err == -EACCES) {
|
||||
usleep(backoff_us / 2);
|
||||
precise_sleep_us(backoff_us >> 1);
|
||||
} else {
|
||||
usleep(backoff_us);
|
||||
backoff_us = (backoff_us * 2 < max_backoff_us_global) ? backoff_us * 2 : max_backoff_us_global;
|
||||
precise_sleep_us(backoff_us);
|
||||
backoff_us = (backoff_us << 1 < max_backoff_us_global) ? (backoff_us << 1) : max_backoff_us_global;
|
||||
}
|
||||
}
|
||||
return err;
|
||||
|
|
@ -299,13 +277,13 @@ static int configure_alsa_device(snd_pcm_t *handle, const char *device_name) {
|
|||
if (err < 0) return err;
|
||||
}
|
||||
|
||||
snd_pcm_uframes_t period_size = optimized_buffer_size ? frame_size : frame_size / 2;
|
||||
snd_pcm_uframes_t period_size = frame_size; // Optimized: use full frame as period
|
||||
if (period_size < 64) period_size = 64;
|
||||
|
||||
err = snd_pcm_hw_params_set_period_size_near(handle, params, &period_size, 0);
|
||||
if (err < 0) return err;
|
||||
|
||||
snd_pcm_uframes_t buffer_size = optimized_buffer_size ? period_size * 2 : period_size * 4;
|
||||
snd_pcm_uframes_t buffer_size = period_size * 2; // Optimized: minimal buffer for low latency
|
||||
err = snd_pcm_hw_params_set_buffer_size_near(handle, params, &buffer_size);
|
||||
if (err < 0) return err;
|
||||
|
||||
|
|
@ -327,9 +305,7 @@ static int configure_alsa_device(snd_pcm_t *handle, const char *device_name) {
|
|||
return snd_pcm_prepare(handle);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// AUDIO OUTPUT PATH FUNCTIONS (TC358743 HDMI Audio → Client Speakers)
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Initialize OUTPUT path (TC358743 HDMI capture → Opus encoder)
|
||||
|
|
@ -339,7 +315,7 @@ static int configure_alsa_device(snd_pcm_t *handle, const char *device_name) {
|
|||
int jetkvm_audio_capture_init() {
|
||||
int err;
|
||||
|
||||
simd_init_once();
|
||||
init_alsa_devices_from_env();
|
||||
|
||||
if (__sync_bool_compare_and_swap(&capture_initializing, 0, 1) == 0) {
|
||||
return -EBUSY;
|
||||
|
|
@ -359,8 +335,11 @@ int jetkvm_audio_capture_init() {
|
|||
pcm_capture_handle = NULL;
|
||||
}
|
||||
|
||||
err = safe_alsa_open(&pcm_capture_handle, "hw:0,0", SND_PCM_STREAM_CAPTURE);
|
||||
err = safe_alsa_open(&pcm_capture_handle, alsa_capture_device, SND_PCM_STREAM_CAPTURE);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Failed to open ALSA capture device %s: %s\n",
|
||||
alsa_capture_device, snd_strerror(err));
|
||||
fflush(stderr);
|
||||
capture_initializing = 0;
|
||||
return -1;
|
||||
}
|
||||
|
|
@ -387,17 +366,15 @@ int jetkvm_audio_capture_init() {
|
|||
// Configure encoder with optimized settings
|
||||
opus_encoder_ctl(encoder, OPUS_SET_BITRATE(opus_bitrate));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_COMPLEXITY(opus_complexity));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_VBR(opus_vbr));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_VBR_CONSTRAINT(opus_vbr_constraint));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_SIGNAL(opus_signal_type));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_BANDWIDTH(opus_bandwidth));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_DTX(opus_dtx));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_LSB_DEPTH(opus_lsb_depth));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_VBR(OPUS_VBR));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_VBR_CONSTRAINT(OPUS_VBR_CONSTRAINT));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_SIGNAL(OPUS_SIGNAL_TYPE));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_BANDWIDTH(OPUS_BANDWIDTH));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_DTX(OPUS_DTX));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_LSB_DEPTH(OPUS_LSB_DEPTH));
|
||||
|
||||
// Enable in-band FEC (Forward Error Correction) for network resilience
|
||||
// Embeds redundant data in packets to recover from packet loss (adds ~2-5% bitrate overhead)
|
||||
opus_encoder_ctl(encoder, OPUS_SET_INBAND_FEC(1));
|
||||
opus_encoder_ctl(encoder, OPUS_SET_PACKET_LOSS_PERC(10)); // Optimize for 10% expected loss
|
||||
opus_encoder_ctl(encoder, OPUS_SET_PACKET_LOSS_PERC(20));
|
||||
|
||||
capture_initialized = 1;
|
||||
capture_initializing = 0;
|
||||
|
|
@ -406,31 +383,25 @@ int jetkvm_audio_capture_init() {
|
|||
|
||||
/**
|
||||
* Read HDMI audio, encode to Opus (OUTPUT path hot function)
|
||||
* Processing pipeline: ALSA capture → 2.5x gain → Opus encode
|
||||
* @param opus_buf Output buffer for encoded Opus packet
|
||||
* @return >0 = Opus packet size in bytes, -1 = error
|
||||
*/
|
||||
__attribute__((hot)) int jetkvm_audio_read_encode(void * __restrict__ opus_buf) {
|
||||
// Static buffers persist across calls for better cache locality
|
||||
static short SIMD_ALIGN pcm_buffer[1920]; // 960 frames × 2 channels
|
||||
|
||||
// Local variables
|
||||
static short CACHE_ALIGN pcm_buffer[960 * 2]; // Cache-aligned
|
||||
unsigned char * __restrict__ out = (unsigned char*)opus_buf;
|
||||
int pcm_rc;
|
||||
int err = 0;
|
||||
int recovery_attempts = 0;
|
||||
const int max_recovery_attempts = 3;
|
||||
int nb_bytes;
|
||||
int32_t pcm_rc, nb_bytes;
|
||||
int32_t err = 0;
|
||||
uint8_t recovery_attempts = 0;
|
||||
const uint8_t max_recovery_attempts = 3;
|
||||
|
||||
// Prefetch output buffer for write
|
||||
SIMD_PREFETCH(out, 1, 3);
|
||||
SIMD_PREFETCH(pcm_buffer, 0, 3);
|
||||
// Prefetch for write (out) and read (pcm_buffer) - RV1106 has small L1 cache
|
||||
SIMD_PREFETCH(out, 1, 0); // Write, immediate use
|
||||
SIMD_PREFETCH(pcm_buffer, 0, 0); // Read, immediate use
|
||||
SIMD_PREFETCH(pcm_buffer + 64, 0, 1); // Prefetch next cache line
|
||||
|
||||
if (__builtin_expect(!capture_initialized || !pcm_capture_handle || !encoder || !opus_buf, 0)) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_OUTPUT] jetkvm_audio_read_encode: Failed safety checks - capture_initialized=%d, pcm_capture_handle=%p, encoder=%p, opus_buf=%p\n",
|
||||
TRACE_LOG("[AUDIO_OUTPUT] jetkvm_audio_read_encode: Failed safety checks - capture_initialized=%d, pcm_capture_handle=%p, encoder=%p, opus_buf=%p\n",
|
||||
capture_initialized, pcm_capture_handle, encoder, opus_buf);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
@ -452,15 +423,17 @@ retry_read:
|
|||
}
|
||||
goto retry_read;
|
||||
} else if (pcm_rc == -EAGAIN) {
|
||||
return 0;
|
||||
// Wait for data to be available
|
||||
snd_pcm_wait(pcm_capture_handle, sleep_milliseconds);
|
||||
goto retry_read;
|
||||
} else if (pcm_rc == -ESTRPIPE) {
|
||||
recovery_attempts++;
|
||||
if (recovery_attempts > max_recovery_attempts) {
|
||||
return -1;
|
||||
}
|
||||
int resume_attempts = 0;
|
||||
uint8_t resume_attempts = 0;
|
||||
while ((err = snd_pcm_resume(pcm_capture_handle)) == -EAGAIN && resume_attempts < 10) {
|
||||
usleep(sleep_microseconds);
|
||||
snd_pcm_wait(pcm_capture_handle, sleep_milliseconds);
|
||||
resume_attempts++;
|
||||
}
|
||||
if (err < 0) {
|
||||
|
|
@ -485,7 +458,7 @@ retry_read:
|
|||
if (recovery_attempts <= 1 && pcm_rc == -EINTR) {
|
||||
goto retry_read;
|
||||
} else if (recovery_attempts <= 1 && pcm_rc == -EBUSY) {
|
||||
usleep(sleep_microseconds / 2);
|
||||
snd_pcm_wait(pcm_capture_handle, 1); // Wait 1ms for device
|
||||
goto retry_read;
|
||||
}
|
||||
return -1;
|
||||
|
|
@ -494,27 +467,80 @@ retry_read:
|
|||
|
||||
// Zero-pad if we got a short read
|
||||
if (__builtin_expect(pcm_rc < frame_size, 0)) {
|
||||
int remaining_samples = (frame_size - pcm_rc) * channels;
|
||||
uint32_t remaining_samples = (frame_size - pcm_rc) * channels;
|
||||
simd_clear_samples_s16(&pcm_buffer[pcm_rc * channels], remaining_samples);
|
||||
}
|
||||
|
||||
// Apply 2.5x gain boost to prevent quantization noise at low volumes
|
||||
// HDMI audio typically transmitted at -6 to -12dB; boost prevents Opus noise floor artifacts
|
||||
simd_scale_volume_s16(pcm_buffer, frame_size * channels, 2.5f);
|
||||
// Find peak amplitude with NEON SIMD
|
||||
uint32_t total_samples = frame_size * channels;
|
||||
int16x8_t vmax = vdupq_n_s16(0);
|
||||
|
||||
// Encode PCM to Opus (20ms frame → ~200 bytes at 96kbps)
|
||||
nb_bytes = opus_encode(encoder, pcm_buffer, frame_size, out, max_packet_size);
|
||||
|
||||
if (trace_logging_enabled && nb_bytes > 0) {
|
||||
printf("[AUDIO_OUTPUT] jetkvm_audio_read_encode: Successfully encoded %d PCM frames to %d Opus bytes\n", pcm_rc, nb_bytes);
|
||||
uint32_t i;
|
||||
for (i = 0; i + 8 <= total_samples; i += 8) {
|
||||
int16x8_t v = vld1q_s16(&pcm_buffer[i]);
|
||||
int16x8_t vabs = vabsq_s16(v);
|
||||
vmax = vmaxq_s16(vmax, vabs);
|
||||
}
|
||||
|
||||
// Horizontal max reduction (manual for ARMv7)
|
||||
int16x4_t vmax_low = vget_low_s16(vmax);
|
||||
int16x4_t vmax_high = vget_high_s16(vmax);
|
||||
int16x4_t vmax_reduced = vmax_s16(vmax_low, vmax_high);
|
||||
vmax_reduced = vpmax_s16(vmax_reduced, vmax_reduced);
|
||||
vmax_reduced = vpmax_s16(vmax_reduced, vmax_reduced);
|
||||
int16_t peak = vget_lane_s16(vmax_reduced, 0);
|
||||
|
||||
// Handle remaining samples
|
||||
for (; i < total_samples; i++) {
|
||||
int16_t abs_val = (pcm_buffer[i] < 0) ? -pcm_buffer[i] : pcm_buffer[i];
|
||||
if (abs_val > peak) peak = abs_val;
|
||||
}
|
||||
|
||||
// Apply gain if signal is weak (below -18dB = 4096) for best quality
|
||||
// Target: boost to ~50% of range (16384) to improve SNR
|
||||
if (peak > 0 && peak < 4096) {
|
||||
float gain = 16384.0f / peak;
|
||||
if (gain > 8.0f) gain = 8.0f; // Max 18dB boost for best quality
|
||||
|
||||
// Apply gain with NEON and saturation
|
||||
float32x4_t vgain = vdupq_n_f32(gain);
|
||||
for (i = 0; i + 8 <= total_samples; i += 8) {
|
||||
int16x8_t v = vld1q_s16(&pcm_buffer[i]);
|
||||
|
||||
// Convert to float, apply gain, saturate back to int16
|
||||
int32x4_t v_low = vmovl_s16(vget_low_s16(v));
|
||||
int32x4_t v_high = vmovl_s16(vget_high_s16(v));
|
||||
|
||||
float32x4_t f_low = vcvtq_f32_s32(v_low);
|
||||
float32x4_t f_high = vcvtq_f32_s32(v_high);
|
||||
|
||||
f_low = vmulq_f32(f_low, vgain);
|
||||
f_high = vmulq_f32(f_high, vgain);
|
||||
|
||||
v_low = vcvtq_s32_f32(f_low);
|
||||
v_high = vcvtq_s32_f32(f_high);
|
||||
|
||||
// Saturate to int16 range
|
||||
int16x4_t result_low = vqmovn_s32(v_low);
|
||||
int16x4_t result_high = vqmovn_s32(v_high);
|
||||
|
||||
vst1q_s16(&pcm_buffer[i], vcombine_s16(result_low, result_high));
|
||||
}
|
||||
|
||||
// Handle remaining samples
|
||||
for (; i < total_samples; i++) {
|
||||
int32_t boosted = (int32_t)(pcm_buffer[i] * gain);
|
||||
if (boosted > 32767) boosted = 32767;
|
||||
if (boosted < -32768) boosted = -32768;
|
||||
pcm_buffer[i] = (int16_t)boosted;
|
||||
}
|
||||
}
|
||||
|
||||
nb_bytes = opus_encode(encoder, pcm_buffer, frame_size, out, max_packet_size);
|
||||
return nb_bytes;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// AUDIO INPUT PATH FUNCTIONS (Client Microphone → Device Speakers)
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Initialize INPUT path (Opus decoder → device speakers)
|
||||
|
|
@ -524,7 +550,7 @@ retry_read:
|
|||
int jetkvm_audio_playback_init() {
|
||||
int err;
|
||||
|
||||
simd_init_once();
|
||||
init_alsa_devices_from_env();
|
||||
|
||||
if (__sync_bool_compare_and_swap(&playback_initializing, 0, 1) == 0) {
|
||||
return -EBUSY;
|
||||
|
|
@ -544,8 +570,11 @@ int jetkvm_audio_playback_init() {
|
|||
pcm_playback_handle = NULL;
|
||||
}
|
||||
|
||||
err = safe_alsa_open(&pcm_playback_handle, "hw:1,0", SND_PCM_STREAM_PLAYBACK);
|
||||
err = safe_alsa_open(&pcm_playback_handle, alsa_playback_device, SND_PCM_STREAM_PLAYBACK);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Failed to open ALSA playback device %s: %s\n",
|
||||
alsa_playback_device, snd_strerror(err));
|
||||
fflush(stderr);
|
||||
err = safe_alsa_open(&pcm_playback_handle, "default", SND_PCM_STREAM_PLAYBACK);
|
||||
if (err < 0) {
|
||||
playback_initializing = 0;
|
||||
|
|
@ -582,39 +611,27 @@ int jetkvm_audio_playback_init() {
|
|||
* @param opus_size Size of Opus packet in bytes
|
||||
* @return >0 = PCM frames written, 0 = frame skipped, -1/-2 = error
|
||||
*/
|
||||
__attribute__((hot)) int jetkvm_audio_decode_write(void * __restrict__ opus_buf, int opus_size) {
|
||||
// Static buffer persists across calls for better cache locality
|
||||
static short SIMD_ALIGN pcm_buffer[1920]; // 960 frames × 2 channels
|
||||
|
||||
// Local variables
|
||||
__attribute__((hot)) int jetkvm_audio_decode_write(void * __restrict__ opus_buf, int32_t opus_size) {
|
||||
static short CACHE_ALIGN pcm_buffer[960 * 2]; // Cache-aligned
|
||||
unsigned char * __restrict__ in = (unsigned char*)opus_buf;
|
||||
int pcm_frames;
|
||||
int pcm_rc;
|
||||
int err = 0;
|
||||
int recovery_attempts = 0;
|
||||
const int max_recovery_attempts = 3;
|
||||
int32_t pcm_frames, pcm_rc, err = 0;
|
||||
uint8_t recovery_attempts = 0;
|
||||
const uint8_t max_recovery_attempts = 3;
|
||||
|
||||
// Prefetch input buffer for read
|
||||
SIMD_PREFETCH(in, 0, 3);
|
||||
// Prefetch input buffer - locality 0 for immediate use
|
||||
SIMD_PREFETCH(in, 0, 0);
|
||||
|
||||
if (__builtin_expect(!playback_initialized || !pcm_playback_handle || !decoder || !opus_buf || opus_size <= 0, 0)) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Failed safety checks - playback_initialized=%d, pcm_playback_handle=%p, decoder=%p, opus_buf=%p, opus_size=%d\n",
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Failed safety checks - playback_initialized=%d, pcm_playback_handle=%p, decoder=%p, opus_buf=%p, opus_size=%d\n",
|
||||
playback_initialized, pcm_playback_handle, decoder, opus_buf, opus_size);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (opus_size > max_packet_size) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Opus packet too large - size=%d, max=%d\n", opus_size, max_packet_size);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Opus packet too large - size=%d, max=%d\n", opus_size, max_packet_size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Processing Opus packet - size=%d bytes\n", opus_size);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Processing Opus packet - size=%d bytes\n", opus_size);
|
||||
|
||||
// Decode Opus packet to PCM (FEC automatically applied if embedded in packet)
|
||||
// decode_fec=0 means normal decode (FEC data is used automatically when present)
|
||||
|
|
@ -622,168 +639,114 @@ __attribute__((hot)) int jetkvm_audio_decode_write(void * __restrict__ opus_buf,
|
|||
|
||||
if (__builtin_expect(pcm_frames < 0, 0)) {
|
||||
// Decode failed - attempt packet loss concealment using FEC from previous packet
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Opus decode failed with error %d, attempting packet loss concealment\n", pcm_frames);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Opus decode failed with error %d, attempting packet loss concealment\n", pcm_frames);
|
||||
|
||||
// decode_fec=1 means use FEC data from the NEXT packet to reconstruct THIS lost packet
|
||||
pcm_frames = opus_decode(decoder, NULL, 0, pcm_buffer, frame_size, 1);
|
||||
if (pcm_frames < 0) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Packet loss concealment also failed with error %d\n", pcm_frames);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Packet loss concealment also failed with error %d\n", pcm_frames);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Packet loss concealment succeeded, recovered %d frames\n", pcm_frames);
|
||||
}
|
||||
} else if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Opus decode successful - decoded %d PCM frames\n", pcm_frames);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Packet loss concealment succeeded, recovered %d frames\n", pcm_frames);
|
||||
} else
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Opus decode successful - decoded %d PCM frames\n", pcm_frames);
|
||||
|
||||
retry_write:
|
||||
// Write decoded PCM to ALSA playback device
|
||||
pcm_rc = snd_pcm_writei(pcm_playback_handle, pcm_buffer, pcm_frames);
|
||||
if (__builtin_expect(pcm_rc < 0, 0)) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: ALSA write failed with error %d (%s), attempt %d/%d\n",
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: ALSA write failed with error %d (%s), attempt %d/%d\n",
|
||||
pcm_rc, snd_strerror(pcm_rc), recovery_attempts + 1, max_recovery_attempts);
|
||||
}
|
||||
|
||||
if (pcm_rc == -EPIPE) {
|
||||
recovery_attempts++;
|
||||
if (recovery_attempts > max_recovery_attempts) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Buffer underrun recovery failed after %d attempts\n", max_recovery_attempts);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Buffer underrun recovery failed after %d attempts\n", max_recovery_attempts);
|
||||
return -2;
|
||||
}
|
||||
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Buffer underrun detected, attempting recovery (attempt %d)\n", recovery_attempts);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Buffer underrun detected, attempting recovery (attempt %d)\n", recovery_attempts);
|
||||
err = snd_pcm_prepare(pcm_playback_handle);
|
||||
if (err < 0) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: snd_pcm_prepare failed (%s), trying drop+prepare\n", snd_strerror(err));
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: snd_pcm_prepare failed (%s), trying drop+prepare\n", snd_strerror(err));
|
||||
snd_pcm_drop(pcm_playback_handle);
|
||||
err = snd_pcm_prepare(pcm_playback_handle);
|
||||
if (err < 0) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: drop+prepare recovery failed (%s)\n", snd_strerror(err));
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: drop+prepare recovery failed (%s)\n", snd_strerror(err));
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Buffer underrun recovery successful, retrying write\n");
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Buffer underrun recovery successful, retrying write\n");
|
||||
goto retry_write;
|
||||
} else if (pcm_rc == -ESTRPIPE) {
|
||||
recovery_attempts++;
|
||||
if (recovery_attempts > max_recovery_attempts) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device suspend recovery failed after %d attempts\n", max_recovery_attempts);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device suspend recovery failed after %d attempts\n", max_recovery_attempts);
|
||||
return -2;
|
||||
}
|
||||
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device suspended, attempting resume (attempt %d)\n", recovery_attempts);
|
||||
}
|
||||
int resume_attempts = 0;
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device suspended, attempting resume (attempt %d)\n", recovery_attempts);
|
||||
uint8_t resume_attempts = 0;
|
||||
while ((err = snd_pcm_resume(pcm_playback_handle)) == -EAGAIN && resume_attempts < 10) {
|
||||
usleep(sleep_microseconds);
|
||||
snd_pcm_wait(pcm_playback_handle, sleep_milliseconds);
|
||||
resume_attempts++;
|
||||
}
|
||||
if (err < 0) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device resume failed (%s), trying prepare fallback\n", snd_strerror(err));
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device resume failed (%s), trying prepare fallback\n", snd_strerror(err));
|
||||
err = snd_pcm_prepare(pcm_playback_handle);
|
||||
if (err < 0) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Prepare fallback failed (%s)\n", snd_strerror(err));
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Prepare fallback failed (%s)\n", snd_strerror(err));
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device suspend recovery successful, skipping frame\n");
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device suspend recovery successful, skipping frame\n");
|
||||
return 0;
|
||||
} else if (pcm_rc == -ENODEV) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device disconnected (ENODEV) - critical error\n");
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device disconnected (ENODEV) - critical error\n");
|
||||
return -2;
|
||||
} else if (pcm_rc == -EIO) {
|
||||
recovery_attempts++;
|
||||
if (recovery_attempts <= max_recovery_attempts) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: I/O error detected, attempting recovery\n");
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: I/O error detected, attempting recovery\n");
|
||||
snd_pcm_drop(pcm_playback_handle);
|
||||
err = snd_pcm_prepare(pcm_playback_handle);
|
||||
if (err >= 0) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: I/O error recovery successful, retrying write\n");
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: I/O error recovery successful, retrying write\n");
|
||||
goto retry_write;
|
||||
}
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: I/O error recovery failed (%s)\n", snd_strerror(err));
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: I/O error recovery failed (%s)\n", snd_strerror(err));
|
||||
}
|
||||
return -2;
|
||||
} else if (pcm_rc == -EAGAIN) {
|
||||
recovery_attempts++;
|
||||
if (recovery_attempts <= max_recovery_attempts) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device not ready (EAGAIN), waiting and retrying\n");
|
||||
}
|
||||
snd_pcm_wait(pcm_playback_handle, sleep_microseconds / 4000);
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device not ready (EAGAIN), waiting and retrying\n");
|
||||
snd_pcm_wait(pcm_playback_handle, 1); // Wait 1ms
|
||||
goto retry_write;
|
||||
}
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Device not ready recovery failed after %d attempts\n", max_recovery_attempts);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Device not ready recovery failed after %d attempts\n", max_recovery_attempts);
|
||||
return -2;
|
||||
} else {
|
||||
recovery_attempts++;
|
||||
if (recovery_attempts <= 1 && (pcm_rc == -EINTR || pcm_rc == -EBUSY)) {
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Transient error %d (%s), retrying once\n", pcm_rc, snd_strerror(pcm_rc));
|
||||
}
|
||||
usleep(sleep_microseconds / 2);
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Transient error %d (%s), retrying once\n", pcm_rc, snd_strerror(pcm_rc));
|
||||
snd_pcm_wait(pcm_playback_handle, 1); // Wait 1ms
|
||||
goto retry_write;
|
||||
}
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Unrecoverable error %d (%s)\n", pcm_rc, snd_strerror(pcm_rc));
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Unrecoverable error %d (%s)\n", pcm_rc, snd_strerror(pcm_rc));
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
|
||||
if (trace_logging_enabled) {
|
||||
printf("[AUDIO_INPUT] jetkvm_audio_decode_write: Successfully wrote %d PCM frames to device\n", pcm_frames);
|
||||
}
|
||||
TRACE_LOG("[AUDIO_INPUT] jetkvm_audio_decode_write: Successfully wrote %d PCM frames to device\n", pcm_frames);
|
||||
return pcm_frames;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// CLEANUP FUNCTIONS
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Close INPUT path (thread-safe with drain)
|
||||
*/
|
||||
void jetkvm_audio_playback_close() {
|
||||
while (playback_initializing) {
|
||||
usleep(sleep_microseconds);
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
if (__sync_bool_compare_and_swap(&playback_initialized, 1, 0) == 0) {
|
||||
|
|
@ -806,7 +769,7 @@ void jetkvm_audio_playback_close() {
|
|||
*/
|
||||
void jetkvm_audio_capture_close() {
|
||||
while (capture_initializing) {
|
||||
usleep(sleep_microseconds);
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
if (__sync_bool_compare_and_swap(&capture_initialized, 1, 0) == 0) {
|
||||
|
|
|
|||
|
|
@ -5,21 +5,24 @@
|
|||
*/
|
||||
|
||||
#include "audio_common.h"
|
||||
#include "ipc_protocol.h"
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <signal.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/socket.h>
|
||||
#include <time.h>
|
||||
|
||||
// Forward declarations for encoder update (only in output server)
|
||||
extern int update_opus_encoder_params(uint32_t bitrate, uint8_t complexity);
|
||||
|
||||
// ============================================================================
|
||||
// GLOBAL STATE FOR SIGNAL HANDLER
|
||||
// ============================================================================
|
||||
|
||||
// Pointer to the running flag that will be set to 0 on shutdown
|
||||
static volatile sig_atomic_t *g_running_ptr = NULL;
|
||||
|
||||
// ============================================================================
|
||||
// SIGNAL HANDLERS
|
||||
// ============================================================================
|
||||
|
||||
static void signal_handler(int signo) {
|
||||
if (signo == SIGTERM || signo == SIGINT) {
|
||||
|
|
@ -46,16 +49,13 @@ void audio_common_setup_signal_handlers(volatile sig_atomic_t *running) {
|
|||
signal(SIGPIPE, SIG_IGN);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION PARSING
|
||||
// ============================================================================
|
||||
|
||||
int audio_common_parse_env_int(const char *name, int default_value) {
|
||||
int32_t audio_common_parse_env_int(const char *name, int32_t default_value) {
|
||||
const char *str = getenv(name);
|
||||
if (str == NULL || str[0] == '\0') {
|
||||
return default_value;
|
||||
}
|
||||
return atoi(str);
|
||||
return (int32_t)atoi(str);
|
||||
}
|
||||
|
||||
const char* audio_common_parse_env_string(const char *name, const char *default_value) {
|
||||
|
|
@ -66,15 +66,103 @@ const char* audio_common_parse_env_string(const char *name, const char *default_
|
|||
return str;
|
||||
}
|
||||
|
||||
int audio_common_is_trace_enabled(void) {
|
||||
const char *pion_trace = getenv("PION_LOG_TRACE");
|
||||
if (pion_trace == NULL) {
|
||||
return 0;
|
||||
// COMMON CONFIGURATION
|
||||
|
||||
void audio_common_load_config(audio_config_t *config, int is_output) {
|
||||
// ALSA device configuration
|
||||
if (is_output) {
|
||||
config->alsa_device = audio_common_parse_env_string("ALSA_CAPTURE_DEVICE", "hw:0,0");
|
||||
} else {
|
||||
config->alsa_device = audio_common_parse_env_string("ALSA_PLAYBACK_DEVICE", "hw:1,0");
|
||||
}
|
||||
|
||||
// Check if "audio" is in comma-separated list
|
||||
if (strstr(pion_trace, "audio") != NULL) {
|
||||
return 1;
|
||||
// Common Opus configuration
|
||||
config->opus_bitrate = audio_common_parse_env_int("OPUS_BITRATE", 128000);
|
||||
config->opus_complexity = audio_common_parse_env_int("OPUS_COMPLEXITY", 2);
|
||||
|
||||
// Audio format
|
||||
config->sample_rate = audio_common_parse_env_int("AUDIO_SAMPLE_RATE", 48000);
|
||||
config->channels = audio_common_parse_env_int("AUDIO_CHANNELS", 2);
|
||||
config->frame_size = audio_common_parse_env_int("AUDIO_FRAME_SIZE", 960);
|
||||
|
||||
// Log configuration
|
||||
printf("Audio %s Server Configuration:\n", is_output ? "Output" : "Input");
|
||||
printf(" ALSA Device: %s\n", config->alsa_device);
|
||||
printf(" Sample Rate: %d Hz\n", config->sample_rate);
|
||||
printf(" Channels: %d\n", config->channels);
|
||||
printf(" Frame Size: %d samples\n", config->frame_size);
|
||||
if (is_output) {
|
||||
printf(" Opus Bitrate: %d bps\n", config->opus_bitrate);
|
||||
printf(" Opus Complexity: %d\n", config->opus_complexity);
|
||||
}
|
||||
}
|
||||
|
||||
void audio_common_print_startup(const char *server_name) {
|
||||
printf("JetKVM %s Starting...\n", server_name);
|
||||
}
|
||||
|
||||
void audio_common_print_shutdown(const char *server_name) {
|
||||
printf("Shutting down %s...\n", server_name);
|
||||
}
|
||||
|
||||
|
||||
int audio_common_handle_opus_config(const uint8_t *data, uint32_t length, int is_encoder) {
|
||||
ipc_opus_config_t config;
|
||||
|
||||
if (ipc_parse_opus_config(data, length, &config) != 0) {
|
||||
fprintf(stderr, "Failed to parse Opus config\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (is_encoder) {
|
||||
printf("Received Opus config: bitrate=%u, complexity=%u\n",
|
||||
config.bitrate, config.complexity);
|
||||
|
||||
int result = update_opus_encoder_params(
|
||||
config.bitrate,
|
||||
config.complexity
|
||||
);
|
||||
|
||||
if (result != 0) {
|
||||
fprintf(stderr, "Warning: Failed to apply Opus encoder parameters\n");
|
||||
}
|
||||
} else {
|
||||
printf("Received Opus config (informational): bitrate=%u, complexity=%u\n",
|
||||
config.bitrate, config.complexity);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// IPC MAIN LOOP HELPERS
|
||||
|
||||
int audio_common_server_loop(int server_sock, volatile sig_atomic_t *running,
|
||||
connection_handler_t handler) {
|
||||
while (*running) {
|
||||
printf("Waiting for client connection...\n");
|
||||
|
||||
int client_sock = accept(server_sock, NULL, NULL);
|
||||
if (client_sock < 0) {
|
||||
if (*running) {
|
||||
fprintf(stderr, "Failed to accept client, retrying...\n");
|
||||
struct timespec ts = {1, 0}; // 1 second
|
||||
nanosleep(&ts, NULL);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
printf("Client connected (fd=%d)\n", client_sock);
|
||||
|
||||
// Run handler with this client
|
||||
handler(client_sock, running);
|
||||
|
||||
// Close client connection
|
||||
close(client_sock);
|
||||
|
||||
if (*running) {
|
||||
printf("Client disconnected, waiting for next client...\n");
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -8,10 +8,23 @@
|
|||
#define JETKVM_AUDIO_COMMON_H
|
||||
|
||||
#include <signal.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// SHARED CONSTANTS
|
||||
|
||||
// Audio processing parameters
|
||||
#define AUDIO_MAX_PACKET_SIZE 1500 // Maximum Opus packet size
|
||||
#define AUDIO_SLEEP_MICROSECONDS 1000 // Default sleep time in microseconds
|
||||
#define AUDIO_MAX_ATTEMPTS 5 // Maximum retry attempts
|
||||
#define AUDIO_MAX_BACKOFF_US 500000 // Maximum backoff in microseconds
|
||||
|
||||
// Error handling
|
||||
#define AUDIO_MAX_CONSECUTIVE_ERRORS 10 // Maximum consecutive errors before giving up
|
||||
|
||||
// Performance monitoring
|
||||
#define AUDIO_TRACE_MASK 0x3FF // Log every 1024th frame (bit mask for efficiency)
|
||||
|
||||
// ============================================================================
|
||||
// SIGNAL HANDLERS
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Setup signal handlers for graceful shutdown.
|
||||
|
|
@ -22,9 +35,6 @@
|
|||
*/
|
||||
void audio_common_setup_signal_handlers(volatile sig_atomic_t *running);
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION PARSING
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Parse integer from environment variable.
|
||||
|
|
@ -34,7 +44,7 @@ void audio_common_setup_signal_handlers(volatile sig_atomic_t *running);
|
|||
* @param default_value Default value if not set
|
||||
* @return Parsed integer value or default
|
||||
*/
|
||||
int audio_common_parse_env_int(const char *name, int default_value);
|
||||
int32_t audio_common_parse_env_int(const char *name, int32_t default_value);
|
||||
|
||||
/**
|
||||
* Parse string from environment variable.
|
||||
|
|
@ -46,12 +56,105 @@ int audio_common_parse_env_int(const char *name, int default_value);
|
|||
*/
|
||||
const char* audio_common_parse_env_string(const char *name, const char *default_value);
|
||||
|
||||
|
||||
// COMMON CONFIGURATION
|
||||
|
||||
/**
|
||||
* Check if trace logging is enabled for audio subsystem.
|
||||
* Looks for "audio" in PION_LOG_TRACE comma-separated list.
|
||||
*
|
||||
* @return 1 if enabled, 0 otherwise
|
||||
* Common audio configuration structure
|
||||
*/
|
||||
int audio_common_is_trace_enabled(void);
|
||||
typedef struct {
|
||||
const char *alsa_device; // ALSA device path
|
||||
int opus_bitrate; // Opus bitrate
|
||||
int opus_complexity; // Opus complexity
|
||||
int sample_rate; // Sample rate
|
||||
int channels; // Number of channels
|
||||
int frame_size; // Frame size in samples
|
||||
} audio_config_t;
|
||||
|
||||
/**
|
||||
* Load common audio configuration from environment
|
||||
* @param config Output configuration
|
||||
* @param is_output true for output server, false for input
|
||||
*/
|
||||
void audio_common_load_config(audio_config_t *config, int is_output);
|
||||
|
||||
/**
|
||||
* Print server startup message
|
||||
* @param server_name Name of the server (e.g., "Audio Output Server")
|
||||
*/
|
||||
void audio_common_print_startup(const char *server_name);
|
||||
|
||||
/**
|
||||
* Print server shutdown message
|
||||
* @param server_name Name of the server
|
||||
*/
|
||||
void audio_common_print_shutdown(const char *server_name);
|
||||
|
||||
// ERROR TRACKING
|
||||
|
||||
/**
|
||||
* Error tracking state for audio processing loops
|
||||
*/
|
||||
typedef struct {
|
||||
uint8_t consecutive_errors; // Current consecutive error count
|
||||
uint32_t frame_count; // Total frames processed
|
||||
} audio_error_tracker_t;
|
||||
|
||||
/**
|
||||
* Initialize error tracker
|
||||
*/
|
||||
static inline void audio_error_tracker_init(audio_error_tracker_t *tracker) {
|
||||
tracker->consecutive_errors = 0;
|
||||
tracker->frame_count = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Record an error and check if we should give up
|
||||
* Returns 1 if too many errors, 0 to continue
|
||||
*/
|
||||
static inline uint8_t audio_error_tracker_record_error(audio_error_tracker_t *tracker) {
|
||||
tracker->consecutive_errors++;
|
||||
return (tracker->consecutive_errors >= AUDIO_MAX_CONSECUTIVE_ERRORS) ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Record success and increment frame count
|
||||
*/
|
||||
static inline void audio_error_tracker_record_success(audio_error_tracker_t *tracker) {
|
||||
tracker->consecutive_errors = 0;
|
||||
tracker->frame_count++;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if we should log trace info for this frame
|
||||
*/
|
||||
static inline uint8_t audio_error_tracker_should_trace(audio_error_tracker_t *tracker) {
|
||||
return ((tracker->frame_count & AUDIO_TRACE_MASK) == 1) ? 1 : 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Parse Opus config message and optionally apply to encoder.
|
||||
* @param data Raw message data
|
||||
* @param length Message length
|
||||
* @param is_encoder If true, apply config to encoder (output server)
|
||||
* @return 0 on success, -1 on error
|
||||
*/
|
||||
int audio_common_handle_opus_config(const uint8_t *data, uint32_t length, int is_encoder);
|
||||
|
||||
// IPC MAIN LOOP HELPERS
|
||||
|
||||
/**
|
||||
* Common server accept loop with signal handling.
|
||||
* Accepts clients and calls handler function for each connection.
|
||||
*
|
||||
* @param server_sock Server socket from ipc_create_server
|
||||
* @param running Pointer to running flag (set to 0 on shutdown)
|
||||
* @param handler Connection handler function
|
||||
* @return 0 on clean shutdown, -1 on error
|
||||
*/
|
||||
typedef int (*connection_handler_t)(int client_sock, volatile sig_atomic_t *running);
|
||||
int audio_common_server_loop(int server_sock, volatile sig_atomic_t *running,
|
||||
connection_handler_t handler);
|
||||
|
||||
#endif // JETKVM_AUDIO_COMMON_H
|
||||
|
|
|
|||
|
|
@ -18,9 +18,7 @@
|
|||
#include <sys/uio.h>
|
||||
#include <endian.h>
|
||||
|
||||
// ============================================================================
|
||||
// HELPER FUNCTIONS
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Read exactly N bytes from socket (loops until complete or error).
|
||||
|
|
@ -37,11 +35,11 @@ int ipc_read_full(int sock, void *buf, size_t len) {
|
|||
if (errno == EINTR) {
|
||||
continue; // Interrupted by signal, retry
|
||||
}
|
||||
return -1; // Read error
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (n == 0) {
|
||||
return -1; // EOF (connection closed)
|
||||
return -1; // Connection closed
|
||||
}
|
||||
|
||||
ptr += n;
|
||||
|
|
@ -51,21 +49,8 @@ int ipc_read_full(int sock, void *buf, size_t len) {
|
|||
return 0; // Success
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current time in nanoseconds (Unix epoch).
|
||||
* Compatible with Go time.Now().UnixNano().
|
||||
*/
|
||||
int64_t ipc_get_time_ns(void) {
|
||||
struct timespec ts;
|
||||
if (clock_gettime(CLOCK_REALTIME, &ts) != 0) {
|
||||
return 0; // Fallback on error
|
||||
}
|
||||
return (int64_t)ts.tv_sec * 1000000000LL + (int64_t)ts.tv_nsec;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MESSAGE READ/WRITE
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Read a complete IPC message from socket.
|
||||
|
|
@ -80,7 +65,7 @@ int ipc_read_message(int sock, ipc_message_t *msg, uint32_t expected_magic) {
|
|||
// Initialize message
|
||||
memset(msg, 0, sizeof(ipc_message_t));
|
||||
|
||||
// 1. Read header (17 bytes)
|
||||
// 1. Read header (9 bytes)
|
||||
if (ipc_read_full(sock, &msg->header, IPC_HEADER_SIZE) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
|
@ -88,8 +73,6 @@ int ipc_read_message(int sock, ipc_message_t *msg, uint32_t expected_magic) {
|
|||
// 2. Convert from little-endian (required on big-endian systems)
|
||||
msg->header.magic = le32toh(msg->header.magic);
|
||||
msg->header.length = le32toh(msg->header.length);
|
||||
msg->header.timestamp = le64toh(msg->header.timestamp);
|
||||
// Note: type is uint8_t, no conversion needed
|
||||
|
||||
// 3. Validate magic number
|
||||
if (msg->header.magic != expected_magic) {
|
||||
|
|
@ -124,6 +107,48 @@ int ipc_read_message(int sock, ipc_message_t *msg, uint32_t expected_magic) {
|
|||
return 0; // Success
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a complete IPC message using pre-allocated buffer (zero-copy).
|
||||
*/
|
||||
int ipc_read_message_zerocopy(int sock, ipc_message_t *msg, uint32_t expected_magic,
|
||||
uint8_t *buffer, uint32_t buffer_size) {
|
||||
if (msg == NULL || buffer == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Initialize message
|
||||
memset(msg, 0, sizeof(ipc_message_t));
|
||||
|
||||
// 1. Read header (9 bytes)
|
||||
if (ipc_read_full(sock, &msg->header, IPC_HEADER_SIZE) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 2. Convert from little-endian
|
||||
msg->header.magic = le32toh(msg->header.magic);
|
||||
msg->header.length = le32toh(msg->header.length);
|
||||
|
||||
// 3. Validate magic number
|
||||
if (msg->header.magic != expected_magic) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 4. Validate length
|
||||
if (msg->header.length > IPC_MAX_FRAME_SIZE || msg->header.length > buffer_size) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 5. Read payload directly into provided buffer (zero-copy)
|
||||
if (msg->header.length > 0) {
|
||||
if (ipc_read_full(sock, buffer, msg->header.length) != 0) {
|
||||
return -1;
|
||||
}
|
||||
msg->data = buffer; // Point to provided buffer, no allocation
|
||||
}
|
||||
|
||||
return 0; // Success
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a complete IPC message to socket.
|
||||
* Uses writev() for atomic header+payload write.
|
||||
|
|
@ -143,7 +168,6 @@ int ipc_write_message(int sock, uint32_t magic, uint8_t type,
|
|||
header.magic = htole32(magic);
|
||||
header.type = type;
|
||||
header.length = htole32(length);
|
||||
header.timestamp = htole64(ipc_get_time_ns());
|
||||
|
||||
// Use writev for atomic write (if possible)
|
||||
struct iovec iov[2];
|
||||
|
|
@ -177,9 +201,6 @@ int ipc_write_message(int sock, uint32_t magic, uint8_t type,
|
|||
return 0; // Success
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION PARSING
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Parse Opus configuration from message data (36 bytes, little-endian).
|
||||
|
|
@ -241,9 +262,7 @@ void ipc_free_message(ipc_message_t *msg) {
|
|||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// SOCKET MANAGEMENT
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Create Unix domain socket server.
|
||||
|
|
|
|||
|
|
@ -14,9 +14,7 @@
|
|||
#include <stdint.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
// ============================================================================
|
||||
// PROTOCOL CONSTANTS
|
||||
// ============================================================================
|
||||
|
||||
// Magic numbers (ASCII representation when read as little-endian)
|
||||
#define IPC_MAGIC_OUTPUT 0x4A4B4F55 // "JKOU" - JetKVM Output (device → browser)
|
||||
|
|
@ -31,34 +29,32 @@
|
|||
#define IPC_MSG_TYPE_ACK 5 // Acknowledgment
|
||||
|
||||
// Size constraints
|
||||
#define IPC_HEADER_SIZE 17 // Fixed header size
|
||||
#define IPC_MAX_FRAME_SIZE 4096 // Maximum payload size (matches Go Config.MaxFrameSize)
|
||||
#define IPC_HEADER_SIZE 9 // Fixed header size (reduced from 17)
|
||||
#define IPC_MAX_FRAME_SIZE 1024 // Maximum payload size (128kbps @ 20ms = ~600 bytes worst case with VBR+FEC)
|
||||
|
||||
// Socket paths
|
||||
#define IPC_SOCKET_OUTPUT "/var/run/audio_output.sock"
|
||||
#define IPC_SOCKET_INPUT "/var/run/audio_input.sock"
|
||||
|
||||
// ============================================================================
|
||||
// WIRE FORMAT STRUCTURES
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* IPC message header (17 bytes, little-endian)
|
||||
* IPC message header (9 bytes, little-endian)
|
||||
*
|
||||
* Byte layout:
|
||||
* [0-3] magic uint32_t LE Magic number (0x4A4B4F55 or 0x4A4B4D49)
|
||||
* [4] type uint8_t Message type (0-5)
|
||||
* [5-8] length uint32_t LE Payload size in bytes
|
||||
* [9-16] timestamp int64_t LE Unix nanoseconds (time.Now().UnixNano())
|
||||
* [17+] data uint8_t[] Variable payload
|
||||
* [9+] data uint8_t[] Variable payload
|
||||
*
|
||||
* CRITICAL: Must use __attribute__((packed)) to prevent padding.
|
||||
*
|
||||
* NOTE: Timestamp removed (was unused, saved 8 bytes per message)
|
||||
*/
|
||||
typedef struct __attribute__((packed)) {
|
||||
uint32_t magic; // Magic number (LE)
|
||||
uint8_t type; // Message type
|
||||
uint32_t length; // Payload length in bytes (LE)
|
||||
int64_t timestamp; // Unix nanoseconds (LE)
|
||||
} ipc_header_t;
|
||||
|
||||
/**
|
||||
|
|
@ -83,12 +79,12 @@ typedef struct __attribute__((packed)) {
|
|||
typedef struct __attribute__((packed)) {
|
||||
uint32_t sample_rate; // Samples per second (48000)
|
||||
uint32_t channels; // Number of channels (2)
|
||||
uint32_t frame_size; // Samples per frame (960)
|
||||
uint32_t bitrate; // Bits per second (96000)
|
||||
uint32_t complexity; // Encoder complexity 0-10 (1=fast, 10=best quality)
|
||||
uint32_t frame_size; // Samples per frame per channel (960 = 20ms @ 48kHz)
|
||||
uint32_t bitrate; // Bits per second (128000)
|
||||
uint32_t complexity; // Encoder complexity 0-10 (2=balanced quality/speed)
|
||||
uint32_t vbr; // Variable bitrate: 0=disabled, 1=enabled
|
||||
uint32_t signal_type; // Signal type: -1000=auto, 3001=music, 3002=voice
|
||||
uint32_t bandwidth; // Bandwidth: 1101=narrowband, 1102=mediumband, 1103=wideband
|
||||
uint32_t signal_type; // Signal type: -1000=auto, 3001=voice, 3002=music
|
||||
uint32_t bandwidth; // Bandwidth: 1101=narrowband, 1102=mediumband, 1103=wideband, 1104=superwideband, 1105=fullband
|
||||
uint32_t dtx; // Discontinuous transmission: 0=disabled, 1=enabled
|
||||
} ipc_opus_config_t;
|
||||
|
||||
|
|
@ -100,15 +96,12 @@ typedef struct {
|
|||
uint8_t *data; // Dynamically allocated payload (NULL if length=0)
|
||||
} ipc_message_t;
|
||||
|
||||
// ============================================================================
|
||||
// FUNCTION DECLARATIONS
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Read a complete IPC message from socket.
|
||||
*
|
||||
* This function:
|
||||
* 1. Reads exactly 17 bytes (header)
|
||||
* 1. Reads exactly 9 bytes (header)
|
||||
* 2. Validates magic number
|
||||
* 3. Validates length <= IPC_MAX_FRAME_SIZE
|
||||
* 4. Allocates and reads payload if length > 0
|
||||
|
|
@ -123,11 +116,25 @@ typedef struct {
|
|||
*/
|
||||
int ipc_read_message(int sock, ipc_message_t *msg, uint32_t expected_magic);
|
||||
|
||||
/**
|
||||
* Read a complete IPC message using pre-allocated buffer (zero-copy).
|
||||
*
|
||||
* @param sock Socket file descriptor
|
||||
* @param msg Message structure to fill
|
||||
* @param expected_magic Expected magic number for validation
|
||||
* @param buffer Pre-allocated buffer for message data
|
||||
* @param buffer_size Size of pre-allocated buffer
|
||||
* @return 0 on success, -1 on error
|
||||
*
|
||||
* msg->data will point to buffer (no allocation). Caller does NOT need to free.
|
||||
*/
|
||||
int ipc_read_message_zerocopy(int sock, ipc_message_t *msg, uint32_t expected_magic,
|
||||
uint8_t *buffer, uint32_t buffer_size);
|
||||
|
||||
/**
|
||||
* Write a complete IPC message to socket.
|
||||
*
|
||||
* This function writes header + payload atomically (if possible via writev).
|
||||
* Sets timestamp to current time.
|
||||
*
|
||||
* @param sock Socket file descriptor
|
||||
* @param magic Magic number (IPC_MAGIC_OUTPUT or IPC_MAGIC_INPUT)
|
||||
|
|
@ -166,12 +173,6 @@ int ipc_parse_config(const uint8_t *data, uint32_t length, ipc_config_t *config)
|
|||
*/
|
||||
void ipc_free_message(ipc_message_t *msg);
|
||||
|
||||
/**
|
||||
* Get current time in nanoseconds (Unix epoch).
|
||||
*
|
||||
* @return Time in nanoseconds (compatible with Go time.Now().UnixNano())
|
||||
*/
|
||||
int64_t ipc_get_time_ns(void);
|
||||
|
||||
/**
|
||||
* Create Unix domain socket server.
|
||||
|
|
|
|||
|
|
@ -14,161 +14,76 @@
|
|||
#include "audio_common.h"
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <signal.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
// Forward declarations from audio.c
|
||||
extern int jetkvm_audio_playback_init(void);
|
||||
extern void jetkvm_audio_playback_close(void);
|
||||
extern int jetkvm_audio_decode_write(void *opus_buf, int opus_size);
|
||||
extern void update_audio_constants(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx, int lsb_depth,
|
||||
int sr, int ch, int fs, int max_pkt,
|
||||
int sleep_us, int max_attempts, int max_backoff);
|
||||
extern void set_trace_logging(int enabled);
|
||||
extern void update_audio_decoder_constants(uint32_t sr, uint8_t ch, uint16_t fs, uint16_t max_pkt,
|
||||
uint32_t sleep_us, uint8_t max_attempts, uint32_t max_backoff);
|
||||
|
||||
// Note: Input server uses decoder, not encoder, so no update_opus_encoder_params
|
||||
|
||||
// ============================================================================
|
||||
// GLOBAL STATE
|
||||
// ============================================================================
|
||||
static volatile sig_atomic_t g_running = 1;
|
||||
|
||||
static volatile sig_atomic_t g_running = 1; // Shutdown flag
|
||||
|
||||
// Audio configuration (from environment variables)
|
||||
typedef struct {
|
||||
const char *alsa_device; // ALSA playback device (default: "hw:1,0")
|
||||
int opus_bitrate; // Opus bitrate (informational for decoder)
|
||||
int opus_complexity; // Opus complexity (decoder ignores this)
|
||||
int sample_rate; // Sample rate (default: 48000)
|
||||
int channels; // Channels (default: 2)
|
||||
int frame_size; // Frame size in samples (default: 960)
|
||||
int trace_logging; // Enable trace logging (default: 0)
|
||||
} audio_config_t;
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION PARSING
|
||||
// ============================================================================
|
||||
|
||||
static void load_audio_config(audio_config_t *config) {
|
||||
// ALSA device configuration
|
||||
config->alsa_device = audio_common_parse_env_string("ALSA_PLAYBACK_DEVICE", "hw:1,0");
|
||||
|
||||
// Opus configuration (informational only for decoder)
|
||||
config->opus_bitrate = audio_common_parse_env_int("OPUS_BITRATE", 96000);
|
||||
config->opus_complexity = audio_common_parse_env_int("OPUS_COMPLEXITY", 1);
|
||||
|
||||
// Audio format
|
||||
config->sample_rate = audio_common_parse_env_int("AUDIO_SAMPLE_RATE", 48000);
|
||||
config->channels = audio_common_parse_env_int("AUDIO_CHANNELS", 2);
|
||||
config->frame_size = audio_common_parse_env_int("AUDIO_FRAME_SIZE", 960);
|
||||
|
||||
// Logging
|
||||
config->trace_logging = audio_common_is_trace_enabled();
|
||||
|
||||
// Log configuration
|
||||
printf("Audio Input Server Configuration:\n");
|
||||
printf(" ALSA Device: %s\n", config->alsa_device);
|
||||
printf(" Sample Rate: %d Hz\n", config->sample_rate);
|
||||
printf(" Channels: %d\n", config->channels);
|
||||
printf(" Frame Size: %d samples\n", config->frame_size);
|
||||
printf(" Trace Logging: %s\n", config->trace_logging ? "enabled" : "disabled");
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MESSAGE HANDLING
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Handle OpusConfig message: informational only for decoder.
|
||||
* Decoder config updates are less critical than encoder.
|
||||
* Returns 0 on success.
|
||||
*/
|
||||
static int handle_opus_config(const uint8_t *data, uint32_t length) {
|
||||
ipc_opus_config_t config;
|
||||
|
||||
if (ipc_parse_opus_config(data, length, &config) != 0) {
|
||||
fprintf(stderr, "Failed to parse Opus config\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("Received Opus config (informational): bitrate=%u, complexity=%u\n",
|
||||
config.bitrate, config.complexity);
|
||||
|
||||
// Note: Decoder doesn't need most of these parameters.
|
||||
// Opus decoder automatically adapts to encoder settings embedded in stream.
|
||||
// FEC (Forward Error Correction) is enabled automatically when present in packets.
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Send ACK response for heartbeat messages.
|
||||
*/
|
||||
static int send_ack(int client_sock) {
|
||||
static inline int32_t send_ack(int32_t client_sock) {
|
||||
return ipc_write_message(client_sock, IPC_MAGIC_INPUT, IPC_MSG_TYPE_ACK, NULL, 0);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MAIN LOOP
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Main audio decode and playback loop.
|
||||
* Receives Opus frames via IPC, decodes, writes to ALSA.
|
||||
*/
|
||||
static int run_audio_loop(int client_sock) {
|
||||
int consecutive_errors = 0;
|
||||
const int max_consecutive_errors = 10;
|
||||
int frame_count = 0;
|
||||
static int run_audio_loop(int client_sock, volatile sig_atomic_t *running) {
|
||||
audio_error_tracker_t tracker;
|
||||
audio_error_tracker_init(&tracker);
|
||||
|
||||
// Static buffer for zero-copy IPC (no malloc/free per frame)
|
||||
static uint8_t frame_buffer[IPC_MAX_FRAME_SIZE] __attribute__((aligned(64)));
|
||||
|
||||
printf("Starting audio input loop...\n");
|
||||
|
||||
while (g_running) {
|
||||
while (*running) {
|
||||
ipc_message_t msg;
|
||||
|
||||
// Read message from client (blocking)
|
||||
if (ipc_read_message(client_sock, &msg, IPC_MAGIC_INPUT) != 0) {
|
||||
if (g_running) {
|
||||
if (ipc_read_message_zerocopy(client_sock, &msg, IPC_MAGIC_INPUT,
|
||||
frame_buffer, sizeof(frame_buffer)) != 0) {
|
||||
if (*running) {
|
||||
fprintf(stderr, "Failed to read message from client\n");
|
||||
}
|
||||
break; // Client disconnected or error
|
||||
break;
|
||||
}
|
||||
|
||||
// Process message based on type
|
||||
switch (msg.header.type) {
|
||||
case IPC_MSG_TYPE_OPUS_FRAME: {
|
||||
if (msg.header.length == 0 || msg.data == NULL) {
|
||||
fprintf(stderr, "Warning: Empty Opus frame received\n");
|
||||
ipc_free_message(&msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Decode Opus and write to ALSA
|
||||
int frames_written = jetkvm_audio_decode_write(msg.data, msg.header.length);
|
||||
|
||||
if (frames_written < 0) {
|
||||
consecutive_errors++;
|
||||
fprintf(stderr, "Audio decode/write failed (error %d/%d)\n",
|
||||
consecutive_errors, max_consecutive_errors);
|
||||
tracker.consecutive_errors + 1, AUDIO_MAX_CONSECUTIVE_ERRORS);
|
||||
|
||||
if (consecutive_errors >= max_consecutive_errors) {
|
||||
if (audio_error_tracker_record_error(&tracker)) {
|
||||
fprintf(stderr, "Too many consecutive errors, giving up\n");
|
||||
ipc_free_message(&msg);
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
// Success - reset error counter
|
||||
consecutive_errors = 0;
|
||||
frame_count++;
|
||||
audio_error_tracker_record_success(&tracker);
|
||||
|
||||
// Trace logging (periodic)
|
||||
if (frame_count % 1000 == 1) {
|
||||
printf("Processed frame %d (opus_size=%u, pcm_frames=%d)\n",
|
||||
frame_count, msg.header.length, frames_written);
|
||||
if (audio_error_tracker_should_trace(&tracker)) {
|
||||
printf("Processed frame %u (opus_size=%u, pcm_frames=%d)\n",
|
||||
tracker.frame_count, msg.header.length, frames_written);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -181,14 +96,13 @@ static int run_audio_loop(int client_sock) {
|
|||
break;
|
||||
|
||||
case IPC_MSG_TYPE_OPUS_CONFIG:
|
||||
handle_opus_config(msg.data, msg.header.length);
|
||||
audio_common_handle_opus_config(msg.data, msg.header.length, 0);
|
||||
send_ack(client_sock);
|
||||
break;
|
||||
|
||||
case IPC_MSG_TYPE_STOP:
|
||||
printf("Received stop message\n");
|
||||
ipc_free_message(&msg);
|
||||
g_running = 0;
|
||||
*running = 0;
|
||||
return 0;
|
||||
|
||||
case IPC_MSG_TYPE_HEARTBEAT:
|
||||
|
|
@ -199,48 +113,32 @@ static int run_audio_loop(int client_sock) {
|
|||
printf("Warning: Unknown message type: %u\n", msg.header.type);
|
||||
break;
|
||||
}
|
||||
|
||||
ipc_free_message(&msg);
|
||||
}
|
||||
|
||||
printf("Audio input loop ended after %d frames\n", frame_count);
|
||||
printf("Audio input loop ended after %u frames\n", tracker.frame_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MAIN
|
||||
// ============================================================================
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
printf("JetKVM Audio Input Server Starting...\n");
|
||||
audio_common_print_startup("Audio Input Server");
|
||||
|
||||
// Setup signal handlers
|
||||
audio_common_setup_signal_handlers(&g_running);
|
||||
|
||||
// Load configuration from environment
|
||||
audio_config_t config;
|
||||
load_audio_config(&config);
|
||||
audio_common_load_config(&config, 0); // 0 = input server
|
||||
|
||||
// Set trace logging
|
||||
set_trace_logging(config.trace_logging);
|
||||
|
||||
// Apply audio constants to audio.c
|
||||
update_audio_constants(
|
||||
config.opus_bitrate,
|
||||
config.opus_complexity,
|
||||
1, // vbr
|
||||
1, // vbr_constraint
|
||||
-1000, // signal_type (auto)
|
||||
1103, // bandwidth (wideband)
|
||||
0, // dtx
|
||||
16, // lsb_depth
|
||||
// Apply decoder constants to audio.c (encoder params not needed)
|
||||
update_audio_decoder_constants(
|
||||
config.sample_rate,
|
||||
config.channels,
|
||||
config.frame_size,
|
||||
1500, // max_packet_size
|
||||
1000, // sleep_microseconds
|
||||
5, // max_attempts
|
||||
500000 // max_backoff_us
|
||||
AUDIO_MAX_PACKET_SIZE,
|
||||
AUDIO_SLEEP_MICROSECONDS,
|
||||
AUDIO_MAX_ATTEMPTS,
|
||||
AUDIO_MAX_BACKOFF_US
|
||||
);
|
||||
|
||||
// Initialize audio playback (Opus decoder + ALSA playback)
|
||||
|
|
@ -259,32 +157,9 @@ int main(int argc, char **argv) {
|
|||
}
|
||||
|
||||
// Main connection loop
|
||||
while (g_running) {
|
||||
printf("Waiting for client connection...\n");
|
||||
audio_common_server_loop(server_sock, &g_running, run_audio_loop);
|
||||
|
||||
int client_sock = ipc_accept_client(server_sock);
|
||||
if (client_sock < 0) {
|
||||
if (g_running) {
|
||||
fprintf(stderr, "Failed to accept client, retrying...\n");
|
||||
sleep(1);
|
||||
continue;
|
||||
}
|
||||
break; // Shutting down
|
||||
}
|
||||
|
||||
// Run audio loop with this client
|
||||
run_audio_loop(client_sock);
|
||||
|
||||
// Close client connection
|
||||
close(client_sock);
|
||||
|
||||
if (g_running) {
|
||||
printf("Client disconnected, waiting for next client...\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
printf("Shutting down audio input server...\n");
|
||||
audio_common_print_shutdown("audio input server");
|
||||
close(server_sock);
|
||||
unlink(IPC_SOCKET_INPUT);
|
||||
jetkvm_audio_playback_close();
|
||||
|
|
|
|||
|
|
@ -16,133 +16,44 @@
|
|||
#include <signal.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <sched.h>
|
||||
#include <time.h>
|
||||
|
||||
// Forward declarations from audio.c
|
||||
extern int jetkvm_audio_capture_init(void);
|
||||
extern void jetkvm_audio_capture_close(void);
|
||||
extern int jetkvm_audio_read_encode(void *opus_buf);
|
||||
extern void update_audio_constants(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx, int lsb_depth,
|
||||
int sr, int ch, int fs, int max_pkt,
|
||||
int sleep_us, int max_attempts, int max_backoff);
|
||||
extern void set_trace_logging(int enabled);
|
||||
extern int update_opus_encoder_params(int bitrate, int complexity, int vbr, int vbr_constraint,
|
||||
int signal_type, int bandwidth, int dtx);
|
||||
extern void update_audio_constants(uint32_t bitrate, uint8_t complexity,
|
||||
uint32_t sr, uint8_t ch, uint16_t fs, uint16_t max_pkt,
|
||||
uint32_t sleep_us, uint8_t max_attempts, uint32_t max_backoff);
|
||||
extern int update_opus_encoder_params(uint32_t bitrate, uint8_t complexity);
|
||||
|
||||
// ============================================================================
|
||||
// GLOBAL STATE
|
||||
// ============================================================================
|
||||
|
||||
static volatile sig_atomic_t g_running = 1; // Shutdown flag
|
||||
static volatile sig_atomic_t g_running = 1;
|
||||
|
||||
// Audio configuration (from environment variables)
|
||||
typedef struct {
|
||||
const char *alsa_device; // ALSA capture device (default: "hw:0,0")
|
||||
int opus_bitrate; // Opus bitrate (default: 96000)
|
||||
int opus_complexity; // Opus complexity 0-10 (default: 1)
|
||||
int opus_vbr; // VBR enabled (default: 1)
|
||||
int opus_vbr_constraint; // VBR constraint (default: 1)
|
||||
int opus_signal_type; // Signal type (default: -1000 = auto)
|
||||
int opus_bandwidth; // Bandwidth (default: 1103 = wideband)
|
||||
int opus_dtx; // DTX enabled (default: 0)
|
||||
int opus_lsb_depth; // LSB depth (default: 16)
|
||||
int sample_rate; // Sample rate (default: 48000)
|
||||
int channels; // Channels (default: 2)
|
||||
int frame_size; // Frame size in samples (default: 960)
|
||||
int trace_logging; // Enable trace logging (default: 0)
|
||||
} audio_config_t;
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION PARSING
|
||||
// ============================================================================
|
||||
|
||||
static void load_audio_config(audio_config_t *config) {
|
||||
// ALSA device configuration
|
||||
config->alsa_device = audio_common_parse_env_string("ALSA_CAPTURE_DEVICE", "hw:0,0");
|
||||
|
||||
// Opus encoder configuration
|
||||
config->opus_bitrate = audio_common_parse_env_int("OPUS_BITRATE", 96000);
|
||||
config->opus_complexity = audio_common_parse_env_int("OPUS_COMPLEXITY", 1);
|
||||
config->opus_vbr = audio_common_parse_env_int("OPUS_VBR", 1);
|
||||
config->opus_vbr_constraint = audio_common_parse_env_int("OPUS_VBR_CONSTRAINT", 1);
|
||||
config->opus_signal_type = audio_common_parse_env_int("OPUS_SIGNAL_TYPE", -1000);
|
||||
config->opus_bandwidth = audio_common_parse_env_int("OPUS_BANDWIDTH", 1103);
|
||||
config->opus_dtx = audio_common_parse_env_int("OPUS_DTX", 0);
|
||||
config->opus_lsb_depth = audio_common_parse_env_int("OPUS_LSB_DEPTH", 16);
|
||||
|
||||
// Audio format
|
||||
config->sample_rate = audio_common_parse_env_int("AUDIO_SAMPLE_RATE", 48000);
|
||||
config->channels = audio_common_parse_env_int("AUDIO_CHANNELS", 2);
|
||||
config->frame_size = audio_common_parse_env_int("AUDIO_FRAME_SIZE", 960);
|
||||
|
||||
// Logging
|
||||
config->trace_logging = audio_common_is_trace_enabled();
|
||||
|
||||
// Log configuration
|
||||
printf("Audio Output Server Configuration:\n");
|
||||
printf(" ALSA Device: %s\n", config->alsa_device);
|
||||
printf(" Sample Rate: %d Hz\n", config->sample_rate);
|
||||
printf(" Channels: %d\n", config->channels);
|
||||
printf(" Frame Size: %d samples\n", config->frame_size);
|
||||
printf(" Opus Bitrate: %d bps\n", config->opus_bitrate);
|
||||
printf(" Opus Complexity: %d\n", config->opus_complexity);
|
||||
printf(" Trace Logging: %s\n", config->trace_logging ? "enabled" : "disabled");
|
||||
static void load_output_config(audio_config_t *common) {
|
||||
audio_common_load_config(common, 1); // 1 = output server
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MESSAGE HANDLING
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Handle OpusConfig message: update encoder parameters dynamically.
|
||||
* Returns 0 on success, -1 on error.
|
||||
*/
|
||||
static int handle_opus_config(const uint8_t *data, uint32_t length) {
|
||||
ipc_opus_config_t config;
|
||||
|
||||
if (ipc_parse_opus_config(data, length, &config) != 0) {
|
||||
fprintf(stderr, "Failed to parse Opus config\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("Received Opus config: bitrate=%u, complexity=%u, vbr=%u\n",
|
||||
config.bitrate, config.complexity, config.vbr);
|
||||
|
||||
// Apply configuration to encoder
|
||||
// Note: Signal type needs special handling for negative values
|
||||
int signal_type = (int)(int32_t)config.signal_type; // Treat as signed
|
||||
|
||||
int result = update_opus_encoder_params(
|
||||
config.bitrate,
|
||||
config.complexity,
|
||||
config.vbr,
|
||||
config.vbr, // Use VBR value for constraint (simplified)
|
||||
signal_type,
|
||||
config.bandwidth,
|
||||
config.dtx
|
||||
);
|
||||
|
||||
if (result != 0) {
|
||||
fprintf(stderr, "Warning: Failed to apply some Opus encoder parameters\n");
|
||||
// Continue anyway - encoder may not be initialized yet
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle incoming IPC messages from client (non-blocking).
|
||||
* Returns 0 on success, -1 on error.
|
||||
*/
|
||||
static int handle_incoming_messages(int client_sock) {
|
||||
static int handle_incoming_messages(int client_sock, volatile sig_atomic_t *running) {
|
||||
// Static buffer for zero-copy IPC (control messages are small)
|
||||
static uint8_t msg_buffer[IPC_MAX_FRAME_SIZE] __attribute__((aligned(64)));
|
||||
|
||||
// Set non-blocking mode for client socket
|
||||
int flags = fcntl(client_sock, F_GETFL, 0);
|
||||
fcntl(client_sock, F_SETFL, flags | O_NONBLOCK);
|
||||
|
||||
ipc_message_t msg;
|
||||
|
||||
// Try to read message (non-blocking)
|
||||
int result = ipc_read_message(client_sock, &msg, IPC_MAGIC_OUTPUT);
|
||||
// Try to read message (non-blocking, zero-copy)
|
||||
int result = ipc_read_message_zerocopy(client_sock, &msg, IPC_MAGIC_OUTPUT,
|
||||
msg_buffer, sizeof(msg_buffer));
|
||||
|
||||
// Restore blocking mode
|
||||
fcntl(client_sock, F_SETFL, flags);
|
||||
|
|
@ -151,22 +62,20 @@ static int handle_incoming_messages(int client_sock) {
|
|||
if (errno == EAGAIN || errno == EWOULDBLOCK) {
|
||||
return 0; // No message available, not an error
|
||||
}
|
||||
return -1; // Connection error
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Process message based on type
|
||||
switch (msg.header.type) {
|
||||
case IPC_MSG_TYPE_OPUS_CONFIG:
|
||||
handle_opus_config(msg.data, msg.header.length);
|
||||
audio_common_handle_opus_config(msg.data, msg.header.length, 1);
|
||||
break;
|
||||
|
||||
case IPC_MSG_TYPE_STOP:
|
||||
printf("Received stop message\n");
|
||||
g_running = 0;
|
||||
*running = 0;
|
||||
break;
|
||||
|
||||
case IPC_MSG_TYPE_HEARTBEAT:
|
||||
// Informational only, no response needed
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
@ -174,118 +83,90 @@ static int handle_incoming_messages(int client_sock) {
|
|||
break;
|
||||
}
|
||||
|
||||
ipc_free_message(&msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MAIN LOOP
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Main audio capture and encode loop.
|
||||
* Continuously reads from ALSA, encodes to Opus, sends via IPC.
|
||||
*/
|
||||
static int run_audio_loop(int client_sock) {
|
||||
static int run_audio_loop(int client_sock, volatile sig_atomic_t *running) {
|
||||
uint8_t opus_buffer[IPC_MAX_FRAME_SIZE];
|
||||
int consecutive_errors = 0;
|
||||
const int max_consecutive_errors = 10;
|
||||
int frame_count = 0;
|
||||
audio_error_tracker_t tracker;
|
||||
audio_error_tracker_init(&tracker);
|
||||
|
||||
printf("Starting audio output loop...\n");
|
||||
|
||||
while (g_running) {
|
||||
// Handle any incoming configuration messages (non-blocking)
|
||||
if (handle_incoming_messages(client_sock) < 0) {
|
||||
while (*running) {
|
||||
if (handle_incoming_messages(client_sock, running) < 0) {
|
||||
fprintf(stderr, "Client disconnected, waiting for reconnection...\n");
|
||||
break; // Client disconnected
|
||||
break;
|
||||
}
|
||||
|
||||
// Capture audio and encode to Opus
|
||||
int opus_size = jetkvm_audio_read_encode(opus_buffer);
|
||||
|
||||
if (opus_size < 0) {
|
||||
consecutive_errors++;
|
||||
fprintf(stderr, "Audio read/encode failed (error %d/%d)\n",
|
||||
consecutive_errors, max_consecutive_errors);
|
||||
tracker.consecutive_errors + 1, AUDIO_MAX_CONSECUTIVE_ERRORS);
|
||||
|
||||
if (consecutive_errors >= max_consecutive_errors) {
|
||||
if (audio_error_tracker_record_error(&tracker)) {
|
||||
fprintf(stderr, "Too many consecutive errors, giving up\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
usleep(10000); // 10ms backoff
|
||||
// No sleep needed - jetkvm_audio_read_encode already uses snd_pcm_wait internally
|
||||
continue;
|
||||
}
|
||||
|
||||
if (opus_size == 0) {
|
||||
// No data available (non-blocking mode or empty frame)
|
||||
usleep(1000); // 1ms sleep
|
||||
// Frame skipped for recovery, minimal yield
|
||||
sched_yield();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Reset error counter on success
|
||||
consecutive_errors = 0;
|
||||
frame_count++;
|
||||
audio_error_tracker_record_success(&tracker);
|
||||
|
||||
// Send Opus frame via IPC
|
||||
if (ipc_write_message(client_sock, IPC_MAGIC_OUTPUT, IPC_MSG_TYPE_OPUS_FRAME,
|
||||
opus_buffer, opus_size) != 0) {
|
||||
fprintf(stderr, "Failed to send frame to client\n");
|
||||
break; // Client disconnected
|
||||
break;
|
||||
}
|
||||
|
||||
// Trace logging (periodic)
|
||||
if (frame_count % 1000 == 1) {
|
||||
printf("Sent frame %d (size=%d bytes)\n", frame_count, opus_size);
|
||||
if (audio_error_tracker_should_trace(&tracker)) {
|
||||
printf("Sent frame %u (size=%d bytes)\n", tracker.frame_count, opus_size);
|
||||
}
|
||||
|
||||
// Small delay to prevent busy-waiting (frame rate ~50 FPS @ 48kHz/960)
|
||||
usleep(1000); // 1ms
|
||||
}
|
||||
|
||||
printf("Audio output loop ended after %d frames\n", frame_count);
|
||||
printf("Audio output loop ended after %u frames\n", tracker.frame_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// MAIN
|
||||
// ============================================================================
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
printf("JetKVM Audio Output Server Starting...\n");
|
||||
audio_common_print_startup("Audio Output Server");
|
||||
|
||||
// Setup signal handlers
|
||||
audio_common_setup_signal_handlers(&g_running);
|
||||
|
||||
// Load configuration from environment
|
||||
audio_config_t config;
|
||||
load_audio_config(&config);
|
||||
|
||||
// Set trace logging
|
||||
set_trace_logging(config.trace_logging);
|
||||
audio_config_t common;
|
||||
load_output_config(&common);
|
||||
|
||||
// Apply audio constants to audio.c
|
||||
update_audio_constants(
|
||||
config.opus_bitrate,
|
||||
config.opus_complexity,
|
||||
config.opus_vbr,
|
||||
config.opus_vbr_constraint,
|
||||
config.opus_signal_type,
|
||||
config.opus_bandwidth,
|
||||
config.opus_dtx,
|
||||
config.opus_lsb_depth,
|
||||
config.sample_rate,
|
||||
config.channels,
|
||||
config.frame_size,
|
||||
1500, // max_packet_size
|
||||
1000, // sleep_microseconds
|
||||
5, // max_attempts
|
||||
500000 // max_backoff_us
|
||||
common.opus_bitrate,
|
||||
common.opus_complexity,
|
||||
common.sample_rate,
|
||||
common.channels,
|
||||
common.frame_size,
|
||||
AUDIO_MAX_PACKET_SIZE,
|
||||
AUDIO_SLEEP_MICROSECONDS,
|
||||
AUDIO_MAX_ATTEMPTS,
|
||||
AUDIO_MAX_BACKOFF_US
|
||||
);
|
||||
|
||||
// Initialize audio capture
|
||||
printf("Initializing audio capture on device: %s\n", config.alsa_device);
|
||||
printf("Initializing audio capture on device: %s\n", common.alsa_device);
|
||||
if (jetkvm_audio_capture_init() != 0) {
|
||||
fprintf(stderr, "Failed to initialize audio capture\n");
|
||||
return 1;
|
||||
|
|
@ -300,32 +181,9 @@ int main(int argc, char **argv) {
|
|||
}
|
||||
|
||||
// Main connection loop
|
||||
while (g_running) {
|
||||
printf("Waiting for client connection...\n");
|
||||
audio_common_server_loop(server_sock, &g_running, run_audio_loop);
|
||||
|
||||
int client_sock = ipc_accept_client(server_sock);
|
||||
if (client_sock < 0) {
|
||||
if (g_running) {
|
||||
fprintf(stderr, "Failed to accept client, retrying...\n");
|
||||
sleep(1);
|
||||
continue;
|
||||
}
|
||||
break; // Shutting down
|
||||
}
|
||||
|
||||
// Run audio loop with this client
|
||||
run_audio_loop(client_sock);
|
||||
|
||||
// Close client connection
|
||||
close(client_sock);
|
||||
|
||||
if (g_running) {
|
||||
printf("Client disconnected, waiting for next client...\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
printf("Shutting down audio output server...\n");
|
||||
audio_common_print_shutdown("audio output server");
|
||||
close(server_sock);
|
||||
unlink(IPC_SOCKET_OUTPUT);
|
||||
jetkvm_audio_capture_close();
|
||||
|
|
|
|||
|
|
@ -1,559 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
)
|
||||
|
||||
// AudioConfigConstants centralizes all hardcoded values used across audio components.
|
||||
// This configuration system allows runtime tuning of audio performance, quality, and resource usage.
|
||||
type AudioConfigConstants struct {
|
||||
// Audio Quality Presets
|
||||
MaxAudioFrameSize int // Maximum audio frame size in bytes (default: 4096)
|
||||
MaxPCMBufferSize int // Maximum PCM buffer size in bytes for separate buffer optimization
|
||||
|
||||
// Opus Encoding Parameters
|
||||
OpusBitrate int // Target bitrate for Opus encoding in bps (default: 128000)
|
||||
OpusComplexity int // Computational complexity 0-10 (default: 10 for best quality)
|
||||
OpusVBR int // Variable Bit Rate: 0=CBR, 1=VBR (default: 1)
|
||||
OpusVBRConstraint int // VBR constraint: 0=unconstrained, 1=constrained (default: 0)
|
||||
OpusDTX int // Discontinuous Transmission: 0=disabled, 1=enabled (default: 0)
|
||||
|
||||
// Audio Parameters
|
||||
SampleRate int // Audio sampling frequency in Hz (default: 48000)
|
||||
Channels int // Number of audio channels: 1=mono, 2=stereo (default: 2)
|
||||
FrameSize int // Samples per audio frame (default: 960 for 20ms at 48kHz)
|
||||
MaxPacketSize int // Maximum encoded packet size in bytes (default: 4000)
|
||||
|
||||
// Optimal Audio Configuration (S16_LE @ 48kHz stereo from HDMI)
|
||||
// Single optimized setting - no quality presets needed
|
||||
OptimalOutputBitrate int // Output bitrate: 96 kbps (optimal for stereo @ 48kHz)
|
||||
OptimalInputBitrate int // Input bitrate: 48 kbps (optimal for mono mic @ 48kHz)
|
||||
|
||||
// Optimal OPUS Encoder Parameters (minimal CPU usage)
|
||||
OptimalOpusComplexity int // Complexity: 1 (minimal CPU ~0.5%)
|
||||
OptimalOpusVBR int // VBR: enabled for efficiency
|
||||
OptimalOpusSignalType int // Signal: OPUS_SIGNAL_MUSIC (3002)
|
||||
OptimalOpusBandwidth int // Bandwidth: WIDEBAND (1103 = native 48kHz)
|
||||
OptimalOpusDTX int // DTX: disabled for continuous audio
|
||||
|
||||
// CGO Audio Constants
|
||||
CGOOpusBitrate int // Native Opus encoder bitrate in bps (default: 96000)
|
||||
|
||||
CGOOpusComplexity int // Computational complexity for native Opus encoder (0-10)
|
||||
CGOOpusVBR int // Variable Bit Rate in native Opus encoder (0=CBR, 1=VBR)
|
||||
CGOOpusVBRConstraint int // Constrained VBR in native encoder (0/1)
|
||||
CGOOpusSignalType int // Signal type hint for native Opus encoder
|
||||
CGOOpusBandwidth int // Frequency bandwidth for native Opus encoder
|
||||
CGOOpusDTX int // Discontinuous Transmission in native encoder (0/1)
|
||||
CGOSampleRate int // Sample rate for native audio processing (Hz)
|
||||
CGOChannels int // Channel count for native audio processing
|
||||
CGOFrameSize int // Frame size for native Opus processing (samples)
|
||||
CGOMaxPacketSize int // Maximum packet size for native encoding (bytes)
|
||||
|
||||
// Input IPC Constants
|
||||
InputIPCSampleRate int // Sample rate for input IPC audio processing (Hz)
|
||||
InputIPCChannels int // Channel count for input IPC audio processing
|
||||
InputIPCFrameSize int // Frame size for input IPC processing (samples)
|
||||
|
||||
// Output IPC Constants
|
||||
OutputMaxFrameSize int // Maximum frame size for output processing (bytes)
|
||||
OutputHeaderSize int // Size of output message headers (bytes)
|
||||
|
||||
OutputMessagePoolSize int // Output message pool size (128)
|
||||
|
||||
// Socket Buffer Constants
|
||||
SocketOptimalBuffer int // Optimal socket buffer size (128KB)
|
||||
SocketMaxBuffer int // Maximum socket buffer size (256KB)
|
||||
SocketMinBuffer int // Minimum socket buffer size (32KB)
|
||||
|
||||
// Process Management
|
||||
MaxRestartAttempts int // Maximum restart attempts (5)
|
||||
RestartWindow time.Duration // Restart attempt window (5m)
|
||||
RestartDelay time.Duration // Initial restart delay (2s)
|
||||
MaxRestartDelay time.Duration // Maximum restart delay (30s)
|
||||
|
||||
// Buffer Management
|
||||
|
||||
MaxPoolSize int
|
||||
MessagePoolSize int
|
||||
OptimalSocketBuffer int
|
||||
MaxSocketBuffer int
|
||||
MinSocketBuffer int
|
||||
ChannelBufferSize int
|
||||
AudioFramePoolSize int
|
||||
PageSize int
|
||||
InitialBufferFrames int
|
||||
BytesToMBDivisor int
|
||||
MinReadEncodeBuffer int
|
||||
MaxDecodeWriteBuffer int
|
||||
MinBatchSizeForThreadPinning int
|
||||
|
||||
MagicNumber uint32
|
||||
MaxFrameSize int
|
||||
WriteTimeout time.Duration
|
||||
HeaderSize int
|
||||
MetricsUpdateInterval time.Duration
|
||||
WarmupSamples int
|
||||
MetricsChannelBuffer int
|
||||
LatencyHistorySize int
|
||||
MaxCPUPercent float64
|
||||
MinCPUPercent float64
|
||||
DefaultClockTicks float64
|
||||
DefaultMemoryGB int
|
||||
MaxWarmupSamples int
|
||||
WarmupCPUSamples int
|
||||
LogThrottleIntervalSec int
|
||||
MinValidClockTicks int
|
||||
MaxValidClockTicks int
|
||||
CPUFactor float64
|
||||
MemoryFactor float64
|
||||
LatencyFactor float64
|
||||
|
||||
// Timing Configuration
|
||||
RetryDelay time.Duration // Retry delay
|
||||
MaxRetryDelay time.Duration // Maximum retry delay
|
||||
BackoffMultiplier float64 // Backoff multiplier
|
||||
MaxConsecutiveErrors int // Maximum consecutive errors
|
||||
DefaultSleepDuration time.Duration // 100ms
|
||||
ShortSleepDuration time.Duration // 10ms
|
||||
LongSleepDuration time.Duration // 200ms
|
||||
DefaultTickerInterval time.Duration // 100ms
|
||||
BufferUpdateInterval time.Duration // 500ms
|
||||
InputSupervisorTimeout time.Duration // 5s
|
||||
OutputSupervisorTimeout time.Duration // 5s
|
||||
BatchProcessingDelay time.Duration // 10ms
|
||||
|
||||
// System threshold configuration for buffer management
|
||||
LowCPUThreshold float64 // CPU usage threshold for performance optimization
|
||||
HighCPUThreshold float64 // CPU usage threshold for performance limits
|
||||
LowMemoryThreshold float64 // 50% memory threshold
|
||||
HighMemoryThreshold float64 // 75% memory threshold
|
||||
CooldownPeriod time.Duration // 30s cooldown period
|
||||
RollbackThreshold time.Duration // 300ms rollback threshold
|
||||
|
||||
MaxLatencyThreshold time.Duration // 200ms max latency
|
||||
JitterThreshold time.Duration // 20ms jitter threshold
|
||||
LatencyOptimizationInterval time.Duration // 5s optimization interval
|
||||
MicContentionTimeout time.Duration // 200ms contention timeout
|
||||
PreallocPercentage int // 20% preallocation percentage
|
||||
BackoffStart time.Duration // 50ms initial backoff
|
||||
|
||||
InputMagicNumber uint32 // Magic number for input IPC messages (0x4A4B4D49 "JKMI")
|
||||
|
||||
OutputMagicNumber uint32 // Magic number for output IPC messages (0x4A4B4F55 "JKOU")
|
||||
|
||||
// Calculation Constants
|
||||
PercentageMultiplier float64 // Multiplier for percentage calculations (100.0)
|
||||
AveragingWeight float64 // Weight for weighted averaging (0.7)
|
||||
ScalingFactor float64 // General scaling factor (1.5)
|
||||
CPUMemoryWeight float64 // Weight for CPU factor in calculations (0.5)
|
||||
MemoryWeight float64 // Weight for memory factor (0.3)
|
||||
LatencyWeight float64 // Weight for latency factor (0.2)
|
||||
PoolGrowthMultiplier int // Multiplier for pool size growth (2)
|
||||
LatencyScalingFactor float64 // Scaling factor for latency calculations (2.0)
|
||||
OptimizerAggressiveness float64 // Aggressiveness level for optimization (0.7)
|
||||
|
||||
// CGO Audio Processing Constants
|
||||
CGOUsleepMicroseconds int // Sleep duration for CGO usleep calls (1000μs)
|
||||
|
||||
CGOPCMBufferSize int // PCM buffer size for CGO audio processing
|
||||
CGONanosecondsPerSecond float64 // Nanoseconds per second conversion
|
||||
|
||||
// Output Streaming Constants
|
||||
OutputStreamingFrameIntervalMS int // Output frame interval (20ms for 50 FPS)
|
||||
|
||||
// IPC Constants
|
||||
IPCInitialBufferFrames int // Initial IPC buffer size (500 frames)
|
||||
|
||||
EventTimeoutSeconds int
|
||||
EventTimeFormatString string
|
||||
EventSubscriptionDelayMS int
|
||||
InputProcessingTimeoutMS int
|
||||
InputSocketName string
|
||||
OutputSocketName string
|
||||
AudioInputComponentName string
|
||||
AudioOutputComponentName string
|
||||
AudioServerComponentName string
|
||||
AudioRelayComponentName string
|
||||
AudioEventsComponentName string
|
||||
|
||||
TestSocketTimeout time.Duration
|
||||
TestBufferSize int
|
||||
TestRetryDelay time.Duration
|
||||
LatencyHistogramMaxSamples int
|
||||
LatencyPercentile50 int
|
||||
LatencyPercentile95 int
|
||||
LatencyPercentile99 int
|
||||
|
||||
// Buffer Pool Configuration
|
||||
BufferPoolDefaultSize int // Default buffer pool size when MaxPoolSize is invalid
|
||||
BufferPoolControlSize int // Control buffer pool size
|
||||
ZeroCopyPreallocSizeBytes int // Zero-copy frame pool preallocation size in bytes
|
||||
ZeroCopyMinPreallocFrames int // Minimum preallocated frames for zero-copy pool
|
||||
BufferPoolHitRateBase float64 // Base for hit rate percentage calculation
|
||||
|
||||
HitRateCalculationBase float64
|
||||
MaxLatency time.Duration
|
||||
MinMetricsUpdateInterval time.Duration
|
||||
MaxMetricsUpdateInterval time.Duration
|
||||
MinSampleRate int
|
||||
MaxSampleRate int
|
||||
MaxChannels int
|
||||
|
||||
// CGO Constants
|
||||
CGOMaxBackoffMicroseconds int // Maximum CGO backoff time (500ms)
|
||||
CGOMaxAttempts int // Maximum CGO retry attempts (5)
|
||||
|
||||
// Frame Duration Validation
|
||||
MinFrameDuration time.Duration // Minimum frame duration (10ms)
|
||||
MaxFrameDuration time.Duration // Maximum frame duration (100ms)
|
||||
|
||||
// Valid Sample Rates
|
||||
// Validation Constants
|
||||
ValidSampleRates []int // Supported sample rates (8kHz to 48kHz)
|
||||
MinOpusBitrate int // Minimum Opus bitrate (6000 bps)
|
||||
MaxOpusBitrate int // Maximum Opus bitrate (510000 bps)
|
||||
MaxValidationTime time.Duration // Validation timeout (5s)
|
||||
MinFrameSize int // Minimum frame size (64 bytes)
|
||||
FrameSizeTolerance int // Frame size tolerance (512 bytes)
|
||||
|
||||
// Latency Histogram Buckets
|
||||
LatencyBucket10ms time.Duration // 10ms latency bucket
|
||||
LatencyBucket25ms time.Duration // 25ms latency bucket
|
||||
LatencyBucket50ms time.Duration // 50ms latency bucket
|
||||
LatencyBucket100ms time.Duration // 100ms latency bucket
|
||||
LatencyBucket250ms time.Duration // 250ms latency bucket
|
||||
LatencyBucket500ms time.Duration // 500ms latency bucket
|
||||
LatencyBucket1s time.Duration // 1s latency bucket
|
||||
LatencyBucket2s time.Duration // 2s latency bucket
|
||||
|
||||
MaxAudioProcessorWorkers int
|
||||
MaxAudioReaderWorkers int
|
||||
AudioProcessorQueueSize int
|
||||
AudioReaderQueueSize int
|
||||
WorkerMaxIdleTime time.Duration
|
||||
|
||||
// Connection Retry Configuration
|
||||
MaxConnectionAttempts int // Maximum connection retry attempts
|
||||
ConnectionRetryDelay time.Duration // Initial connection retry delay
|
||||
MaxConnectionRetryDelay time.Duration // Maximum connection retry delay
|
||||
ConnectionBackoffFactor float64 // Connection retry backoff factor
|
||||
ConnectionTimeoutDelay time.Duration // Connection timeout for each attempt
|
||||
ReconnectionInterval time.Duration // Interval for automatic reconnection attempts
|
||||
HealthCheckInterval time.Duration // Health check interval for connections
|
||||
|
||||
// Quality Change Timeout Configuration
|
||||
QualityChangeSupervisorTimeout time.Duration // Timeout for supervisor stop during quality changes
|
||||
QualityChangeTickerInterval time.Duration // Ticker interval for supervisor stop polling
|
||||
QualityChangeSettleDelay time.Duration // Delay for quality change to settle
|
||||
QualityChangeRecoveryDelay time.Duration // Delay before attempting recovery
|
||||
|
||||
}
|
||||
|
||||
// DefaultAudioConfig returns the default configuration constants.
// These values are carefully chosen based on JetKVM's embedded ARM environment,
// real-time audio requirements, and extensive testing for optimal performance.
func DefaultAudioConfig() *AudioConfigConstants {
	return &AudioConfigConstants{
		// Audio Quality Presets
		MaxAudioFrameSize: 4096,
		MaxPCMBufferSize:  8192, // Default PCM buffer size (2x MaxAudioFrameSize for safety)

		// Opus Encoding Parameters
		OpusBitrate:       128000,
		OpusComplexity:    10,
		OpusVBR:           1,
		OpusVBRConstraint: 0,
		OpusDTX:           0,

		// Audio Parameters
		SampleRate:    48000,
		Channels:      2,
		FrameSize:     960,
		MaxPacketSize: 4000,

		// Optimal Audio Configuration (single setting for all use cases)
		OptimalOutputBitrate:  96,   // 96 kbps for stereo @ 48kHz
		OptimalInputBitrate:   48,   // 48 kbps for mono mic @ 48kHz
		OptimalOpusComplexity: 1,    // Complexity 1: minimal CPU (~0.5%)
		OptimalOpusVBR:        1,    // VBR enabled for efficiency
		OptimalOpusSignalType: 3002, // OPUS_SIGNAL_MUSIC
		OptimalOpusBandwidth:  1103, // OPUS_BANDWIDTH_WIDEBAND (native 48kHz)
		OptimalOpusDTX:        0,    // DTX disabled for continuous audio

		// CGO Audio Constants - Optimized for S16_LE @ 48kHz with minimal CPU
		CGOOpusBitrate:       96000, // 96 kbps optimal for stereo @ 48kHz
		CGOOpusComplexity:    1,     // Complexity 1: minimal CPU (~0.5% on RV1106)
		CGOOpusVBR:           1,     // VBR enabled for efficiency
		CGOOpusVBRConstraint: 1,     // Constrained VBR for predictable bitrate
		CGOOpusSignalType:    -1000, // OPUS_AUTO (automatic voice/music detection)
		CGOOpusBandwidth:     1103,  // OPUS_BANDWIDTH_WIDEBAND (native 48kHz, no resampling)
		CGOOpusDTX:           0,     // DTX disabled for continuous audio
		CGOSampleRate:        48000, // 48 kHz native HDMI sample rate
		CGOChannels:          2,     // Stereo
		CGOFrameSize:         960,   // 20ms frames at 48kHz
		CGOMaxPacketSize:     1500,  // Standard Ethernet MTU

		// Input IPC Constants
		InputIPCSampleRate: 48000, // Input IPC sample rate (48kHz)
		InputIPCChannels:   2,     // Input IPC channels (stereo)
		InputIPCFrameSize:  960,   // Input IPC frame size (960 samples)

		// Output IPC Constants
		OutputMaxFrameSize:    4096, // Maximum output frame size
		OutputHeaderSize:      17,   // Output frame header size
		OutputMessagePoolSize: 128,  // Output message pool size

		// Socket Buffer Constants
		SocketOptimalBuffer: 131072, // 128KB optimal socket buffer
		SocketMaxBuffer:     262144, // 256KB maximum socket buffer
		SocketMinBuffer:     32768,  // 32KB minimum socket buffer

		// Process Management
		MaxRestartAttempts: 5,                // Maximum restart attempts
		RestartWindow:      5 * time.Minute,  // Time window for restart attempt counting
		RestartDelay:       1 * time.Second,  // Initial delay before restart attempts
		MaxRestartDelay:    30 * time.Second, // Maximum delay for exponential backoff

		// Buffer Management
		MaxPoolSize:          100,         // Maximum object pool size
		MessagePoolSize:      1024,        // Significantly increased message pool for quality change bursts
		OptimalSocketBuffer:  262144,      // 256KB optimal socket buffer
		MaxSocketBuffer:      1048576,     // 1MB maximum socket buffer
		MinSocketBuffer:      8192,        // 8KB minimum socket buffer
		ChannelBufferSize:    2048,        // Significantly increased channel buffer for quality change bursts
		AudioFramePoolSize:   1500,        // Audio frame object pool size
		PageSize:             4096,        // Memory page size for alignment
		InitialBufferFrames:  1000,        // Increased initial buffer size during startup
		BytesToMBDivisor:     1024 * 1024, // Byte to megabyte conversion
		MinReadEncodeBuffer:  1276,        // Minimum CGO read/encode buffer
		MaxDecodeWriteBuffer: 4096,        // Maximum CGO decode/write buffer

		// IPC Configuration - Balanced for stability
		MagicNumber:  0xDEADBEEF,              // IPC message validation header
		MaxFrameSize: 4096,                    // Maximum audio frame size (4KB)
		WriteTimeout: 1000 * time.Millisecond, // Further increased timeout to handle quality change bursts
		HeaderSize:   8,                       // IPC message header size

		// Monitoring and Metrics - Balanced for stability
		MetricsUpdateInterval: 1000 * time.Millisecond, // Stable metrics collection frequency
		WarmupSamples:         10,                      // Adequate warmup samples for accuracy
		MetricsChannelBuffer:  100,                     // Adequate metrics data channel buffer
		LatencyHistorySize:    100,                     // Adequate latency measurements to keep

		// Process Monitoring Constants
		MaxCPUPercent:          100.0, // Maximum CPU percentage
		MinCPUPercent:          0.01,  // Minimum CPU percentage
		DefaultClockTicks:      250.0, // Default clock ticks for embedded ARM systems
		DefaultMemoryGB:        8,     // Default memory in GB
		MaxWarmupSamples:       3,     // Maximum warmup samples
		WarmupCPUSamples:       2,     // CPU warmup samples
		LogThrottleIntervalSec: 10,    // Log throttle interval in seconds
		MinValidClockTicks:     50,    // Minimum valid clock ticks
		MaxValidClockTicks:     1000,  // Maximum valid clock ticks

		// Performance Tuning
		CPUFactor:     0.7, // CPU weight in performance calculations
		MemoryFactor:  0.8, // Memory weight in performance calculations
		LatencyFactor: 0.9, // Latency weight in performance calculations

		// Error Handling
		RetryDelay:           100 * time.Millisecond, // Initial retry delay
		MaxRetryDelay:        5 * time.Second,        // Maximum retry delay
		BackoffMultiplier:    2.0,                    // Exponential backoff multiplier
		MaxConsecutiveErrors: 5,                      // Consecutive error threshold

		// Connection Retry Configuration
		MaxConnectionAttempts:   15,                    // Maximum connection retry attempts
		ConnectionRetryDelay:    50 * time.Millisecond, // Initial connection retry delay
		MaxConnectionRetryDelay: 2 * time.Second,       // Maximum connection retry delay
		ConnectionBackoffFactor: 1.5,                   // Connection retry backoff factor
		ConnectionTimeoutDelay:  5 * time.Second,       // Connection timeout for each attempt
		ReconnectionInterval:    30 * time.Second,      // Interval for automatic reconnection attempts
		HealthCheckInterval:     10 * time.Second,      // Health check interval for connections

		// Quality Change Timeout Configuration
		QualityChangeSupervisorTimeout: 5 * time.Second,        // Timeout for supervisor stop during quality changes
		QualityChangeTickerInterval:    100 * time.Millisecond, // Ticker interval for supervisor stop polling
		QualityChangeSettleDelay:       2 * time.Second,        // Delay for quality change to settle
		QualityChangeRecoveryDelay:     1 * time.Second,        // Delay before attempting recovery

		// Timing Constants - Optimized for quality change stability
		DefaultSleepDuration:    100 * time.Millisecond, // Balanced polling interval
		ShortSleepDuration:      10 * time.Millisecond,  // Balanced high-frequency polling
		LongSleepDuration:       200 * time.Millisecond, // Balanced background task delay
		DefaultTickerInterval:   100 * time.Millisecond, // Balanced periodic task interval
		BufferUpdateInterval:    250 * time.Millisecond, // Faster buffer size update frequency
		InputSupervisorTimeout:  5 * time.Second,        // Input monitoring timeout
		OutputSupervisorTimeout: 5 * time.Second,        // Output monitoring timeout
		BatchProcessingDelay:    5 * time.Millisecond,   // Reduced batch processing delay

		// System Load Configuration - Optimized for single-core RV1106G3
		LowCPUThreshold:     0.40, // Adjusted for single-core ARM system
		HighCPUThreshold:    0.75, // Adjusted for single-core RV1106G3 (current load ~64%)
		LowMemoryThreshold:  0.60,
		HighMemoryThreshold: 0.85, // Adjusted for 200MB total memory system

		CooldownPeriod:    15 * time.Second,       // Reduced cooldown period
		RollbackThreshold: 200 * time.Millisecond, // Lower rollback threshold

		MaxLatencyThreshold:         150 * time.Millisecond, // Lower max latency threshold
		JitterThreshold:             15 * time.Millisecond,  // Reduced jitter threshold
		LatencyOptimizationInterval: 3 * time.Second,        // More frequent optimization

		// Microphone Contention Configuration
		MicContentionTimeout: 200 * time.Millisecond,

		// Buffer Pool Configuration
		PreallocPercentage: 20,

		// Sleep and Backoff Configuration
		BackoffStart: 50 * time.Millisecond,

		// Protocol Magic Numbers
		InputMagicNumber:  0x4A4B4D49, // "JKMI" (JetKVM Microphone Input)
		OutputMagicNumber: 0x4A4B4F55, // "JKOU" (JetKVM Output)

		// Calculation Constants
		PercentageMultiplier:    100.0, // Standard percentage conversion (0.5 * 100 = 50%)
		AveragingWeight:         0.7,   // Weight for smoothing values (70% recent, 30% historical)
		ScalingFactor:           1.5,   // General scaling factor for adaptive adjustments
		CPUMemoryWeight:         0.5,   // CPU factor weight in combined calculations
		MemoryWeight:            0.3,   // Memory factor weight in combined calculations
		LatencyWeight:           0.2,   // Latency factor weight in combined calculations
		PoolGrowthMultiplier:    2,     // Pool growth multiplier
		LatencyScalingFactor:    2.0,   // Latency ratio scaling factor
		OptimizerAggressiveness: 0.7,   // Optimizer aggressiveness factor

		// CGO Audio Processing Constants - Balanced for stability
		CGOUsleepMicroseconds:   1000,         // 1000 microseconds (1ms) for stable CGO usleep calls
		CGOPCMBufferSize:        1920,         // 1920 samples for PCM buffer (max 2ch*960)
		CGONanosecondsPerSecond: 1000000000.0, // 1000000000.0 for nanosecond conversions

		// Output Streaming Constants - Balanced for stability
		OutputStreamingFrameIntervalMS: 20, // 20ms frame interval (50 FPS) for stability

		// IPC Constants
		IPCInitialBufferFrames: 500, // 500 frames for initial buffer

		// Event Constants - Balanced for stability
		EventTimeoutSeconds:      2,                          // 2 seconds for event timeout
		EventTimeFormatString:    "2006-01-02T15:04:05.000Z", // Go reference-time layout for event timestamps
		EventSubscriptionDelayMS: 100,                        // 100ms subscription delay

		// Goroutine Pool Configuration
		MaxAudioProcessorWorkers: 16,               // 16 workers for audio processing tasks
		MaxAudioReaderWorkers:    8,                // 8 workers for audio reading tasks
		AudioProcessorQueueSize:  64,               // 64 tasks queue size for processor pool
		AudioReaderQueueSize:     32,               // 32 tasks queue size for reader pool
		WorkerMaxIdleTime:        60 * time.Second, // 60s maximum idle time before worker termination

		// Input Processing Constants - Balanced for stability
		InputProcessingTimeoutMS: 10, // 10ms processing timeout threshold

		// Socket Names
		InputSocketName:  "audio_input.sock",  // Socket name for audio input IPC
		OutputSocketName: "audio_output.sock", // Socket name for audio output IPC

		// Component Names
		AudioInputComponentName:  "audio-input",  // Component name for input logging
		AudioOutputComponentName: "audio-output", // Component name for output logging
		AudioServerComponentName: "audio-server", // Component name for server logging
		AudioRelayComponentName:  "audio-relay",  // Component name for relay logging
		AudioEventsComponentName: "audio-events", // Component name for events logging

		// Test Configuration
		TestSocketTimeout: 100 * time.Millisecond, // 100ms timeout for test socket operations
		TestBufferSize:    4096,                   // 4096 bytes buffer size for test operations
		TestRetryDelay:    200 * time.Millisecond, // 200ms delay between test retry attempts

		// Latency Histogram Configuration
		LatencyHistogramMaxSamples: 1000, // 1000 samples for latency tracking
		LatencyPercentile50:        50,   // 50th percentile calculation factor
		LatencyPercentile95:        95,   // 95th percentile calculation factor
		LatencyPercentile99:        99,   // 99th percentile calculation factor

		// Buffer Pool Configuration
		BufferPoolDefaultSize:     64,          // Default buffer pool size when MaxPoolSize is invalid
		BufferPoolControlSize:     512,         // Control buffer pool size
		ZeroCopyPreallocSizeBytes: 1024 * 1024, // Zero-copy frame pool preallocation size in bytes (1MB)
		ZeroCopyMinPreallocFrames: 1,           // Minimum preallocated frames for zero-copy pool
		BufferPoolHitRateBase:     100.0,       // Base for hit rate percentage calculation

		// Buffer Pool Efficiency Constants
		HitRateCalculationBase: 100.0, // 100.0 base for hit rate percentage calculation

		// Validation Constants
		MaxLatency:               500 * time.Millisecond, // 500ms maximum allowed latency
		MinMetricsUpdateInterval: 100 * time.Millisecond, // 100ms minimum metrics update interval
		MaxMetricsUpdateInterval: 10 * time.Second,       // 10s maximum metrics update interval
		MinSampleRate:            8000,                   // 8kHz minimum sample rate
		MaxSampleRate:            48000,                  // 48kHz maximum sample rate
		MaxChannels:              8,                      // 8 maximum audio channels

		// CGO Constants
		CGOMaxBackoffMicroseconds: 500000, // 500ms maximum backoff in microseconds
		CGOMaxAttempts:            5,      // 5 maximum retry attempts

		// Validation Frame Size Limits
		MinFrameDuration: 10 * time.Millisecond,  // 10ms minimum frame duration
		MaxFrameDuration: 100 * time.Millisecond, // 100ms maximum frame duration

		// Valid Sample Rates
		ValidSampleRates: []int{8000, 12000, 16000, 22050, 24000, 44100, 48000}, // Supported sample rates

		// Opus Bitrate Validation Constants
		MinOpusBitrate: 6000,   // 6000 bps minimum Opus bitrate
		MaxOpusBitrate: 510000, // 510000 bps maximum Opus bitrate

		// Validation Configuration
		MaxValidationTime:  5 * time.Second, // 5s maximum validation timeout
		MinFrameSize:       1,               // 1 byte minimum frame size (allow small frames)
		FrameSizeTolerance: 512,             // 512 bytes frame size tolerance

		// Latency Histogram Bucket Configuration
		LatencyBucket10ms:  10 * time.Millisecond,  // 10ms latency bucket
		LatencyBucket25ms:  25 * time.Millisecond,  // 25ms latency bucket
		LatencyBucket50ms:  50 * time.Millisecond,  // 50ms latency bucket
		LatencyBucket100ms: 100 * time.Millisecond, // 100ms latency bucket
		LatencyBucket250ms: 250 * time.Millisecond, // 250ms latency bucket
		LatencyBucket500ms: 500 * time.Millisecond, // 500ms latency bucket
		LatencyBucket1s:    1 * time.Second,        // 1s latency bucket
		LatencyBucket2s:    2 * time.Second,        // 2s latency bucket

		// Batch Audio Processing Configuration
		MinBatchSizeForThreadPinning: 5, // Minimum batch size to pin thread
	}
}
|
||||
|
||||
// Global configuration instance.
// NOTE(review): read by GetConfig and replaced by UpdateConfig without any
// synchronization — confirm all access happens from a single goroutine.
var Config = DefaultAudioConfig()
|
||||
|
||||
// UpdateConfig allows runtime configuration updates
|
||||
func UpdateConfig(newConfig *AudioConfigConstants) {
|
||||
// Validate the new configuration before applying it
|
||||
if err := ValidateAudioConfigConstants(newConfig); err != nil {
|
||||
// Log validation error and keep current configuration
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "AudioConfig").Logger()
|
||||
logger.Error().Err(err).Msg("Configuration validation failed, keeping current configuration")
|
||||
return
|
||||
}
|
||||
|
||||
Config = newConfig
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "AudioConfig").Logger()
|
||||
logger.Info().Msg("Audio configuration updated successfully")
|
||||
}
|
||||
|
||||
// GetConfig returns the current configuration.
// NOTE(review): returns the shared pointer without synchronization; callers
// racing with UpdateConfig may observe a torn read — confirm single-goroutine use.
func GetConfig() *AudioConfigConstants {
	return Config
}
|
||||
|
|
@ -1,271 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/coder/websocket"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// AudioControlService provides core audio control operations:
// output mute/unmute, microphone start/stop/mute/reset, status queries,
// and WebSocket audio-event subscription management.
type AudioControlService struct {
	sessionProvider SessionProvider // supplies session state and the audio input manager
	logger          *zerolog.Logger // structured logger for control-path events
}
|
||||
|
||||
// NewAudioControlService creates a new audio control service
|
||||
func NewAudioControlService(sessionProvider SessionProvider, logger *zerolog.Logger) *AudioControlService {
|
||||
return &AudioControlService{
|
||||
sessionProvider: sessionProvider,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// MuteAudio sets the audio mute state by controlling the audio output subprocess
|
||||
func (s *AudioControlService) MuteAudio(muted bool) error {
|
||||
if muted {
|
||||
// Mute: Stop audio output subprocess and relay
|
||||
supervisor := GetAudioOutputSupervisor()
|
||||
if supervisor != nil {
|
||||
supervisor.Stop()
|
||||
}
|
||||
StopAudioRelay()
|
||||
SetAudioMuted(true)
|
||||
} else {
|
||||
// Unmute: Start audio output subprocess and relay
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return errors.New("no active session for audio unmute")
|
||||
}
|
||||
|
||||
supervisor := GetAudioOutputSupervisor()
|
||||
if supervisor != nil {
|
||||
err := supervisor.Start()
|
||||
if err != nil {
|
||||
s.logger.Debug().Err(err).Msg("failed to start audio output supervisor")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Start audio relay
|
||||
err := StartAudioRelay(nil)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to start audio relay during unmute")
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect the relay to the current WebRTC session's audio track
|
||||
// This is needed because UpdateAudioRelayTrack is normally only called during session creation
|
||||
if err := connectRelayToCurrentSession(); err != nil {
|
||||
s.logger.Warn().Err(err).Msg("failed to connect relay to current session, audio may not work")
|
||||
}
|
||||
SetAudioMuted(false)
|
||||
s.logger.Info().Msg("audio output unmuted (subprocess and relay started)")
|
||||
}
|
||||
|
||||
// Broadcast audio mute state change via WebSocket
|
||||
broadcaster := GetAudioEventBroadcaster()
|
||||
broadcaster.BroadcastAudioMuteChanged(muted)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartMicrophone starts the microphone input
|
||||
func (s *AudioControlService) StartMicrophone() error {
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return errors.New("no active session")
|
||||
}
|
||||
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager == nil {
|
||||
return errors.New("audio input manager not available")
|
||||
}
|
||||
|
||||
if audioInputManager.IsRunning() {
|
||||
s.logger.Info().Msg("microphone already running")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := audioInputManager.Start(); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to start microphone")
|
||||
return err
|
||||
}
|
||||
|
||||
s.logger.Info().Msg("microphone started successfully")
|
||||
|
||||
// Broadcast microphone state change via WebSocket
|
||||
broadcaster := GetAudioEventBroadcaster()
|
||||
sessionActive := s.sessionProvider.IsSessionActive()
|
||||
broadcaster.BroadcastMicrophoneStateChanged(true, sessionActive)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopMicrophone stops the microphone input
|
||||
func (s *AudioControlService) StopMicrophone() error {
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return errors.New("no active session")
|
||||
}
|
||||
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager == nil {
|
||||
return errors.New("audio input manager not available")
|
||||
}
|
||||
|
||||
if !audioInputManager.IsRunning() {
|
||||
s.logger.Info().Msg("microphone already stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
audioInputManager.Stop()
|
||||
s.logger.Info().Msg("microphone stopped successfully")
|
||||
|
||||
// Broadcast microphone state change via WebSocket
|
||||
broadcaster := GetAudioEventBroadcaster()
|
||||
sessionActive := s.sessionProvider.IsSessionActive()
|
||||
broadcaster.BroadcastMicrophoneStateChanged(false, sessionActive)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MuteMicrophone sets the microphone mute state by controlling data flow (like audio output)
|
||||
func (s *AudioControlService) MuteMicrophone(muted bool) error {
|
||||
if muted {
|
||||
// Mute: Control data flow, don't stop subprocess (like audio output)
|
||||
SetMicrophoneMuted(true)
|
||||
s.logger.Info().Msg("microphone muted (data flow disabled)")
|
||||
} else {
|
||||
// Unmute: Ensure subprocess is running, then enable data flow
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return errors.New("no active session for microphone unmute")
|
||||
}
|
||||
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager == nil {
|
||||
return errors.New("audio input manager not available")
|
||||
}
|
||||
|
||||
// Start subprocess if not already running (async, non-blocking)
|
||||
if !audioInputManager.IsRunning() {
|
||||
go func() {
|
||||
if err := audioInputManager.Start(); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to start microphone during unmute")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Enable data flow immediately
|
||||
SetMicrophoneMuted(false)
|
||||
s.logger.Info().Msg("microphone unmuted (data flow enabled)")
|
||||
}
|
||||
|
||||
// Broadcast microphone state change via WebSocket
|
||||
broadcaster := GetAudioEventBroadcaster()
|
||||
sessionActive := s.sessionProvider.IsSessionActive()
|
||||
|
||||
// Get actual subprocess running status (not mute status)
|
||||
var subprocessRunning bool
|
||||
if sessionActive {
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager != nil {
|
||||
subprocessRunning = audioInputManager.IsRunning()
|
||||
}
|
||||
}
|
||||
|
||||
broadcaster.BroadcastMicrophoneStateChanged(subprocessRunning, sessionActive)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetMicrophone resets the microphone
|
||||
func (s *AudioControlService) ResetMicrophone() error {
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return errors.New("no active session")
|
||||
}
|
||||
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager == nil {
|
||||
return errors.New("audio input manager not available")
|
||||
}
|
||||
|
||||
if audioInputManager.IsRunning() {
|
||||
audioInputManager.Stop()
|
||||
s.logger.Info().Msg("stopped microphone for reset")
|
||||
}
|
||||
|
||||
if err := audioInputManager.Start(); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to restart microphone during reset")
|
||||
return err
|
||||
}
|
||||
|
||||
s.logger.Info().Msg("microphone reset successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAudioStatus returns the current audio output status
|
||||
func (s *AudioControlService) GetAudioStatus() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"muted": IsAudioMuted(),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMicrophoneStatus returns the current microphone status
|
||||
func (s *AudioControlService) GetMicrophoneStatus() map[string]interface{} {
|
||||
if s.sessionProvider == nil {
|
||||
return map[string]interface{}{
|
||||
"error": "no session provider",
|
||||
}
|
||||
}
|
||||
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return map[string]interface{}{
|
||||
"error": "no active session",
|
||||
}
|
||||
}
|
||||
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager == nil {
|
||||
return map[string]interface{}{
|
||||
"error": "no audio input manager",
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"running": audioInputManager.IsRunning(),
|
||||
"ready": audioInputManager.IsReady(),
|
||||
}
|
||||
}
|
||||
|
||||
// SubscribeToAudioEvents subscribes to audio events via WebSocket
|
||||
func (s *AudioControlService) SubscribeToAudioEvents(connectionID string, wsCon *websocket.Conn, runCtx context.Context, logger *zerolog.Logger) {
|
||||
logger.Info().Msg("client subscribing to audio events")
|
||||
broadcaster := GetAudioEventBroadcaster()
|
||||
broadcaster.Subscribe(connectionID, wsCon, runCtx, logger)
|
||||
}
|
||||
|
||||
// UnsubscribeFromAudioEvents unsubscribes from audio events
|
||||
func (s *AudioControlService) UnsubscribeFromAudioEvents(connectionID string, logger *zerolog.Logger) {
|
||||
logger.Info().Str("connection_id", connectionID).Msg("client unsubscribing from audio events")
|
||||
broadcaster := GetAudioEventBroadcaster()
|
||||
broadcaster.Unsubscribe(connectionID)
|
||||
}
|
||||
|
||||
// IsAudioOutputActive returns whether the audio output subprocess is running
|
||||
func (s *AudioControlService) IsAudioOutputActive() bool {
|
||||
return !IsAudioMuted() && IsAudioRelayRunning()
|
||||
}
|
||||
|
||||
// IsMicrophoneActive returns whether the microphone subprocess is running
|
||||
func (s *AudioControlService) IsMicrophoneActive() bool {
|
||||
if !s.sessionProvider.IsSessionActive() {
|
||||
return false
|
||||
}
|
||||
|
||||
audioInputManager := s.sessionProvider.GetAudioInputManager()
|
||||
if audioInputManager == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// For Enable/Disable buttons, we check subprocess status
|
||||
return audioInputManager.IsRunning()
|
||||
}
|
||||
|
|
@ -1,256 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
	// --- Audio output (speaker) metrics ---

	audioFramesReceivedTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_frames_received_total",
			Help: "Total number of audio frames received",
		},
	)

	audioFramesDroppedTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_frames_dropped_total",
			Help: "Total number of audio frames dropped",
		},
	)

	audioBytesProcessedTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_bytes_processed_total",
			Help: "Total number of audio bytes processed",
		},
	)

	audioConnectionDropsTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_connection_drops_total",
			Help: "Total number of audio connection drops",
		},
	)

	audioAverageLatencyMilliseconds = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_average_latency_milliseconds",
			Help: "Average audio latency in milliseconds",
		},
	)

	audioLastFrameTimestamp = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_last_frame_timestamp_seconds",
			Help: "Timestamp of the last audio frame received",
		},
	)

	// --- Microphone input metrics ---

	microphoneFramesSentTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_microphone_frames_sent_total",
			Help: "Total number of microphone frames sent",
		},
	)

	microphoneFramesDroppedTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_microphone_frames_dropped_total",
			Help: "Total number of microphone frames dropped",
		},
	)

	microphoneBytesProcessedTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_microphone_bytes_processed_total",
			Help: "Total number of microphone bytes processed",
		},
	)

	microphoneConnectionDropsTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_microphone_connection_drops_total",
			Help: "Total number of microphone connection drops",
		},
	)

	microphoneAverageLatencyMilliseconds = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_microphone_average_latency_milliseconds",
			Help: "Average microphone latency in milliseconds",
		},
	)

	microphoneLastFrameTimestamp = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_microphone_last_frame_timestamp_seconds",
			Help: "Timestamp of the last microphone frame sent",
		},
	)

	// --- Memory metrics (basic monitoring) ---

	memoryHeapAllocBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "jetkvm_audio_memory_heap_alloc_bytes",
			Help: "Current heap allocation in bytes",
		},
	)

	memoryGCCount = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "jetkvm_audio_memory_gc_total",
			Help: "Total number of garbage collections",
		},
	)

	// lastMetricsUpdate is the Unix timestamp of the most recent metrics
	// push; accessed atomically from the update functions below.
	lastMetricsUpdate int64

	// Shadow values of the counters above. Prometheus counters expose no
	// Get(), so the update functions swap these atomically to compute the
	// delta that must be Add()ed.
	audioFramesReceivedValue  uint64
	audioFramesDroppedValue   uint64
	audioBytesProcessedValue  uint64
	audioConnectionDropsValue uint64
	micFramesSentValue        uint64
	micFramesDroppedValue     uint64
	micBytesProcessedValue    uint64
	micConnectionDropsValue   uint64

	// Last observed runtime GC count, used for delta computation.
	memoryGCCountValue uint32
)
|
||||
|
||||
// UnifiedAudioMetrics provides a common structure for both input and output
// audio streams, so output (received) and microphone (sent) stats can share
// one update path.
type UnifiedAudioMetrics struct {
	FramesReceived  uint64        `json:"frames_received"`           // output-side frame count
	FramesDropped   uint64        `json:"frames_dropped"`            // frames lost on either side
	FramesSent      uint64        `json:"frames_sent,omitempty"`     // input-side frame count (zero for output)
	BytesProcessed  uint64        `json:"bytes_processed"`           // total payload bytes handled
	ConnectionDrops uint64        `json:"connection_drops"`          // transport-level disconnects
	LastFrameTime   time.Time     `json:"last_frame_time"`           // wall-clock time of the newest frame
	AverageLatency  time.Duration `json:"average_latency"`           // rolling average processing latency
}
|
||||
|
||||
// convertAudioInputMetricsToUnified converts AudioInputMetrics to UnifiedAudioMetrics
|
||||
func convertAudioInputMetricsToUnified(metrics AudioInputMetrics) UnifiedAudioMetrics {
|
||||
return UnifiedAudioMetrics{
|
||||
FramesReceived: 0, // AudioInputMetrics doesn't have FramesReceived
|
||||
FramesDropped: uint64(metrics.FramesDropped),
|
||||
FramesSent: uint64(metrics.FramesSent),
|
||||
BytesProcessed: uint64(metrics.BytesProcessed),
|
||||
ConnectionDrops: uint64(metrics.ConnectionDrops),
|
||||
LastFrameTime: metrics.LastFrameTime,
|
||||
AverageLatency: metrics.AverageLatency,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateAudioMetrics updates Prometheus metrics with current audio data
|
||||
func UpdateAudioMetrics(metrics UnifiedAudioMetrics) {
|
||||
oldReceived := atomic.SwapUint64(&audioFramesReceivedValue, metrics.FramesReceived)
|
||||
if metrics.FramesReceived > oldReceived {
|
||||
audioFramesReceivedTotal.Add(float64(metrics.FramesReceived - oldReceived))
|
||||
}
|
||||
|
||||
oldDropped := atomic.SwapUint64(&audioFramesDroppedValue, metrics.FramesDropped)
|
||||
if metrics.FramesDropped > oldDropped {
|
||||
audioFramesDroppedTotal.Add(float64(metrics.FramesDropped - oldDropped))
|
||||
}
|
||||
|
||||
oldBytes := atomic.SwapUint64(&audioBytesProcessedValue, metrics.BytesProcessed)
|
||||
if metrics.BytesProcessed > oldBytes {
|
||||
audioBytesProcessedTotal.Add(float64(metrics.BytesProcessed - oldBytes))
|
||||
}
|
||||
|
||||
oldDrops := atomic.SwapUint64(&audioConnectionDropsValue, metrics.ConnectionDrops)
|
||||
if metrics.ConnectionDrops > oldDrops {
|
||||
audioConnectionDropsTotal.Add(float64(metrics.ConnectionDrops - oldDrops))
|
||||
}
|
||||
|
||||
// Update gauges
|
||||
audioAverageLatencyMilliseconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e6)
|
||||
if !metrics.LastFrameTime.IsZero() {
|
||||
audioLastFrameTimestamp.Set(float64(metrics.LastFrameTime.Unix()))
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateMicrophoneMetrics updates Prometheus metrics with current microphone data
|
||||
func UpdateMicrophoneMetrics(metrics UnifiedAudioMetrics) {
|
||||
oldSent := atomic.SwapUint64(&micFramesSentValue, metrics.FramesSent)
|
||||
if metrics.FramesSent > oldSent {
|
||||
microphoneFramesSentTotal.Add(float64(metrics.FramesSent - oldSent))
|
||||
}
|
||||
|
||||
oldDropped := atomic.SwapUint64(&micFramesDroppedValue, metrics.FramesDropped)
|
||||
if metrics.FramesDropped > oldDropped {
|
||||
microphoneFramesDroppedTotal.Add(float64(metrics.FramesDropped - oldDropped))
|
||||
}
|
||||
|
||||
oldBytes := atomic.SwapUint64(&micBytesProcessedValue, metrics.BytesProcessed)
|
||||
if metrics.BytesProcessed > oldBytes {
|
||||
microphoneBytesProcessedTotal.Add(float64(metrics.BytesProcessed - oldBytes))
|
||||
}
|
||||
|
||||
oldDrops := atomic.SwapUint64(&micConnectionDropsValue, metrics.ConnectionDrops)
|
||||
if metrics.ConnectionDrops > oldDrops {
|
||||
microphoneConnectionDropsTotal.Add(float64(metrics.ConnectionDrops - oldDrops))
|
||||
}
|
||||
|
||||
// Update gauges
|
||||
microphoneAverageLatencyMilliseconds.Set(float64(metrics.AverageLatency.Nanoseconds()) / 1e6)
|
||||
if !metrics.LastFrameTime.IsZero() {
|
||||
microphoneLastFrameTimestamp.Set(float64(metrics.LastFrameTime.Unix()))
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// UpdateMemoryMetrics updates basic memory metrics
|
||||
func UpdateMemoryMetrics() {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
memoryHeapAllocBytes.Set(float64(m.HeapAlloc))
|
||||
|
||||
// Update GC count with delta calculation
|
||||
currentGCCount := uint32(m.NumGC)
|
||||
prevGCCount := atomic.SwapUint32(&memoryGCCountValue, currentGCCount)
|
||||
if prevGCCount > 0 && currentGCCount > prevGCCount {
|
||||
memoryGCCount.Add(float64(currentGCCount - prevGCCount))
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&lastMetricsUpdate, time.Now().Unix())
|
||||
}
|
||||
|
||||
// GetLastMetricsUpdate returns the timestamp of the last metrics update
|
||||
func GetLastMetricsUpdate() time.Time {
|
||||
timestamp := atomic.LoadInt64(&lastMetricsUpdate)
|
||||
return time.Unix(timestamp, 0)
|
||||
}
|
||||
|
||||
// StartMetricsUpdater starts a goroutine that periodically updates Prometheus metrics
|
||||
func StartMetricsUpdater() {
|
||||
// Start the centralized metrics collector
|
||||
registry := GetMetricsRegistry()
|
||||
registry.StartMetricsCollector()
|
||||
|
||||
// Start a separate goroutine for periodic updates
|
||||
go func() {
|
||||
ticker := time.NewTicker(5 * time.Second) // Update every 5 seconds
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
// Update memory metrics (not part of centralized registry)
|
||||
UpdateMemoryMetrics()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
//go:build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MetricsRegistry provides a centralized source of truth for all audio
// metrics, eliminating duplication between session-specific and global
// managers.
type MetricsRegistry struct {
	mu                sync.RWMutex
	audioInputMetrics AudioInputMetrics // latest snapshot from the active input manager
	lastUpdate        int64             // Unix timestamp; read atomically by GetLastUpdate
}
|
||||
|
||||
var (
	// globalMetricsRegistry is the lazily constructed singleton; always
	// access it through GetMetricsRegistry.
	globalMetricsRegistry *MetricsRegistry
	// registryOnce guards the one-time construction of the singleton.
	registryOnce sync.Once
)
|
||||
|
||||
// GetMetricsRegistry returns the global metrics registry instance
|
||||
func GetMetricsRegistry() *MetricsRegistry {
|
||||
registryOnce.Do(func() {
|
||||
globalMetricsRegistry = &MetricsRegistry{
|
||||
lastUpdate: time.Now().Unix(),
|
||||
}
|
||||
})
|
||||
return globalMetricsRegistry
|
||||
}
|
||||
|
||||
// UpdateAudioInputMetrics updates the centralized audio input metrics
|
||||
func (mr *MetricsRegistry) UpdateAudioInputMetrics(metrics AudioInputMetrics) {
|
||||
mr.mu.Lock()
|
||||
mr.audioInputMetrics = metrics
|
||||
mr.lastUpdate = time.Now().Unix()
|
||||
mr.mu.Unlock()
|
||||
|
||||
// Update Prometheus metrics directly to avoid circular dependency
|
||||
UpdateMicrophoneMetrics(convertAudioInputMetricsToUnified(metrics))
|
||||
}
|
||||
|
||||
// GetAudioInputMetrics returns the current audio input metrics
|
||||
func (mr *MetricsRegistry) GetAudioInputMetrics() AudioInputMetrics {
|
||||
mr.mu.RLock()
|
||||
defer mr.mu.RUnlock()
|
||||
return mr.audioInputMetrics
|
||||
}
|
||||
|
||||
// GetLastUpdate returns the timestamp of the last metrics update
|
||||
func (mr *MetricsRegistry) GetLastUpdate() time.Time {
|
||||
timestamp := atomic.LoadInt64(&mr.lastUpdate)
|
||||
return time.Unix(timestamp, 0)
|
||||
}
|
||||
|
||||
// StartMetricsCollector starts a background goroutine to collect metrics
|
||||
func (mr *MetricsRegistry) StartMetricsCollector() {
|
||||
go func() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
// Collect from session-specific manager if available
|
||||
if sessionProvider := GetSessionProvider(); sessionProvider != nil && sessionProvider.IsSessionActive() {
|
||||
if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
|
||||
metrics := inputManager.GetMetrics()
|
||||
mr.UpdateAudioInputMetrics(metrics)
|
||||
}
|
||||
} else {
|
||||
// Fallback to global manager if no session is active
|
||||
globalManager := getAudioInputManager()
|
||||
metrics := globalManager.GetMetrics()
|
||||
mr.UpdateAudioInputMetrics(metrics)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
@ -1,354 +0,0 @@
|
|||
//go:build cgo || arm
|
||||
// +build cgo arm
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Validation errors. All validators below wrap (or return directly) one of
// these sentinels, so callers can classify failures with errors.Is.
var (
	// Frame-level errors.
	ErrInvalidFrameSize  = errors.New("invalid frame size")
	ErrInvalidFrameData  = errors.New("invalid frame data")
	ErrFrameDataEmpty    = errors.New("invalid frame data: frame data is empty")
	ErrFrameDataTooLarge = errors.New("invalid frame data: exceeds maximum")
	ErrInvalidBufferSize = errors.New("invalid buffer size")

	// Configuration / parameter errors.
	ErrInvalidLatency         = errors.New("invalid latency value")
	ErrInvalidConfiguration   = errors.New("invalid configuration")
	ErrInvalidSocketConfig    = errors.New("invalid socket configuration")
	ErrInvalidMetricsInterval = errors.New("invalid metrics interval")
	ErrInvalidSampleRate      = errors.New("invalid sample rate")
	ErrInvalidChannels        = errors.New("invalid channels")
	ErrInvalidBitrate         = errors.New("invalid bitrate")
	ErrInvalidFrameDuration   = errors.New("invalid frame duration")
	ErrInvalidOffset          = errors.New("invalid offset")
	ErrInvalidLength          = errors.New("invalid length")
)
|
||||
|
||||
// ValidateAudioQuality is deprecated: quality is fixed at optimal settings,
// so every input is accepted. Kept only for API compatibility.
func ValidateAudioQuality(quality int) error {
	return nil
}
|
||||
|
||||
// ValidateZeroCopyFrame validates zero-copy audio frame
|
||||
// Optimized to use cached max frame size
|
||||
func ValidateZeroCopyFrame(frame *ZeroCopyAudioFrame) error {
|
||||
if frame == nil {
|
||||
return ErrInvalidFrameData
|
||||
}
|
||||
data := frame.Data()
|
||||
if len(data) == 0 {
|
||||
return ErrInvalidFrameData
|
||||
}
|
||||
|
||||
// Fast path: use cached max frame size
|
||||
maxFrameSize := cachedMaxFrameSize
|
||||
if maxFrameSize == 0 {
|
||||
// Fallback: get from cache
|
||||
cache := Config
|
||||
maxFrameSize = cache.MaxAudioFrameSize
|
||||
if maxFrameSize == 0 {
|
||||
// Last resort: use default
|
||||
maxFrameSize = cache.MaxAudioFrameSize
|
||||
}
|
||||
// Cache globally for next calls
|
||||
cachedMaxFrameSize = maxFrameSize
|
||||
}
|
||||
|
||||
if len(data) > maxFrameSize {
|
||||
return ErrInvalidFrameSize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateBufferSize validates buffer size parameters with enhanced boundary checks
|
||||
// Optimized for minimal overhead in hotpath
|
||||
func ValidateBufferSize(size int) error {
|
||||
if size <= 0 {
|
||||
return fmt.Errorf("%w: buffer size %d must be positive", ErrInvalidBufferSize, size)
|
||||
}
|
||||
// Single boundary check using pre-cached value
|
||||
if size > Config.SocketMaxBuffer {
|
||||
return fmt.Errorf("%w: buffer size %d exceeds maximum %d",
|
||||
ErrInvalidBufferSize, size, Config.SocketMaxBuffer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateLatency validates latency duration values with reasonable bounds
|
||||
// Optimized to use AudioConfigCache for frequently accessed values
|
||||
func ValidateLatency(latency time.Duration) error {
|
||||
if latency < 0 {
|
||||
return fmt.Errorf("%w: latency %v cannot be negative", ErrInvalidLatency, latency)
|
||||
}
|
||||
|
||||
// Fast path: check against cached max latency
|
||||
cache := Config
|
||||
maxLatency := time.Duration(cache.MaxLatency)
|
||||
|
||||
// If we have a valid cached value, use it
|
||||
if maxLatency > 0 {
|
||||
minLatency := time.Millisecond // Minimum reasonable latency
|
||||
if latency > 0 && latency < minLatency {
|
||||
return fmt.Errorf("%w: latency %v below minimum %v",
|
||||
ErrInvalidLatency, latency, minLatency)
|
||||
}
|
||||
if latency > maxLatency {
|
||||
return fmt.Errorf("%w: latency %v exceeds maximum %v",
|
||||
ErrInvalidLatency, latency, maxLatency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
minLatency := time.Millisecond // Minimum reasonable latency
|
||||
if latency > 0 && latency < minLatency {
|
||||
return fmt.Errorf("%w: latency %v below minimum %v",
|
||||
ErrInvalidLatency, latency, minLatency)
|
||||
}
|
||||
if latency > Config.MaxLatency {
|
||||
return fmt.Errorf("%w: latency %v exceeds maximum %v",
|
||||
ErrInvalidLatency, latency, Config.MaxLatency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateMetricsInterval validates metrics update interval
|
||||
// Optimized to use AudioConfigCache for frequently accessed values
|
||||
func ValidateMetricsInterval(interval time.Duration) error {
|
||||
// Fast path: check against cached values
|
||||
cache := Config
|
||||
minInterval := time.Duration(cache.MinMetricsUpdateInterval)
|
||||
maxInterval := time.Duration(cache.MaxMetricsUpdateInterval)
|
||||
|
||||
// If we have valid cached values, use them
|
||||
if minInterval > 0 && maxInterval > 0 {
|
||||
if interval < minInterval {
|
||||
return fmt.Errorf("%w: interval %v below minimum %v",
|
||||
ErrInvalidMetricsInterval, interval, minInterval)
|
||||
}
|
||||
if interval > maxInterval {
|
||||
return fmt.Errorf("%w: interval %v exceeds maximum %v",
|
||||
ErrInvalidMetricsInterval, interval, maxInterval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
minInterval = Config.MinMetricsUpdateInterval
|
||||
maxInterval = Config.MaxMetricsUpdateInterval
|
||||
if interval < minInterval {
|
||||
return ErrInvalidMetricsInterval
|
||||
}
|
||||
if interval > maxInterval {
|
||||
return ErrInvalidMetricsInterval
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateInputIPCConfig validates input IPC configuration
|
||||
func ValidateInputIPCConfig(sampleRate, channels, frameSize int) error {
|
||||
minSampleRate := Config.MinSampleRate
|
||||
maxSampleRate := Config.MaxSampleRate
|
||||
maxChannels := Config.MaxChannels
|
||||
if sampleRate < minSampleRate || sampleRate > maxSampleRate {
|
||||
return ErrInvalidSampleRate
|
||||
}
|
||||
if channels < 1 || channels > maxChannels {
|
||||
return ErrInvalidChannels
|
||||
}
|
||||
if frameSize <= 0 {
|
||||
return ErrInvalidFrameSize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateOutputIPCConfig validates output IPC configuration
|
||||
func ValidateOutputIPCConfig(sampleRate, channels, frameSize int) error {
|
||||
minSampleRate := Config.MinSampleRate
|
||||
maxSampleRate := Config.MaxSampleRate
|
||||
maxChannels := Config.MaxChannels
|
||||
if sampleRate < minSampleRate || sampleRate > maxSampleRate {
|
||||
return ErrInvalidSampleRate
|
||||
}
|
||||
if channels < 1 || channels > maxChannels {
|
||||
return ErrInvalidChannels
|
||||
}
|
||||
if frameSize <= 0 {
|
||||
return ErrInvalidFrameSize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateSampleRate validates audio sample rate values
|
||||
// Optimized for minimal overhead in hotpath
|
||||
func ValidateSampleRate(sampleRate int) error {
|
||||
if sampleRate <= 0 {
|
||||
return fmt.Errorf("%w: sample rate %d must be positive", ErrInvalidSampleRate, sampleRate)
|
||||
}
|
||||
// Direct validation against valid rates
|
||||
for _, rate := range Config.ValidSampleRates {
|
||||
if sampleRate == rate {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("%w: sample rate %d not in valid rates %v",
|
||||
ErrInvalidSampleRate, sampleRate, Config.ValidSampleRates)
|
||||
}
|
||||
|
||||
// ValidateChannelCount validates audio channel count
|
||||
// Optimized for minimal overhead in hotpath
|
||||
func ValidateChannelCount(channels int) error {
|
||||
if channels <= 0 {
|
||||
return fmt.Errorf("%w: channel count %d must be positive", ErrInvalidChannels, channels)
|
||||
}
|
||||
// Direct boundary check
|
||||
if channels > Config.MaxChannels {
|
||||
return fmt.Errorf("%w: channel count %d exceeds maximum %d",
|
||||
ErrInvalidChannels, channels, Config.MaxChannels)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateBitrate validates audio bitrate values (expects kbps)
|
||||
// Optimized for minimal overhead in hotpath
|
||||
func ValidateBitrate(bitrate int) error {
|
||||
if bitrate <= 0 {
|
||||
return fmt.Errorf("%w: bitrate %d must be positive", ErrInvalidBitrate, bitrate)
|
||||
}
|
||||
// Direct boundary check with single conversion
|
||||
bitrateInBps := bitrate * 1000
|
||||
if bitrateInBps < Config.MinOpusBitrate {
|
||||
return fmt.Errorf("%w: bitrate %d kbps (%d bps) below minimum %d bps",
|
||||
ErrInvalidBitrate, bitrate, bitrateInBps, Config.MinOpusBitrate)
|
||||
}
|
||||
if bitrateInBps > Config.MaxOpusBitrate {
|
||||
return fmt.Errorf("%w: bitrate %d kbps (%d bps) exceeds maximum %d bps",
|
||||
ErrInvalidBitrate, bitrate, bitrateInBps, Config.MaxOpusBitrate)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateFrameDuration validates frame duration values
|
||||
// Optimized to use AudioConfigCache for frequently accessed values
|
||||
func ValidateFrameDuration(duration time.Duration) error {
|
||||
if duration <= 0 {
|
||||
return fmt.Errorf("%w: frame duration %v must be positive", ErrInvalidFrameDuration, duration)
|
||||
}
|
||||
|
||||
// Fast path: Check against cached frame size first
|
||||
cache := Config
|
||||
|
||||
// Convert frameSize (samples) to duration for comparison
|
||||
cachedFrameSize := cache.FrameSize
|
||||
cachedSampleRate := cache.SampleRate
|
||||
|
||||
// Only do this calculation if we have valid cached values
|
||||
if cachedFrameSize > 0 && cachedSampleRate > 0 {
|
||||
cachedDuration := time.Duration(cachedFrameSize) * time.Second / time.Duration(cachedSampleRate)
|
||||
|
||||
// Most common case: validating against the current frame duration
|
||||
if duration == cachedDuration {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fast path: Check against cached min/max frame duration
|
||||
cachedMinDuration := time.Duration(cache.MinFrameDuration)
|
||||
cachedMaxDuration := time.Duration(cache.MaxFrameDuration)
|
||||
|
||||
if cachedMinDuration > 0 && cachedMaxDuration > 0 {
|
||||
if duration < cachedMinDuration {
|
||||
return fmt.Errorf("%w: frame duration %v below minimum %v",
|
||||
ErrInvalidFrameDuration, duration, cachedMinDuration)
|
||||
}
|
||||
if duration > cachedMaxDuration {
|
||||
return fmt.Errorf("%w: frame duration %v exceeds maximum %v",
|
||||
ErrInvalidFrameDuration, duration, cachedMaxDuration)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Slow path: Use current config values
|
||||
updatedMinDuration := time.Duration(cache.MinFrameDuration)
|
||||
updatedMaxDuration := time.Duration(cache.MaxFrameDuration)
|
||||
|
||||
if duration < updatedMinDuration {
|
||||
return fmt.Errorf("%w: frame duration %v below minimum %v",
|
||||
ErrInvalidFrameDuration, duration, updatedMinDuration)
|
||||
}
|
||||
if duration > updatedMaxDuration {
|
||||
return fmt.Errorf("%w: frame duration %v exceeds maximum %v",
|
||||
ErrInvalidFrameDuration, duration, updatedMaxDuration)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateAudioConfigConstants validates audio configuration constants
|
||||
func ValidateAudioConfigConstants(config *AudioConfigConstants) error {
|
||||
// Quality validation removed - using fixed optimal configuration
|
||||
// Validate configuration values if config is provided
|
||||
if config != nil {
|
||||
if Config.MaxFrameSize <= 0 {
|
||||
return fmt.Errorf("invalid MaxFrameSize: %d", Config.MaxFrameSize)
|
||||
}
|
||||
if Config.SampleRate <= 0 {
|
||||
return fmt.Errorf("invalid SampleRate: %d", Config.SampleRate)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cachedMaxFrameSize caches Config.MaxAudioFrameSize for hot-path frame
// validation; primed by InitValidationCache (and lazily by the validators).
// Kept as a package-level variable for backward compatibility.
var cachedMaxFrameSize int
|
||||
|
||||
// InitValidationCache initializes cached validation values with actual config
|
||||
func InitValidationCache() {
|
||||
// Initialize the global cache variable for backward compatibility
|
||||
cachedMaxFrameSize = Config.MaxAudioFrameSize
|
||||
|
||||
// Initialize the global audio config cache
|
||||
cachedMaxFrameSize = Config.MaxAudioFrameSize
|
||||
}
|
||||
|
||||
// ValidateAudioFrame validates audio frame data with cached max size for performance
|
||||
//
|
||||
//go:inline
|
||||
func ValidateAudioFrame(data []byte) error {
|
||||
// Fast path: check length against cached max size in single operation
|
||||
dataLen := len(data)
|
||||
if dataLen == 0 {
|
||||
return ErrFrameDataEmpty
|
||||
}
|
||||
|
||||
// Use global cached value for fastest access - updated during initialization
|
||||
maxSize := cachedMaxFrameSize
|
||||
if maxSize == 0 {
|
||||
// Fallback: get from cache only if global cache not initialized
|
||||
cache := Config
|
||||
maxSize = cache.MaxAudioFrameSize
|
||||
if maxSize == 0 {
|
||||
// Last resort: get fresh value
|
||||
maxSize = cache.MaxAudioFrameSize
|
||||
}
|
||||
// Cache the value globally for next calls
|
||||
cachedMaxFrameSize = maxSize
|
||||
}
|
||||
|
||||
// Single comparison for validation
|
||||
if dataLen > maxSize {
|
||||
return ErrFrameDataTooLarge
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WrapWithMetadata wraps error with metadata for enhanced validation context
|
||||
func WrapWithMetadata(err error, component, operation string, metadata map[string]interface{}) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%s.%s: %w (metadata: %+v)", component, operation, err, metadata)
|
||||
}
|
||||
|
|
@ -1,6 +1,3 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
|
|
|
|||
|
|
@ -1,94 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
	// globalInputManager holds the process-wide *AudioInputManager.
	// It is accessed exclusively through atomic pointer operations
	// (see getAudioInputManager / ResetAudioInputManagers).
	globalInputManager unsafe.Pointer // *AudioInputManager
)
|
||||
|
||||
// AudioInputInterface defines the common interface for audio input managers;
// *AudioInputManager satisfies it (see getAudioInputManager).
type AudioInputInterface interface {
	Start() error                      // begin processing microphone input
	Stop()                             // stop processing microphone input
	WriteOpusFrame(frame []byte) error // forward one encoded Opus frame
	IsRunning() bool                   // whether input is currently active
	GetMetrics() AudioInputMetrics     // point-in-time metrics snapshot
}
|
||||
|
||||
// GetSupervisor returns the global audio input supervisor for advanced
// management; the manager does not own the supervisor's lifecycle.
func (m *AudioInputManager) GetSupervisor() *AudioInputSupervisor {
	return GetAudioInputSupervisor()
}
|
||||
|
||||
// getAudioInputManager returns the audio input manager
|
||||
func getAudioInputManager() AudioInputInterface {
|
||||
ptr := atomic.LoadPointer(&globalInputManager)
|
||||
if ptr == nil {
|
||||
// Create new manager
|
||||
newManager := NewAudioInputManager()
|
||||
if atomic.CompareAndSwapPointer(&globalInputManager, nil, unsafe.Pointer(newManager)) {
|
||||
return newManager
|
||||
}
|
||||
// Another goroutine created it, use that one
|
||||
ptr = atomic.LoadPointer(&globalInputManager)
|
||||
}
|
||||
return (*AudioInputManager)(ptr)
|
||||
}
|
||||
|
||||
// StartAudioInput starts the audio input system using the appropriate manager
|
||||
func StartAudioInput() error {
|
||||
manager := getAudioInputManager()
|
||||
return manager.Start()
|
||||
}
|
||||
|
||||
// StopAudioInput stops the audio input system
|
||||
func StopAudioInput() {
|
||||
manager := getAudioInputManager()
|
||||
manager.Stop()
|
||||
}
|
||||
|
||||
// WriteAudioInputFrame writes an Opus frame to the audio input system
|
||||
func WriteAudioInputFrame(frame []byte) error {
|
||||
manager := getAudioInputManager()
|
||||
return manager.WriteOpusFrame(frame)
|
||||
}
|
||||
|
||||
// IsAudioInputRunning returns whether the audio input system is running
|
||||
func IsAudioInputRunning() bool {
|
||||
manager := getAudioInputManager()
|
||||
return manager.IsRunning()
|
||||
}
|
||||
|
||||
// GetAudioInputMetrics returns current audio input metrics
|
||||
func GetAudioInputMetrics() AudioInputMetrics {
|
||||
manager := getAudioInputManager()
|
||||
return manager.GetMetrics()
|
||||
}
|
||||
|
||||
// GetAudioInputIPCSupervisor returns the IPC supervisor
|
||||
func GetAudioInputIPCSupervisor() *AudioInputSupervisor {
|
||||
ptr := atomic.LoadPointer(&globalInputManager)
|
||||
if ptr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
manager := (*AudioInputManager)(ptr)
|
||||
return manager.GetSupervisor()
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
// ResetAudioInputManagers resets the global manager (for testing)
|
||||
func ResetAudioInputManagers() {
|
||||
// Stop existing manager first
|
||||
if ptr := atomic.LoadPointer(&globalInputManager); ptr != nil {
|
||||
(*AudioInputManager)(ptr).Stop()
|
||||
}
|
||||
|
||||
// Reset pointer
|
||||
atomic.StorePointer(&globalInputManager, nil)
|
||||
}
|
||||
|
|
@ -1,269 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
)
|
||||
|
||||
// Component name constant for structured logging.
const (
	AudioInputManagerComponent = "audio-input-manager"
)
|
||||
|
||||
// AudioInputMetrics holds metrics for microphone input.
// Atomic fields MUST come first: on ARM32, int64 atomics require 8-byte
// alignment, which Go only guarantees for the leading fields of a struct.
type AudioInputMetrics struct {
	// FramesSent counts total frames sent (input-specific); accessed with
	// sync/atomic, hence placed first for alignment.
	FramesSent int64 `json:"frames_sent"`

	// BaseAudioMetrics embeds the shared counters; its atomic fields are
	// assumed to be laid out alignment-safely as well.
	BaseAudioMetrics
}
|
||||
|
||||
// AudioInputManager manages the microphone input stream using IPC mode only;
// frames are forwarded to the subprocess via the supervisor's client.
type AudioInputManager struct {
	*BaseAudioManager
	framesSent int64 // input-specific counter, accessed atomically
}
|
||||
|
||||
// NewAudioInputManager creates a new audio input manager
|
||||
func NewAudioInputManager() *AudioInputManager {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputManagerComponent).Logger()
|
||||
return &AudioInputManager{
|
||||
BaseAudioManager: NewBaseAudioManager(logger),
|
||||
}
|
||||
}
|
||||
|
||||
// getClient returns the audio input client from the global supervisor
|
||||
func (aim *AudioInputManager) getClient() *AudioInputClient {
|
||||
supervisor := GetAudioInputSupervisor()
|
||||
if supervisor == nil {
|
||||
return nil
|
||||
}
|
||||
return supervisor.GetClient()
|
||||
}
|
||||
|
||||
// Start begins processing microphone input
|
||||
func (aim *AudioInputManager) Start() error {
|
||||
if !aim.setRunning(true) {
|
||||
return fmt.Errorf("audio input manager is already running")
|
||||
}
|
||||
|
||||
aim.logComponentStart(AudioInputManagerComponent)
|
||||
|
||||
// Ensure supervisor and client are available
|
||||
supervisor := GetAudioInputSupervisor()
|
||||
if supervisor == nil {
|
||||
aim.setRunning(false)
|
||||
return fmt.Errorf("audio input supervisor not available")
|
||||
}
|
||||
|
||||
// Start the supervisor if not already running
|
||||
if !supervisor.IsRunning() {
|
||||
err := supervisor.Start()
|
||||
if err != nil {
|
||||
aim.logComponentError(AudioInputManagerComponent, err, "failed to start supervisor")
|
||||
aim.setRunning(false)
|
||||
aim.resetMetrics()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
aim.logComponentStarted(AudioInputManagerComponent)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops processing microphone input. Safe to call repeatedly: a second
// call is a no-op because setRunning reports the state transition.
func (aim *AudioInputManager) Stop() {
	if !aim.setRunning(false) {
		return // Already stopped
	}

	aim.logComponentStop(AudioInputManagerComponent)

	// Note: We don't stop the supervisor here as it may be shared.
	// The supervisor lifecycle is managed by the main process.

	aim.logComponentStopped(AudioInputManagerComponent)
}
|
||||
|
||||
// resetMetrics zeroes the shared base metrics plus the input-specific
// frames-sent counter.
func (aim *AudioInputManager) resetMetrics() {
	aim.BaseAudioManager.resetMetrics()
	atomic.StoreInt64(&aim.framesSent, 0)
}
|
||||
|
||||
// WriteOpusFrame writes an Opus frame to the audio input system with latency tracking
|
||||
func (aim *AudioInputManager) WriteOpusFrame(frame []byte) error {
|
||||
if !aim.IsRunning() {
|
||||
return nil // Not running, silently drop
|
||||
}
|
||||
|
||||
// Check mute state - drop frames if microphone is muted (like audio output)
|
||||
if IsMicrophoneMuted() {
|
||||
return nil // Muted, silently drop
|
||||
}
|
||||
|
||||
// Use ultra-fast validation for critical audio path
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
aim.logComponentError(AudioInputManagerComponent, err, "Frame validation failed")
|
||||
return fmt.Errorf("input frame validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Get client from supervisor
|
||||
client := aim.getClient()
|
||||
if client == nil {
|
||||
return fmt.Errorf("audio input client not available")
|
||||
}
|
||||
|
||||
// Track end-to-end latency from WebRTC to IPC
|
||||
startTime := time.Now()
|
||||
err := client.SendFrame(frame)
|
||||
processingTime := time.Since(startTime)
|
||||
|
||||
// Log high latency warnings
|
||||
if processingTime > time.Duration(Config.InputProcessingTimeoutMS)*time.Millisecond {
|
||||
latencyMs := float64(processingTime.Milliseconds())
|
||||
aim.logger.Warn().
|
||||
Float64("latency_ms", latencyMs).
|
||||
Msg("High audio processing latency detected")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteOpusFrameZeroCopy writes an Opus frame using zero-copy optimization
//
// Frames are silently dropped (nil return) when the manager is not running or
// the microphone is muted. A nil frame, a missing client, or a send failure
// increments FramesDropped; a successful send increments framesSent.
func (aim *AudioInputManager) WriteOpusFrameZeroCopy(frame *ZeroCopyAudioFrame) error {
	if !aim.IsRunning() {
		return nil // Not running, silently drop
	}

	// Check mute state - drop frames if microphone is muted (like audio output)
	if IsMicrophoneMuted() {
		return nil // Muted, silently drop
	}

	if frame == nil {
		atomic.AddInt64(&aim.metrics.FramesDropped, 1)
		return nil
	}

	// Get client from supervisor
	client := aim.getClient()
	if client == nil {
		atomic.AddInt64(&aim.metrics.FramesDropped, 1)
		return fmt.Errorf("audio input client not available")
	}

	// Track end-to-end latency from WebRTC to IPC
	startTime := time.Now()
	err := client.SendFrameZeroCopy(frame)
	processingTime := time.Since(startTime)

	// Log high latency warnings (threshold comes from the shared Config)
	if processingTime > time.Duration(Config.InputProcessingTimeoutMS)*time.Millisecond {
		latencyMs := float64(processingTime.Milliseconds())
		aim.logger.Warn().
			Float64("latency_ms", latencyMs).
			Msg("High audio processing latency detected")
	}

	if err != nil {
		atomic.AddInt64(&aim.metrics.FramesDropped, 1)
		return err
	}

	// Update metrics
	atomic.AddInt64(&aim.framesSent, 1)

	return nil
}
|
||||
|
||||
// GetMetrics returns current metrics
|
||||
func (aim *AudioInputManager) GetMetrics() AudioInputMetrics {
|
||||
return AudioInputMetrics{
|
||||
FramesSent: atomic.LoadInt64(&aim.framesSent),
|
||||
BaseAudioMetrics: aim.getBaseMetrics(),
|
||||
}
|
||||
}
|
||||
|
||||
// GetComprehensiveMetrics returns detailed performance metrics across all components
|
||||
func (aim *AudioInputManager) GetComprehensiveMetrics() map[string]interface{} {
|
||||
// Get base metrics
|
||||
baseMetrics := aim.GetMetrics()
|
||||
|
||||
// Get client stats if available
|
||||
var clientStats map[string]interface{}
|
||||
client := aim.getClient()
|
||||
if client != nil {
|
||||
total, dropped := client.GetFrameStats()
|
||||
clientStats = map[string]interface{}{
|
||||
"frames_sent": total,
|
||||
"frames_dropped": dropped,
|
||||
}
|
||||
} else {
|
||||
clientStats = map[string]interface{}{
|
||||
"frames_sent": 0,
|
||||
"frames_dropped": 0,
|
||||
}
|
||||
}
|
||||
|
||||
comprehensiveMetrics := map[string]interface{}{
|
||||
"manager": map[string]interface{}{
|
||||
"frames_sent": baseMetrics.FramesSent,
|
||||
"frames_dropped": baseMetrics.FramesDropped,
|
||||
"bytes_processed": baseMetrics.BytesProcessed,
|
||||
"average_latency_ms": float64(baseMetrics.AverageLatency.Nanoseconds()) / 1e6,
|
||||
"last_frame_time": baseMetrics.LastFrameTime,
|
||||
"running": aim.IsRunning(),
|
||||
},
|
||||
"client": clientStats,
|
||||
}
|
||||
|
||||
return comprehensiveMetrics
|
||||
}
|
||||
|
||||
// IsRunning returns whether the audio input manager is running
// This checks both the internal state and existing system processes
//
// NOTE(review): despite being a getter, this mutates internal state — when an
// already-running server process is discovered via the supervisor, it calls
// setRunning(true) to bring the in-memory state in line with reality.
func (aim *AudioInputManager) IsRunning() bool {
	// First check internal state
	if aim.BaseAudioManager.IsRunning() {
		return true
	}

	// If internal state says not running, check supervisor
	supervisor := GetAudioInputSupervisor()
	if supervisor != nil {
		if existingPID, exists := supervisor.HasExistingProcess(); exists {
			aim.logger.Info().Int("existing_pid", existingPID).Msg("Found existing audio input server process")
			// Update internal state to reflect reality
			aim.setRunning(true)
			return true
		}
	}

	return false
}
|
||||
|
||||
// IsReady returns whether the audio input manager is ready to receive frames
|
||||
// This checks both that it's running and that the IPC connection is established
|
||||
func (aim *AudioInputManager) IsReady() bool {
|
||||
if !aim.IsRunning() {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if client is connected
|
||||
client := aim.getClient()
|
||||
if client == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return client.IsConnected()
|
||||
}
|
||||
|
|
@ -1,304 +0,0 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AudioInputSupervisor manages the audio input server subprocess
type AudioInputSupervisor struct {
	*BaseSupervisor
	// client is the IPC client used to talk to the spawned server process.
	client *AudioInputClient

	// Environment variables for OPUS configuration
	// (built by SetOpusConfig, applied to the subprocess in startProcess)
	opusEnv []string
}
|
||||
|
||||
// NewAudioInputSupervisor creates a new audio input supervisor
|
||||
func NewAudioInputSupervisor() *AudioInputSupervisor {
|
||||
return &AudioInputSupervisor{
|
||||
BaseSupervisor: NewBaseSupervisor("audio-input-supervisor"),
|
||||
client: NewAudioInputClient(),
|
||||
}
|
||||
}
|
||||
|
||||
// SetOpusConfig sets OPUS configuration parameters as environment variables
|
||||
// for the audio input subprocess
|
||||
func (ais *AudioInputSupervisor) SetOpusConfig(bitrate, complexity, vbr, signalType, bandwidth, dtx int) {
|
||||
ais.mutex.Lock()
|
||||
defer ais.mutex.Unlock()
|
||||
|
||||
// Store OPUS parameters as environment variables for C binary
|
||||
ais.opusEnv = []string{
|
||||
"OPUS_BITRATE=" + strconv.Itoa(bitrate),
|
||||
"OPUS_COMPLEXITY=" + strconv.Itoa(complexity),
|
||||
"OPUS_VBR=" + strconv.Itoa(vbr),
|
||||
"OPUS_SIGNAL_TYPE=" + strconv.Itoa(signalType),
|
||||
"OPUS_BANDWIDTH=" + strconv.Itoa(bandwidth),
|
||||
"OPUS_DTX=" + strconv.Itoa(dtx),
|
||||
"ALSA_PLAYBACK_DEVICE=hw:1,0", // USB Gadget audio playback
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins supervising the audio input server process
//
// The compare-and-swap on ais.running makes Start race-safe: only one caller
// transitions 0→1 and launches the supervision goroutine; concurrent or
// repeated calls get an error instead of a duplicate loop.
func (ais *AudioInputSupervisor) Start() error {
	if !atomic.CompareAndSwapInt32(&ais.running, 0, 1) {
		return fmt.Errorf("audio input supervisor is already running")
	}

	ais.logSupervisorStart()
	ais.createContext()

	// Recreate channels in case they were closed by a previous Stop() call
	ais.initializeChannels()

	// Start the supervision loop
	go ais.supervisionLoop()

	ais.logger.Info().Str("component", "audio-input-supervisor").Msg("component started successfully")
	return nil
}
|
||||
|
||||
// supervisionLoop is the main supervision loop
|
||||
func (ais *AudioInputSupervisor) supervisionLoop() {
|
||||
// Configure supervision parameters (no restart for input supervisor)
|
||||
config := SupervisionConfig{
|
||||
ProcessType: "audio input server",
|
||||
Timeout: Config.InputSupervisorTimeout,
|
||||
EnableRestart: false, // Input supervisor doesn't restart
|
||||
MaxRestartAttempts: 0,
|
||||
RestartWindow: 0,
|
||||
RestartDelay: 0,
|
||||
MaxRestartDelay: 0,
|
||||
}
|
||||
|
||||
// Configure callbacks (input supervisor doesn't have callbacks currently)
|
||||
callbacks := ProcessCallbacks{
|
||||
OnProcessStart: nil,
|
||||
OnProcessExit: nil,
|
||||
OnRestart: nil,
|
||||
}
|
||||
|
||||
// Use the base supervision loop template
|
||||
ais.SupervisionLoop(
|
||||
config,
|
||||
callbacks,
|
||||
ais.startProcess,
|
||||
func() bool { return false }, // Never restart
|
||||
func() time.Duration { return 0 }, // No restart delay needed
|
||||
)
|
||||
}
|
||||
|
||||
// startProcess starts the audio input server process
//
// It launches the embedded C binary with the OPUS_* environment prepared by
// SetOpusConfig appended to the parent environment, places it in its own
// process group for clean termination, and then synchronously connects the
// IPC client so frames can be sent as soon as this returns.
func (ais *AudioInputSupervisor) startProcess() error {
	// Use embedded C binary path
	binaryPath := GetAudioInputBinaryPath()

	ais.mutex.Lock()
	defer ais.mutex.Unlock()

	// Create new command (no args needed for C binary)
	ais.cmd = exec.CommandContext(ais.ctx, binaryPath)
	ais.cmd.Stdout = os.Stdout
	ais.cmd.Stderr = os.Stderr

	// Set environment variables for OPUS configuration
	env := append(os.Environ(), ais.opusEnv...)

	// Pass logging environment variables directly to subprocess
	// The subprocess will inherit all PION_LOG_* variables from os.Environ()
	// This ensures the audio scope gets the correct trace level

	ais.cmd.Env = env

	// Set process group to allow clean termination
	ais.cmd.SysProcAttr = &syscall.SysProcAttr{
		Setpgid: true,
	}

	// Start the process
	if err := ais.cmd.Start(); err != nil {
		return fmt.Errorf("failed to start audio input server process: %w", err)
	}

	ais.processPID = ais.cmd.Process.Pid
	ais.logger.Info().Int("pid", ais.processPID).Str("binary", binaryPath).Strs("opus_env", ais.opusEnv).Msg("audio input server process started")

	// Connect client to the server synchronously to avoid race condition
	// (note: this sleeps briefly while holding ais.mutex — see connectClient)
	ais.connectClient()

	return nil
}
|
||||
|
||||
// Stop gracefully stops the audio input server and supervisor
//
// Shutdown order matters: the IPC client is disconnected first (it sends a
// stop message to the server), then the supervision loop is signalled and the
// context cancelled, and finally we wait for the process to exit — escalating
// to a forced kill after Config.InputSupervisorTimeout. The CAS makes Stop
// idempotent.
func (ais *AudioInputSupervisor) Stop() {
	if !atomic.CompareAndSwapInt32(&ais.running, 1, 0) {
		return // Already stopped
	}

	ais.logSupervisorStop()

	// Disconnect client first
	if ais.client != nil {
		ais.client.Disconnect()
	}

	// Signal stop and wait for cleanup
	ais.closeStopChan()
	ais.cancelContext()

	// Wait for process to exit
	select {
	case <-ais.processDone:
		ais.logger.Info().Str("component", "audio-input-supervisor").Msg("component stopped gracefully")
	case <-time.After(Config.InputSupervisorTimeout):
		ais.logger.Warn().Str("component", "audio-input-supervisor").Msg("component did not stop gracefully, forcing termination")
		ais.forceKillProcess("audio input server")
	}

	ais.logger.Info().Str("component", "audio-input-supervisor").Msg("component stopped")
}
|
||||
|
||||
// IsConnected returns whether the client is connected to the audio input server
//
// NOTE(review): this acquires ais.mutex and then calls ais.IsRunning(); if
// the base supervisor's IsRunning ever takes the same mutex this would
// deadlock — confirm it only performs an atomic load on ais.running.
func (ais *AudioInputSupervisor) IsConnected() bool {
	ais.mutex.Lock()
	defer ais.mutex.Unlock()
	if !ais.IsRunning() {
		return false
	}
	return ais.client.IsConnected()
}
|
||||
|
||||
// GetClient returns the IPC client for sending audio frames
// (the same shared instance is returned on every call).
func (ais *AudioInputSupervisor) GetClient() *AudioInputClient {
	return ais.client
}
|
||||
|
||||
// connectClient attempts to connect the client to the server
|
||||
func (ais *AudioInputSupervisor) connectClient() {
|
||||
// Wait briefly for the server to start and create socket
|
||||
time.Sleep(Config.DefaultSleepDuration)
|
||||
|
||||
// Additional small delay to ensure socket is ready after restart
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
err := ais.client.Connect()
|
||||
if err != nil {
|
||||
ais.logger.Error().Err(err).Msg("Failed to connect to audio input server")
|
||||
return
|
||||
}
|
||||
|
||||
ais.logger.Info().Msg("Connected to audio input server")
|
||||
}
|
||||
|
||||
// SendFrame sends an audio frame to the subprocess (convenience method)
|
||||
func (ais *AudioInputSupervisor) SendFrame(frame []byte) error {
|
||||
if ais.client == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
if !ais.client.IsConnected() {
|
||||
return fmt.Errorf("client not connected")
|
||||
}
|
||||
|
||||
return ais.client.SendFrame(frame)
|
||||
}
|
||||
|
||||
// SendFrameZeroCopy sends a zero-copy frame to the subprocess
|
||||
func (ais *AudioInputSupervisor) SendFrameZeroCopy(frame *ZeroCopyAudioFrame) error {
|
||||
if ais.client == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
if !ais.client.IsConnected() {
|
||||
return fmt.Errorf("client not connected")
|
||||
}
|
||||
|
||||
return ais.client.SendFrameZeroCopy(frame)
|
||||
}
|
||||
|
||||
// SendConfig sends a configuration update to the subprocess (convenience method)
|
||||
func (ais *AudioInputSupervisor) SendConfig(config UnifiedIPCConfig) error {
|
||||
if ais.client == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
if !ais.client.IsConnected() {
|
||||
return fmt.Errorf("client not connected")
|
||||
}
|
||||
|
||||
return ais.client.SendConfig(config)
|
||||
}
|
||||
|
||||
// SendOpusConfig sends a complete Opus encoder configuration to the audio input server
|
||||
func (ais *AudioInputSupervisor) SendOpusConfig(config UnifiedIPCOpusConfig) error {
|
||||
if ais.client == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
if !ais.client.IsConnected() {
|
||||
return fmt.Errorf("client not connected")
|
||||
}
|
||||
|
||||
return ais.client.SendOpusConfig(config)
|
||||
}
|
||||
|
||||
// findExistingAudioInputProcess checks if there's already an audio input server process running
//
// NOTE(review): the inline comments disagree — one says "second column", the
// code parses fields[0]. fields[0] is the PID under busybox `ps` (PID first
// column) but the USER under procps `ps aux`; confirm which ps the target
// image ships before relying on this.
func (ais *AudioInputSupervisor) findExistingAudioInputProcess() (int, error) {
	// Look for the C binary name
	binaryName := "jetkvm_audio_input"

	// Use ps to find processes with C binary name
	cmd := exec.Command("ps", "aux")
	output, err := cmd.Output()
	if err != nil {
		return 0, fmt.Errorf("failed to run ps command: %w", err)
	}

	// Parse ps output to find audio input server processes
	lines := strings.Split(string(output), "\n")
	for _, line := range lines {
		if strings.Contains(line, binaryName) {
			// Extract PID from ps output (second column)
			fields := strings.Fields(line)
			if len(fields) >= 2 {
				// PID is the first field
				if pid, err := strconv.Atoi(fields[0]); err == nil {
					// Only report candidates that are actually alive.
					if ais.isProcessRunning(pid) {
						return pid, nil
					}
				}
			}
		}
	}

	return 0, fmt.Errorf("no existing audio input server process found")
}
|
||||
|
||||
// isProcessRunning checks if a process with the given PID is still running
|
||||
func (ais *AudioInputSupervisor) isProcessRunning(pid int) bool {
|
||||
// Try to send signal 0 to check if process exists
|
||||
process, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err = process.Signal(syscall.Signal(0))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// HasExistingProcess checks if there's already an audio input server process running
// This is a public wrapper around findExistingAudioInputProcess for external access
// Returns the discovered PID and true on success; (0, false) otherwise.
func (ais *AudioInputSupervisor) HasExistingProcess() (int, bool) {
	pid, err := ais.findExistingAudioInputProcess()
	return pid, err == nil
}
|
||||
|
|
@ -0,0 +1,183 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Buffer pool for zero-allocation writes
// Each pooled buffer holds a full header plus the maximum payload;
// WriteMessage borrows one per call instead of allocating. Pointers are
// pooled (not slices) to avoid an allocation on Put.
var writeBufferPool = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, ipcHeaderSize+ipcMaxFrameSize)
		return &buf
	},
}
|
||||
|
||||
// IPC Protocol constants (matches C implementation in ipc_protocol.h)
// Wire header layout: magic (LE u32) | msg type (u8) | payload length (LE u32).
const (
	ipcMagicOutput  = 0x4A4B4F55 // "JKOU" - Output (device → browser)
	ipcMagicInput   = 0x4A4B4D49 // "JKMI" - Input (browser → device)
	ipcHeaderSize   = 9          // Reduced from 17 (removed 8-byte timestamp)
	ipcMaxFrameSize = 1024       // 128kbps @ 20ms = ~600 bytes worst case with VBR+FEC
	ipcMsgTypeOpus  = 0          // payload is an Opus frame
	ipcMsgTypeConfig = 1         // payload is a configuration blob
	ipcMsgTypeStop   = 3         // stop control message
	connectTimeout   = 5 * time.Second // bound on DialTimeout in Connect
	readTimeout      = 2 * time.Second // per-call deadline in ReadMessage
)
|
||||
|
||||
// IPCClient manages Unix socket communication with audio subprocess
type IPCClient struct {
	socketPath  string         // Unix socket path to dial
	magicNumber uint32         // expected magic value in every message header
	conn        net.Conn       // nil when disconnected; guarded by mu
	mu          sync.Mutex     // serializes connect/disconnect/read/write
	logger      zerolog.Logger
	readBuf     []byte // Reusable buffer for reads (single reader per client)
}
|
||||
|
||||
// NewIPCClient creates a new IPC client
|
||||
// For output: socketPath="/var/run/audio_output.sock", magic=ipcMagicOutput
|
||||
// For input: socketPath="/var/run/audio_input.sock", magic=ipcMagicInput
|
||||
func NewIPCClient(name, socketPath string, magicNumber uint32) *IPCClient {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", name+"-ipc").Logger()
|
||||
|
||||
return &IPCClient{
|
||||
socketPath: socketPath,
|
||||
magicNumber: magicNumber,
|
||||
logger: logger,
|
||||
readBuf: make([]byte, ipcMaxFrameSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Connect establishes connection to the subprocess
|
||||
func (c *IPCClient) Connect() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.conn != nil {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
}
|
||||
|
||||
conn, err := net.DialTimeout("unix", c.socketPath, connectTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to %s: %w", c.socketPath, err)
|
||||
}
|
||||
|
||||
c.conn = conn
|
||||
c.logger.Debug().Str("socket", c.socketPath).Msg("connected to subprocess")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disconnect closes the connection
|
||||
func (c *IPCClient) Disconnect() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.conn != nil {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
c.logger.Debug().Msg("disconnected from subprocess")
|
||||
}
|
||||
}
|
||||
|
||||
// IsConnected returns true if currently connected
// (i.e. Connect has succeeded and Disconnect has not been called since).
func (c *IPCClient) IsConnected() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.conn != nil
}
|
||||
|
||||
// ReadMessage reads a complete IPC message (header + payload)
// Returns message type, payload data, and error
// IMPORTANT: The returned payload slice is only valid until the next ReadMessage call.
// Callers must use the data immediately or copy if retention is needed.
func (c *IPCClient) ReadMessage() (uint8, []byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.conn == nil {
		return 0, nil, fmt.Errorf("not connected")
	}

	// Set read deadline (refreshed on every call, so a stalled peer cannot
	// block us for more than readTimeout)
	c.conn.SetReadDeadline(time.Now().Add(readTimeout))

	// Read 9-byte header
	var header [ipcHeaderSize]byte
	if _, err := io.ReadFull(c.conn, header[:]); err != nil {
		return 0, nil, fmt.Errorf("failed to read header: %w", err)
	}

	// Parse header (little-endian): magic u32 | type u8 | length u32
	magic := binary.LittleEndian.Uint32(header[0:4])
	msgType := header[4]
	length := binary.LittleEndian.Uint32(header[5:9])

	// Validate magic number (guards against protocol/stream desync)
	if magic != c.magicNumber {
		return 0, nil, fmt.Errorf("invalid magic: got 0x%X, expected 0x%X", magic, c.magicNumber)
	}

	// Validate length
	if length > ipcMaxFrameSize {
		return 0, nil, fmt.Errorf("message too large: %d bytes", length)
	}

	// Read payload if present
	if length == 0 {
		return msgType, nil, nil
	}

	// Read directly into reusable buffer (zero-allocation)
	if _, err := io.ReadFull(c.conn, c.readBuf[:length]); err != nil {
		return 0, nil, fmt.Errorf("failed to read payload: %w", err)
	}

	// Return slice of readBuf - caller must use immediately, data is only valid until next ReadMessage
	// This avoids allocation in hot path (50 frames/sec)
	return msgType, c.readBuf[:length], nil
}
|
||||
|
||||
// WriteMessage writes a complete IPC message
|
||||
func (c *IPCClient) WriteMessage(msgType uint8, payload []byte) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.conn == nil {
|
||||
return fmt.Errorf("not connected")
|
||||
}
|
||||
|
||||
length := uint32(len(payload))
|
||||
if length > ipcMaxFrameSize {
|
||||
return fmt.Errorf("payload too large: %d bytes", length)
|
||||
}
|
||||
|
||||
// Get buffer from pool for zero-allocation write
|
||||
bufPtr := writeBufferPool.Get().(*[]byte)
|
||||
defer writeBufferPool.Put(bufPtr)
|
||||
buf := *bufPtr
|
||||
|
||||
// Build header in pooled buffer (9 bytes, little-endian)
|
||||
binary.LittleEndian.PutUint32(buf[0:4], c.magicNumber)
|
||||
buf[4] = msgType
|
||||
binary.LittleEndian.PutUint32(buf[5:9], length)
|
||||
|
||||
// Copy payload after header
|
||||
copy(buf[ipcHeaderSize:], payload)
|
||||
|
||||
// Write header + payload atomically
|
||||
if _, err := c.conn.Write(buf[:ipcHeaderSize+length]); err != nil {
|
||||
return fmt.Errorf("failed to write message: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,257 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Common IPC message interface
// Implemented by the concrete input/output message types; WriteIPCMessage
// consumes it to serialize header and payload onto the wire.
type IPCMessage interface {
	GetMagic() uint32
	GetType() uint8
	GetLength() uint32
	GetTimestamp() int64
	GetData() []byte
}
|
||||
|
||||
// Common optimized message structure
// A reusable message: fixed header scratch space plus a payload buffer that
// is truncated (not freed) when returned to the pool.
type OptimizedMessage struct {
	header [17]byte // Pre-allocated header buffer
	data   []byte   // Reusable data buffer
}
|
||||
|
||||
// Generic message pool for both input and output
// Two tiers: a mutex-guarded pre-allocated slice (fast path) and a buffered
// channel; hit/miss counters are updated atomically.
type GenericMessagePool struct {
	// 64-bit fields must be first for proper alignment on ARM
	hitCount  int64 // Pool hit counter (atomic)
	missCount int64 // Pool miss counter (atomic)

	pool         chan *OptimizedMessage
	preallocated []*OptimizedMessage // Pre-allocated messages
	preallocSize int                 // capacity of the pre-allocated tier
	maxPoolSize  int                 // total configured pool size
	mutex        sync.RWMutex        // guards preallocated
}
|
||||
|
||||
// NewGenericMessagePool creates a new generic message pool
|
||||
func NewGenericMessagePool(size int) *GenericMessagePool {
|
||||
pool := &GenericMessagePool{
|
||||
pool: make(chan *OptimizedMessage, size),
|
||||
preallocSize: size / 4, // 25% pre-allocated for immediate use
|
||||
maxPoolSize: size,
|
||||
}
|
||||
|
||||
// Pre-allocate some messages for immediate use
|
||||
pool.preallocated = make([]*OptimizedMessage, pool.preallocSize)
|
||||
for i := 0; i < pool.preallocSize; i++ {
|
||||
pool.preallocated[i] = &OptimizedMessage{
|
||||
data: make([]byte, 0, Config.MaxFrameSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Fill the channel pool
|
||||
for i := 0; i < size-pool.preallocSize; i++ {
|
||||
select {
|
||||
case pool.pool <- &OptimizedMessage{
|
||||
data: make([]byte, 0, Config.MaxFrameSize),
|
||||
}:
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return pool
|
||||
}
|
||||
|
||||
// Get retrieves an optimized message from the pool
|
||||
func (mp *GenericMessagePool) Get() *OptimizedMessage {
|
||||
// Try pre-allocated first (fastest path)
|
||||
mp.mutex.Lock()
|
||||
if len(mp.preallocated) > 0 {
|
||||
msg := mp.preallocated[len(mp.preallocated)-1]
|
||||
mp.preallocated = mp.preallocated[:len(mp.preallocated)-1]
|
||||
mp.mutex.Unlock()
|
||||
atomic.AddInt64(&mp.hitCount, 1)
|
||||
return msg
|
||||
}
|
||||
mp.mutex.Unlock()
|
||||
|
||||
// Try channel pool
|
||||
select {
|
||||
case msg := <-mp.pool:
|
||||
atomic.AddInt64(&mp.hitCount, 1)
|
||||
return msg
|
||||
default:
|
||||
// Pool empty, create new message
|
||||
atomic.AddInt64(&mp.missCount, 1)
|
||||
return &OptimizedMessage{
|
||||
data: make([]byte, 0, Config.MaxFrameSize),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Put returns an optimized message to the pool
|
||||
func (mp *GenericMessagePool) Put(msg *OptimizedMessage) {
|
||||
if msg == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Reset the message for reuse
|
||||
msg.data = msg.data[:0]
|
||||
|
||||
// Try to return to pre-allocated slice first
|
||||
mp.mutex.Lock()
|
||||
if len(mp.preallocated) < mp.preallocSize {
|
||||
mp.preallocated = append(mp.preallocated, msg)
|
||||
mp.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
mp.mutex.Unlock()
|
||||
|
||||
// Try to return to channel pool
|
||||
select {
|
||||
case mp.pool <- msg:
|
||||
// Successfully returned to pool
|
||||
default:
|
||||
// Pool full, let GC handle it
|
||||
}
|
||||
}
|
||||
|
||||
// GetStats returns pool statistics
|
||||
func (mp *GenericMessagePool) GetStats() (hitCount, missCount int64, hitRate float64) {
|
||||
hits := atomic.LoadInt64(&mp.hitCount)
|
||||
misses := atomic.LoadInt64(&mp.missCount)
|
||||
total := hits + misses
|
||||
if total > 0 {
|
||||
hitRate = float64(hits) / float64(total) * 100
|
||||
}
|
||||
return hits, misses, hitRate
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
// EncodeMessageHeader encodes a message header into a provided byte slice
|
||||
func EncodeMessageHeader(header []byte, magic uint32, msgType uint8, length uint32, timestamp int64) {
|
||||
binary.LittleEndian.PutUint32(header[0:4], magic)
|
||||
header[4] = msgType
|
||||
binary.LittleEndian.PutUint32(header[5:9], length)
|
||||
binary.LittleEndian.PutUint64(header[9:17], uint64(timestamp))
|
||||
}
|
||||
|
||||
// EncodeAudioConfig encodes basic audio configuration to binary format
|
||||
func EncodeAudioConfig(sampleRate, channels, frameSize int) []byte {
|
||||
data := make([]byte, 12) // 3 * int32
|
||||
binary.LittleEndian.PutUint32(data[0:4], uint32(sampleRate))
|
||||
binary.LittleEndian.PutUint32(data[4:8], uint32(channels))
|
||||
binary.LittleEndian.PutUint32(data[8:12], uint32(frameSize))
|
||||
return data
|
||||
}
|
||||
|
||||
// EncodeOpusConfig encodes complete Opus configuration to binary format
|
||||
func EncodeOpusConfig(sampleRate, channels, frameSize, bitrate, complexity, vbr, signalType, bandwidth, dtx int) []byte {
|
||||
data := make([]byte, 36) // 9 * int32
|
||||
binary.LittleEndian.PutUint32(data[0:4], uint32(sampleRate))
|
||||
binary.LittleEndian.PutUint32(data[4:8], uint32(channels))
|
||||
binary.LittleEndian.PutUint32(data[8:12], uint32(frameSize))
|
||||
binary.LittleEndian.PutUint32(data[12:16], uint32(bitrate))
|
||||
binary.LittleEndian.PutUint32(data[16:20], uint32(complexity))
|
||||
binary.LittleEndian.PutUint32(data[20:24], uint32(vbr))
|
||||
binary.LittleEndian.PutUint32(data[24:28], uint32(signalType))
|
||||
binary.LittleEndian.PutUint32(data[28:32], uint32(bandwidth))
|
||||
binary.LittleEndian.PutUint32(data[32:36], uint32(dtx))
|
||||
return data
|
||||
}
|
||||
|
||||
// Common write message function
|
||||
func WriteIPCMessage(conn net.Conn, msg IPCMessage, pool *GenericMessagePool, droppedFramesCounter *int64) error {
|
||||
if conn == nil {
|
||||
return fmt.Errorf("connection is nil")
|
||||
}
|
||||
|
||||
// Get optimized message from pool for header preparation
|
||||
optMsg := pool.Get()
|
||||
defer pool.Put(optMsg)
|
||||
|
||||
// Prepare header in pre-allocated buffer
|
||||
EncodeMessageHeader(optMsg.header[:], msg.GetMagic(), msg.GetType(), msg.GetLength(), msg.GetTimestamp())
|
||||
|
||||
// Set write deadline for timeout handling (more efficient than goroutines)
|
||||
if deadline := time.Now().Add(Config.WriteTimeout); deadline.After(time.Now()) {
|
||||
if err := conn.SetWriteDeadline(deadline); err != nil {
|
||||
// If we can't set deadline, proceed without it
|
||||
_ = err // Explicitly ignore error for linter
|
||||
}
|
||||
}
|
||||
|
||||
// Write header using pre-allocated buffer (synchronous for better performance)
|
||||
_, err := conn.Write(optMsg.header[:])
|
||||
if err != nil {
|
||||
if droppedFramesCounter != nil {
|
||||
atomic.AddInt64(droppedFramesCounter, 1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Write data if present
|
||||
if msg.GetLength() > 0 && msg.GetData() != nil {
|
||||
_, err = conn.Write(msg.GetData())
|
||||
if err != nil {
|
||||
if droppedFramesCounter != nil {
|
||||
atomic.AddInt64(droppedFramesCounter, 1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Clear write deadline after successful write
|
||||
_ = conn.SetWriteDeadline(time.Time{}) // Ignore error as this is cleanup
|
||||
return nil
|
||||
}
|
||||
|
||||
// Common connection acceptance with retry logic
|
||||
func AcceptConnectionWithRetry(listener net.Listener, maxRetries int, retryDelay time.Duration) (net.Conn, error) {
|
||||
var lastErr error
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
conn, err := listener.Accept()
|
||||
if err == nil {
|
||||
return conn, nil
|
||||
}
|
||||
lastErr = err
|
||||
if i < maxRetries-1 {
|
||||
time.Sleep(retryDelay)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to accept connection after %d retries: %w", maxRetries, lastErr)
|
||||
}
|
||||
|
||||
// Common frame statistics structure
// A plain snapshot pair; populate via GetFrameStats for atomic reads.
type FrameStats struct {
	Total   int64 // total frames observed
	Dropped int64 // frames dropped
}
|
||||
|
||||
// GetFrameStats safely retrieves frame statistics
|
||||
func GetFrameStats(totalCounter, droppedCounter *int64) FrameStats {
|
||||
return FrameStats{
|
||||
Total: atomic.LoadInt64(totalCounter),
|
||||
Dropped: atomic.LoadInt64(droppedCounter),
|
||||
}
|
||||
}
|
||||
|
||||
// CalculateDropRate calculates the drop rate percentage
|
||||
func CalculateDropRate(stats FrameStats) float64 {
|
||||
if stats.Total == 0 {
|
||||
return 0.0
|
||||
}
|
||||
return float64(stats.Dropped) / float64(stats.Total) * 100.0
|
||||
}
|
||||
|
||||
// ResetFrameStats atomically zeroes both frame counters.
func ResetFrameStats(totalCounter, droppedCounter *int64) {
	for _, counter := range []*int64{totalCounter, droppedCounter} {
		atomic.StoreInt64(counter, 0)
	}
}
|
||||
|
|
@ -1,285 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
)
|
||||
|
||||
// Component name constant for logging
// (used as the component field on audio-input-client log entries)
const (
	AudioInputClientComponent = "audio-input-client"
)
|
||||
|
||||
// Constants are now defined in unified_ipc.go
// NOTE(review): these capture Config values once at package init; later
// changes to Config are not reflected here — confirm that is intended.
var (
	maxFrameSize    = Config.MaxFrameSize    // Maximum Opus frame size
	messagePoolSize = Config.MessagePoolSize // Pre-allocated message pool size
)
|
||||
|
||||
|
||||
// AudioInputClient handles IPC communication from the main process
type AudioInputClient struct {
	// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
	droppedFrames int64 // Atomic counter for dropped frames
	totalFrames   int64 // Atomic counter for total frames

	conn    net.Conn   // nil until Connect succeeds; guarded by mtx
	mtx     sync.Mutex // protects conn and running
	running bool       // true between a successful Connect and Disconnect
}
|
||||
|
||||
// NewAudioInputClient creates a new audio input client
|
||||
func NewAudioInputClient() *AudioInputClient {
|
||||
return &AudioInputClient{}
|
||||
}
|
||||
|
||||
// Connect connects to the audio input server
//
// Retries up to 10 times with exponential backoff (the delay doubles every
// third attempt, capped at Config.MaxRetryDelay) because the server
// subprocess may still be creating its socket when we start. Idempotent:
// returns nil immediately when already connected.
func (aic *AudioInputClient) Connect() error {
	aic.mtx.Lock()
	defer aic.mtx.Unlock()

	if aic.running {
		return nil // Already connected
	}

	// Ensure clean state before connecting
	if aic.conn != nil {
		aic.conn.Close()
		aic.conn = nil
	}

	socketPath := getInputSocketPath()
	// Try connecting multiple times as the server might not be ready
	// Reduced retry count and delay for faster startup
	for i := 0; i < 10; i++ {
		conn, err := net.Dial("unix", socketPath)
		if err == nil {
			aic.conn = conn
			aic.running = true
			// Reset frame counters on successful connection
			atomic.StoreInt64(&aic.totalFrames, 0)
			atomic.StoreInt64(&aic.droppedFrames, 0)
			return nil
		}
		// Exponential backoff starting from config
		backoffStart := Config.BackoffStart
		delay := time.Duration(backoffStart.Nanoseconds()*(1<<uint(i/3))) * time.Nanosecond
		maxDelay := Config.MaxRetryDelay
		if delay > maxDelay {
			delay = maxDelay
		}
		time.Sleep(delay)
	}

	// Ensure clean state on connection failure
	aic.conn = nil
	aic.running = false
	return fmt.Errorf("failed to connect to audio input server after 10 attempts")
}
|
||||
|
||||
// Disconnect disconnects from the audio input server
|
||||
func (aic *AudioInputClient) Disconnect() {
|
||||
aic.mtx.Lock()
|
||||
defer aic.mtx.Unlock()
|
||||
|
||||
if !aic.running {
|
||||
return
|
||||
}
|
||||
|
||||
aic.running = false
|
||||
|
||||
if aic.conn != nil {
|
||||
// Send stop message
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: inputMagicNumber,
|
||||
Type: MessageTypeStop,
|
||||
Length: 0,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
}
|
||||
_ = aic.writeMessage(msg) // Ignore errors during shutdown
|
||||
|
||||
aic.conn.Close()
|
||||
aic.conn = nil
|
||||
}
|
||||
}
|
||||
|
||||
// SendFrame sends an Opus frame to the audio input server
|
||||
func (aic *AudioInputClient) SendFrame(frame []byte) error {
|
||||
// Fast path validation
|
||||
if len(frame) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
aic.mtx.Lock()
|
||||
if !aic.running || aic.conn == nil {
|
||||
aic.mtx.Unlock()
|
||||
return fmt.Errorf("not connected")
|
||||
}
|
||||
|
||||
// Direct message creation without timestamp overhead
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: inputMagicNumber,
|
||||
Type: MessageTypeOpusFrame,
|
||||
Length: uint32(len(frame)),
|
||||
Data: frame,
|
||||
}
|
||||
|
||||
err := aic.writeMessage(msg)
|
||||
aic.mtx.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// SendFrameZeroCopy sends a zero-copy Opus frame to the audio input server
|
||||
func (aic *AudioInputClient) SendFrameZeroCopy(frame *ZeroCopyAudioFrame) error {
|
||||
aic.mtx.Lock()
|
||||
defer aic.mtx.Unlock()
|
||||
|
||||
if !aic.running || aic.conn == nil {
|
||||
return fmt.Errorf("not connected to audio input server")
|
||||
}
|
||||
|
||||
if frame == nil {
|
||||
return nil // Nil frame, ignore
|
||||
}
|
||||
|
||||
frameLen := frame.Length()
|
||||
if frameLen == 0 {
|
||||
return nil // Empty frame, ignore
|
||||
}
|
||||
|
||||
// Inline frame validation to reduce function call overhead
|
||||
if frameLen > maxFrameSize {
|
||||
return ErrFrameDataTooLarge
|
||||
}
|
||||
|
||||
// Use zero-copy data directly
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: inputMagicNumber,
|
||||
Type: MessageTypeOpusFrame,
|
||||
Length: uint32(frameLen),
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: frame.Data(), // Zero-copy data access
|
||||
}
|
||||
|
||||
return aic.writeMessage(msg)
|
||||
}
|
||||
|
||||
// SendConfig sends a configuration update to the audio input server
|
||||
func (aic *AudioInputClient) SendConfig(config UnifiedIPCConfig) error {
|
||||
aic.mtx.Lock()
|
||||
defer aic.mtx.Unlock()
|
||||
|
||||
if !aic.running || aic.conn == nil {
|
||||
return fmt.Errorf("not connected to audio input server")
|
||||
}
|
||||
|
||||
// Validate configuration parameters
|
||||
if err := ValidateInputIPCConfig(config.SampleRate, config.Channels, config.FrameSize); err != nil {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", AudioInputClientComponent).Logger()
|
||||
logger.Error().Err(err).Msg("Configuration validation failed")
|
||||
return fmt.Errorf("input configuration validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Serialize config using common function
|
||||
data := EncodeAudioConfig(config.SampleRate, config.Channels, config.FrameSize)
|
||||
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: inputMagicNumber,
|
||||
Type: MessageTypeConfig,
|
||||
Length: uint32(len(data)),
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: data,
|
||||
}
|
||||
|
||||
return aic.writeMessage(msg)
|
||||
}
|
||||
|
||||
// SendOpusConfig sends a complete Opus encoder configuration update to the audio input server
|
||||
func (aic *AudioInputClient) SendOpusConfig(config UnifiedIPCOpusConfig) error {
|
||||
aic.mtx.Lock()
|
||||
defer aic.mtx.Unlock()
|
||||
|
||||
if !aic.running || aic.conn == nil {
|
||||
return fmt.Errorf("not connected to audio input server")
|
||||
}
|
||||
|
||||
// Validate configuration parameters
|
||||
if config.SampleRate <= 0 || config.Channels <= 0 || config.FrameSize <= 0 || config.Bitrate <= 0 {
|
||||
return fmt.Errorf("invalid Opus configuration: SampleRate=%d, Channels=%d, FrameSize=%d, Bitrate=%d",
|
||||
config.SampleRate, config.Channels, config.FrameSize, config.Bitrate)
|
||||
}
|
||||
|
||||
// Serialize Opus configuration using common function
|
||||
data := EncodeOpusConfig(config.SampleRate, config.Channels, config.FrameSize, config.Bitrate, config.Complexity, config.VBR, config.SignalType, config.Bandwidth, config.DTX)
|
||||
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: inputMagicNumber,
|
||||
Type: MessageTypeOpusConfig,
|
||||
Length: uint32(len(data)),
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: data,
|
||||
}
|
||||
|
||||
return aic.writeMessage(msg)
|
||||
}
|
||||
|
||||
// SendHeartbeat sends a heartbeat message
|
||||
func (aic *AudioInputClient) SendHeartbeat() error {
|
||||
aic.mtx.Lock()
|
||||
defer aic.mtx.Unlock()
|
||||
|
||||
if !aic.running || aic.conn == nil {
|
||||
return fmt.Errorf("not connected to audio input server")
|
||||
}
|
||||
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: inputMagicNumber,
|
||||
Type: MessageTypeHeartbeat,
|
||||
Length: 0,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
return aic.writeMessage(msg)
|
||||
}
|
||||
|
||||
// Global shared message pool for input IPC clients
var globalInputMessagePool = NewGenericMessagePool(messagePoolSize)

// writeMessage serializes msg to the server connection via the shared
// WriteIPCMessage helper, counting every attempt in totalFrames and letting
// the helper account drops in droppedFrames. Caller must hold aic.mtx.
func (aic *AudioInputClient) writeMessage(msg *UnifiedIPCMessage) error {
	// Increment total frames counter
	atomic.AddInt64(&aic.totalFrames, 1)

	// Use shared WriteIPCMessage function with global message pool
	return WriteIPCMessage(aic.conn, msg, globalInputMessagePool, &aic.droppedFrames)
}
|
||||
|
||||
// IsConnected returns whether the client is connected
|
||||
func (aic *AudioInputClient) IsConnected() bool {
|
||||
aic.mtx.Lock()
|
||||
defer aic.mtx.Unlock()
|
||||
return aic.running && aic.conn != nil
|
||||
}
|
||||
|
||||
// GetFrameStats returns frame statistics
|
||||
func (aic *AudioInputClient) GetFrameStats() (total, dropped int64) {
|
||||
stats := GetFrameStats(&aic.totalFrames, &aic.droppedFrames)
|
||||
return stats.Total, stats.Dropped
|
||||
}
|
||||
|
||||
// GetDropRate returns the current frame drop rate as a percentage
|
||||
func (aic *AudioInputClient) GetDropRate() float64 {
|
||||
stats := GetFrameStats(&aic.totalFrames, &aic.droppedFrames)
|
||||
return CalculateDropRate(stats)
|
||||
}
|
||||
|
||||
// ResetStats resets frame statistics
|
||||
func (aic *AudioInputClient) ResetStats() {
|
||||
ResetFrameStats(&aic.totalFrames, &aic.droppedFrames)
|
||||
}
|
||||
|
||||
|
|
@ -1,213 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Global shared message pool for output IPC client header reading
// Shared across all AudioOutputClient instances to amortize allocations.
var globalOutputClientMessagePool = NewGenericMessagePool(Config.OutputMessagePoolSize)
|
||||
|
||||
// AudioOutputClient provides audio output IPC client functionality
type AudioOutputClient struct {
	// Atomic counters; kept first so the int64 fields stay 8-byte aligned on 32-bit ARM.
	droppedFrames int64 // Atomic dropped-frame counter (not incremented in this file — TODO confirm)
	totalFrames   int64 // Atomic counter for frames received/sent

	conn        net.Conn         // Unix-socket connection; nil when disconnected
	mtx         sync.Mutex       // Guards conn and running
	running     bool             // True while connected
	logger      zerolog.Logger   // Component-scoped logger
	socketPath  string           // Path of the output server socket
	magicNumber uint32           // Protocol magic for outgoing messages
	bufferPool  *AudioBufferPool // Pool of frame buffers to avoid per-frame allocations

	autoReconnect bool // Automatic-reconnection toggle — TODO confirm it is consumed anywhere
}
|
||||
|
||||
func NewAudioOutputClient() *AudioOutputClient {
|
||||
socketPath := getOutputSocketPath()
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-output-client").Logger()
|
||||
|
||||
return &AudioOutputClient{
|
||||
socketPath: socketPath,
|
||||
magicNumber: Config.OutputMagicNumber,
|
||||
logger: logger,
|
||||
bufferPool: NewAudioBufferPool(Config.MaxFrameSize),
|
||||
autoReconnect: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Connect connects to the audio output server
|
||||
func (c *AudioOutputClient) Connect() error {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
if c.running {
|
||||
return fmt.Errorf("audio output client is already connected")
|
||||
}
|
||||
|
||||
conn, err := net.Dial("unix", c.socketPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to audio output server: %w", err)
|
||||
}
|
||||
|
||||
c.conn = conn
|
||||
c.running = true
|
||||
c.logger.Info().Str("socket_path", c.socketPath).Msg("Connected to audio output server")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disconnect disconnects from the audio output server
|
||||
func (c *AudioOutputClient) Disconnect() {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
if !c.running {
|
||||
return
|
||||
}
|
||||
|
||||
c.running = false
|
||||
|
||||
if c.conn != nil {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
}
|
||||
|
||||
c.logger.Info().Msg("Disconnected from audio output server")
|
||||
}
|
||||
|
||||
// IsConnected returns whether the client is connected
|
||||
func (c *AudioOutputClient) IsConnected() bool {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
return c.running && c.conn != nil
|
||||
}
|
||||
|
||||
// ReceiveFrame reads one Opus-frame message from the output server and
// returns its payload in a pooled buffer. The caller is responsible for
// returning the buffer to the pool (PutAudioFrameBuffer) when done.
// Fails on short reads, a wrong magic number, a non-OpusFrame message type,
// or a payload exceeding Config.OutputMaxFrameSize.
//
// NOTE(review): the magic is checked against the package-level
// outputMagicNumber rather than c.magicNumber — confirm they always match.
// NOTE(review): frame buffers come from a pool sized Config.MaxFrameSize
// while the size check uses Config.OutputMaxFrameSize — confirm the latter
// never exceeds the former, otherwise the reslice below could panic.
func (c *AudioOutputClient) ReceiveFrame() ([]byte, error) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	if !c.running || c.conn == nil {
		return nil, fmt.Errorf("not connected to audio output server")
	}

	// Get optimized message from pool for header reading
	optMsg := globalOutputClientMessagePool.Get()
	defer globalOutputClientMessagePool.Put(optMsg)

	// Read header
	if _, err := io.ReadFull(c.conn, optMsg.header[:]); err != nil {
		return nil, fmt.Errorf("failed to read IPC message header from audio output server: %w", err)
	}

	// Parse header: magic(4 LE) | type(1) | length(4 LE) | timestamp(8 LE)
	magic := binary.LittleEndian.Uint32(optMsg.header[0:4])
	if magic != outputMagicNumber {
		return nil, fmt.Errorf("invalid magic number in IPC message: got 0x%x, expected 0x%x", magic, outputMagicNumber)
	}

	msgType := UnifiedMessageType(optMsg.header[4])
	if msgType != MessageTypeOpusFrame {
		return nil, fmt.Errorf("unexpected message type: %d", msgType)
	}

	size := binary.LittleEndian.Uint32(optMsg.header[5:9])
	timestamp := int64(binary.LittleEndian.Uint64(optMsg.header[9:17]))
	maxFrameSize := Config.OutputMaxFrameSize
	if int(size) > maxFrameSize {
		return nil, fmt.Errorf("received frame size validation failed: got %d bytes, maximum allowed %d bytes", size, maxFrameSize)
	}

	// Read frame data using buffer pool to avoid allocation
	frame := c.bufferPool.Get()
	frame = frame[:size] // Resize to actual frame size
	if size > 0 {
		if _, err := io.ReadFull(c.conn, frame); err != nil {
			c.bufferPool.Put(frame) // Return buffer on error
			return nil, fmt.Errorf("failed to read frame data: %w", err)
		}
	}

	// Note: Caller is responsible for returning frame to pool via PutAudioFrameBuffer()

	atomic.AddInt64(&c.totalFrames, 1)

	// Zero-cost trace logging for frame reception
	if c.logger.GetLevel() <= zerolog.TraceLevel {
		totalFrames := atomic.LoadInt64(&c.totalFrames)
		// Log only the first few frames, then every 1000th, to stay cheap.
		if totalFrames <= 5 || totalFrames%1000 == 1 {
			c.logger.Trace().
				Int("frame_size", int(size)).
				Int64("timestamp", timestamp).
				Int64("total_frames_received", totalFrames).
				Msg("Received audio frame from output server")
		}
	}

	return frame, nil
}
|
||||
|
||||
// SendOpusConfig sends Opus configuration to the audio output server
|
||||
func (c *AudioOutputClient) SendOpusConfig(config UnifiedIPCOpusConfig) error {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
if !c.running || c.conn == nil {
|
||||
return fmt.Errorf("not connected to audio output server")
|
||||
}
|
||||
|
||||
// Validate configuration parameters
|
||||
if config.SampleRate <= 0 || config.Channels <= 0 || config.FrameSize <= 0 || config.Bitrate <= 0 {
|
||||
return fmt.Errorf("invalid Opus configuration: SampleRate=%d, Channels=%d, FrameSize=%d, Bitrate=%d",
|
||||
config.SampleRate, config.Channels, config.FrameSize, config.Bitrate)
|
||||
}
|
||||
|
||||
// Serialize Opus configuration using common function
|
||||
data := EncodeOpusConfig(config.SampleRate, config.Channels, config.FrameSize, config.Bitrate, config.Complexity, config.VBR, config.SignalType, config.Bandwidth, config.DTX)
|
||||
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: c.magicNumber,
|
||||
Type: MessageTypeOpusConfig,
|
||||
Length: uint32(len(data)),
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
Data: data,
|
||||
}
|
||||
|
||||
return c.writeMessage(msg)
|
||||
}
|
||||
|
||||
// writeMessage writes a message to the connection
|
||||
func (c *AudioOutputClient) writeMessage(msg *UnifiedIPCMessage) error {
|
||||
header := make([]byte, 17)
|
||||
EncodeMessageHeader(header, msg.Magic, uint8(msg.Type), msg.Length, msg.Timestamp)
|
||||
|
||||
if _, err := c.conn.Write(header); err != nil {
|
||||
return fmt.Errorf("failed to write header: %w", err)
|
||||
}
|
||||
|
||||
if msg.Length > 0 && msg.Data != nil {
|
||||
if _, err := c.conn.Write(msg.Data); err != nil {
|
||||
return fmt.Errorf("failed to write data: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddInt64(&c.totalFrames, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetClientStats returns client performance statistics
|
||||
func (c *AudioOutputClient) GetClientStats() (total, dropped int64) {
|
||||
stats := GetFrameStats(&c.totalFrames, &c.droppedFrames)
|
||||
return stats.Total, stats.Dropped
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
// getOutputSocketPath is defined in ipc_unified.go
|
||||
|
|
@ -1,681 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Unified IPC constants
var (
	outputMagicNumber uint32 = Config.OutputMagicNumber // "JKOU" (JetKVM Output)
	inputMagicNumber  uint32 = Config.InputMagicNumber  // "JKMI" (JetKVM Microphone Input)
	// Socket file names used by the socket-path helpers.
	outputSocketName = "audio_output.sock"
	inputSocketName  = "audio_input.sock"
	// Wire header layout: magic(4) + type(1) + length(4) + timestamp(8).
	headerSize = 17 // Fixed header size: 4+1+4+8 bytes
)
|
||||
|
||||
// Header buffer pool to reduce allocation overhead
// Stores *[]byte (a pointer avoids an interface allocation on Put) of
// headerSize bytes each; used by readMessage.
var headerBufferPool = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, headerSize)
		return &buf
	},
}
|
||||
|
||||
// UnifiedMessageType represents the type of IPC message for both input and output
type UnifiedMessageType uint8

// Wire values are the iota ordinals below; both ends must agree on the order.
const (
	MessageTypeOpusFrame  UnifiedMessageType = iota // Encoded Opus audio frame payload
	MessageTypeConfig                               // Basic audio configuration (rate/channels/frame size)
	MessageTypeOpusConfig                           // Full Opus encoder configuration
	MessageTypeStop                                 // Graceful shutdown notification
	MessageTypeHeartbeat                            // Liveness keep-alive
	MessageTypeAck                                  // Acknowledgement — not referenced in this file; TODO confirm usage
)
|
||||
|
||||
// UnifiedIPCMessage represents a message sent over IPC for both input and output
type UnifiedIPCMessage struct {
	Magic     uint32             // Protocol magic (input or output variant)
	Type      UnifiedMessageType // Message discriminator
	Length    uint32             // Payload length in bytes (len(Data))
	Timestamp int64              // Sender clock, UnixNano
	Data      []byte             // Payload; nil/empty for control messages
}
|
||||
|
||||
// Implement IPCMessage interface

// GetMagic returns the protocol magic number.
func (msg *UnifiedIPCMessage) GetMagic() uint32 {
	return msg.Magic
}

// GetType returns the message discriminator as a raw byte.
func (msg *UnifiedIPCMessage) GetType() uint8 {
	return uint8(msg.Type)
}

// GetLength returns the payload length in bytes.
func (msg *UnifiedIPCMessage) GetLength() uint32 {
	return msg.Length
}

// GetTimestamp returns the sender timestamp (UnixNano).
func (msg *UnifiedIPCMessage) GetTimestamp() int64 {
	return msg.Timestamp
}

// GetData returns the raw payload slice (not a copy).
func (msg *UnifiedIPCMessage) GetData() []byte {
	return msg.Data
}
|
||||
|
||||
// UnifiedIPCConfig represents configuration for audio
type UnifiedIPCConfig struct {
	SampleRate int // Audio sample rate in Hz
	Channels   int // Number of audio channels
	FrameSize  int // Frame size — presumably samples per frame; TODO confirm unit
}
|
||||
|
||||
// UnifiedIPCOpusConfig represents Opus-specific configuration
type UnifiedIPCOpusConfig struct {
	SampleRate int // Audio sample rate in Hz
	Channels   int // Number of audio channels
	FrameSize  int // Frame size — presumably samples per frame; TODO confirm unit
	Bitrate    int // Target encoder bitrate
	Complexity int // Opus encoder complexity setting
	VBR        int // Variable-bitrate flag (int-encoded)
	SignalType int // Opus signal type hint
	Bandwidth  int // Opus bandwidth setting
	DTX        int // Discontinuous-transmission flag (int-encoded)
}
|
||||
|
||||
// UnifiedAudioServer provides common functionality for both input and output servers
type UnifiedAudioServer struct {
	// Atomic counters for performance monitoring
	// Placed first so the int64 fields stay 8-byte aligned on 32-bit ARM.
	droppedFrames int64 // Dropped frames counter (atomic)
	totalFrames   int64 // Total frames counter (atomic)

	listener net.Listener   // Unix-socket listener
	conn     net.Conn       // Current (single) client connection; nil when none
	mtx      sync.Mutex     // Guards conn and running
	running  bool           // True between Start and Stop
	logger   zerolog.Logger // Component-scoped logger

	// Message channels
	messageChan chan *UnifiedIPCMessage // Buffered channel for incoming messages
	processChan chan *UnifiedIPCMessage // Buffered channel for processing queue
	wg          sync.WaitGroup          // Wait group for goroutine coordination

	// Configuration
	socketPath     string // Path of the unix socket to listen on
	magicNumber    uint32 // Protocol magic expected/emitted by this flavor
	sendBufferSize int    // Socket send buffer size — TODO confirm where applied
	recvBufferSize int    // Socket receive buffer size — TODO confirm where applied
}
|
||||
|
||||
// NewUnifiedAudioServer creates a new unified audio server
|
||||
func NewUnifiedAudioServer(isInput bool) (*UnifiedAudioServer, error) {
|
||||
var socketPath string
|
||||
var magicNumber uint32
|
||||
var componentName string
|
||||
|
||||
if isInput {
|
||||
socketPath = getInputSocketPath()
|
||||
magicNumber = inputMagicNumber
|
||||
componentName = "audio-input-server"
|
||||
} else {
|
||||
socketPath = getOutputSocketPath()
|
||||
magicNumber = outputMagicNumber
|
||||
componentName = "audio-output-server"
|
||||
}
|
||||
|
||||
logger := logging.GetDefaultLogger().With().Str("component", componentName).Logger()
|
||||
|
||||
server := &UnifiedAudioServer{
|
||||
logger: logger,
|
||||
socketPath: socketPath,
|
||||
magicNumber: magicNumber,
|
||||
messageChan: make(chan *UnifiedIPCMessage, Config.ChannelBufferSize),
|
||||
processChan: make(chan *UnifiedIPCMessage, Config.ChannelBufferSize),
|
||||
sendBufferSize: Config.SocketOptimalBuffer,
|
||||
recvBufferSize: Config.SocketOptimalBuffer,
|
||||
}
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
// Start binds the unix socket and launches the accept, reader, and
// processor goroutines. Stale socket files are removed (with retries)
// before listening; "address already in use" triggers additional cleanup
// attempts. Returns an error when the server is already running or the
// socket cannot be created.
func (s *UnifiedAudioServer) Start() error {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	if s.running {
		return fmt.Errorf("server already running")
	}

	// Remove existing socket file with retry logic
	for i := 0; i < 3; i++ {
		if err := os.Remove(s.socketPath); err != nil && !os.IsNotExist(err) {
			s.logger.Warn().Err(err).Int("attempt", i+1).Msg("failed to remove existing socket file, retrying")
			time.Sleep(100 * time.Millisecond)
			continue
		}
		break
	}

	// Create listener with retry on address already in use
	var listener net.Listener
	var err error
	for i := 0; i < 3; i++ {
		listener, err = net.Listen("unix", s.socketPath)
		if err == nil {
			break
		}

		// If address is still in use, try to remove socket file again
		if strings.Contains(err.Error(), "address already in use") {
			s.logger.Warn().Err(err).Int("attempt", i+1).Msg("socket address in use, attempting cleanup and retry")
			os.Remove(s.socketPath)
			time.Sleep(200 * time.Millisecond)
			continue
		}

		// Any other listen error is fatal immediately.
		return fmt.Errorf("failed to create unix socket: %w", err)
	}

	// Reached only when all "address in use" retries were exhausted.
	if err != nil {
		return fmt.Errorf("failed to create unix socket after retries: %w", err)
	}

	s.listener = listener
	s.running = true

	// Start goroutines
	s.wg.Add(3)
	go s.acceptConnections()
	go s.startReaderGoroutine()
	go s.startProcessorGoroutine()

	s.logger.Info().Str("socket_path", s.socketPath).Msg("Unified audio server started")
	return nil
}
|
||||
|
||||
// Stop shuts the server down: marks it stopped, closes the listener and any
// client connection, closes the message channels, waits for the worker
// goroutines, and removes the socket file. No-op when not running.
//
// NOTE(review): messageChan is closed while the reader goroutine may still
// be select-sending into it, which can panic with "send on closed channel"
// — confirm shutdown ordering.
// NOTE(review): s.mtx is held across wg.Wait() while acceptConnections also
// locks s.mtx after accepting a connection, which could deadlock — verify.
func (s *UnifiedAudioServer) Stop() {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	if !s.running {
		return
	}

	s.running = false

	if s.listener != nil {
		s.listener.Close()
	}

	if s.conn != nil {
		s.conn.Close()
	}

	// Close channels
	close(s.messageChan)
	close(s.processChan)

	// Wait for goroutines to finish
	s.wg.Wait()

	// Remove socket file
	os.Remove(s.socketPath)

	s.logger.Info().Msg("Unified audio server stopped")
}
|
||||
|
||||
// acceptConnections accepts clients in a loop until the server stops.
// Only a single client is served at a time: a new connection replaces
// (and closes) the previous one.
//
// NOTE(review): s.running is read without holding s.mtx here — benign in
// practice but technically a data race; verify against the race detector.
func (s *UnifiedAudioServer) acceptConnections() {
	defer s.wg.Done()

	for s.running {
		conn, err := AcceptConnectionWithRetry(s.listener, 3, 100*time.Millisecond)
		if err != nil {
			// Suppress the error during shutdown (listener closed by Stop).
			if s.running {
				s.logger.Error().Err(err).Msg("Failed to accept connection")
			}
			continue
		}

		s.mtx.Lock()
		if s.conn != nil {
			s.conn.Close()
		}
		s.conn = conn
		s.mtx.Unlock()

		s.logger.Info().Msg("Client connected")
	}
}
|
||||
|
||||
// startReaderGoroutine reads messages from the current connection and
// forwards them to messageChan, dropping (and counting) messages when the
// channel is full. Polls every 10ms while no client is connected.
//
// NOTE(review): s.running is read without s.mtx, and the send into
// messageChan can race with Stop's close of that channel — verify.
// NOTE(review): a persistent read error spins this loop with no backoff.
func (s *UnifiedAudioServer) startReaderGoroutine() {
	defer s.wg.Done()

	for s.running {
		s.mtx.Lock()
		conn := s.conn
		s.mtx.Unlock()

		if conn == nil {
			time.Sleep(10 * time.Millisecond)
			continue
		}

		msg, err := s.readMessage(conn)
		if err != nil {
			// Suppress read errors during shutdown (conn closed by Stop).
			if s.running {
				s.logger.Error().Err(err).Msg("Failed to read message")
			}
			continue
		}

		select {
		case s.messageChan <- msg:
		default:
			atomic.AddInt64(&s.droppedFrames, 1)
			s.logger.Warn().Msg("Message channel full, dropping message")
		}
	}
}
|
||||
|
||||
// startProcessorGoroutine handles processing messages
|
||||
func (s *UnifiedAudioServer) startProcessorGoroutine() {
|
||||
defer s.wg.Done()
|
||||
|
||||
for msg := range s.messageChan {
|
||||
select {
|
||||
case s.processChan <- msg:
|
||||
atomic.AddInt64(&s.totalFrames, 1)
|
||||
default:
|
||||
atomic.AddInt64(&s.droppedFrames, 1)
|
||||
s.logger.Warn().Msg("Process channel full, dropping message")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readMessage reads a message from the connection
|
||||
func (s *UnifiedAudioServer) readMessage(conn net.Conn) (*UnifiedIPCMessage, error) {
|
||||
// Get header buffer from pool
|
||||
headerPtr := headerBufferPool.Get().(*[]byte)
|
||||
header := *headerPtr
|
||||
defer headerBufferPool.Put(headerPtr)
|
||||
|
||||
if _, err := io.ReadFull(conn, header); err != nil {
|
||||
return nil, fmt.Errorf("failed to read header: %w", err)
|
||||
}
|
||||
|
||||
// Parse header
|
||||
magic := binary.LittleEndian.Uint32(header[0:4])
|
||||
if magic != s.magicNumber {
|
||||
return nil, fmt.Errorf("invalid magic number: expected %d, got %d", s.magicNumber, magic)
|
||||
}
|
||||
|
||||
msgType := UnifiedMessageType(header[4])
|
||||
length := binary.LittleEndian.Uint32(header[5:9])
|
||||
timestamp := int64(binary.LittleEndian.Uint64(header[9:17]))
|
||||
|
||||
// Validate length
|
||||
if length > uint32(Config.MaxFrameSize) {
|
||||
return nil, fmt.Errorf("message too large: %d bytes", length)
|
||||
}
|
||||
|
||||
// Read data
|
||||
var data []byte
|
||||
if length > 0 {
|
||||
data = make([]byte, length)
|
||||
if _, err := io.ReadFull(conn, data); err != nil {
|
||||
return nil, fmt.Errorf("failed to read data: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &UnifiedIPCMessage{
|
||||
Magic: magic,
|
||||
Type: msgType,
|
||||
Length: length,
|
||||
Timestamp: timestamp,
|
||||
Data: data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SendFrame sends a frame to the connected client
|
||||
func (s *UnifiedAudioServer) SendFrame(frame []byte) error {
|
||||
s.mtx.Lock()
|
||||
defer s.mtx.Unlock()
|
||||
|
||||
if !s.running || s.conn == nil {
|
||||
// Silently drop frames when no client is connected
|
||||
// This prevents "no client connected" warnings during startup and quality changes
|
||||
atomic.AddInt64(&s.droppedFrames, 1)
|
||||
return nil // Return nil to avoid flooding logs with connection warnings
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Create message
|
||||
msg := &UnifiedIPCMessage{
|
||||
Magic: s.magicNumber,
|
||||
Type: MessageTypeOpusFrame,
|
||||
Length: uint32(len(frame)),
|
||||
Timestamp: start.UnixNano(),
|
||||
Data: frame,
|
||||
}
|
||||
|
||||
// Write message to connection
|
||||
err := s.writeMessage(s.conn, msg)
|
||||
if err != nil {
|
||||
atomic.AddInt64(&s.droppedFrames, 1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Record latency for monitoring
|
||||
|
||||
atomic.AddInt64(&s.totalFrames, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeMessage writes a message to the connection
|
||||
func (s *UnifiedAudioServer) writeMessage(conn net.Conn, msg *UnifiedIPCMessage) error {
|
||||
header := make([]byte, 17)
|
||||
EncodeMessageHeader(header, msg.Magic, uint8(msg.Type), msg.Length, msg.Timestamp)
|
||||
|
||||
// Optimize: Use single write for header+data to reduce system calls
|
||||
if msg.Length > 0 && msg.Data != nil {
|
||||
// Pre-allocate combined buffer to avoid copying
|
||||
combined := make([]byte, len(header)+len(msg.Data))
|
||||
copy(combined, header)
|
||||
copy(combined[len(header):], msg.Data)
|
||||
if _, err := conn.Write(combined); err != nil {
|
||||
return fmt.Errorf("failed to write message: %w", err)
|
||||
}
|
||||
} else {
|
||||
if _, err := conn.Write(header); err != nil {
|
||||
return fmt.Errorf("failed to write header: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnifiedAudioClient provides common functionality for both input and output clients
type UnifiedAudioClient struct {
	// Atomic counters for frame statistics
	// Kept first so the int64 fields stay 8-byte aligned on 32-bit ARM.
	droppedFrames int64 // Atomic counter for dropped frames
	totalFrames   int64 // Atomic counter for total frames

	conn        net.Conn         // Unix-socket connection; nil when disconnected
	mtx         sync.Mutex       // Guards conn/running and health-check state
	running     bool             // True while connected
	logger      zerolog.Logger   // Component-scoped logger
	socketPath  string           // Input or output socket path
	magicNumber uint32           // Protocol magic matching the selected flavor
	bufferPool  *AudioBufferPool // Buffer pool for memory optimization

	// Connection health monitoring
	lastHealthCheck   time.Time     // Time of the most recent health check
	connectionErrors  int64         // Atomic counter for connection errors
	autoReconnect     bool          // Enable automatic reconnection
	healthCheckTicker *time.Ticker  // Drives periodic health checks; nil when stopped
	stopHealthCheck   chan struct{} // Signals the health-check goroutine to exit
}
|
||||
|
||||
// NewUnifiedAudioClient creates a new unified audio client
|
||||
func NewUnifiedAudioClient(isInput bool) *UnifiedAudioClient {
|
||||
var socketPath string
|
||||
var magicNumber uint32
|
||||
var componentName string
|
||||
|
||||
if isInput {
|
||||
socketPath = getInputSocketPath()
|
||||
magicNumber = inputMagicNumber
|
||||
componentName = "audio-input-client"
|
||||
} else {
|
||||
socketPath = getOutputSocketPath()
|
||||
magicNumber = outputMagicNumber
|
||||
componentName = "audio-output-client"
|
||||
}
|
||||
|
||||
logger := logging.GetDefaultLogger().With().Str("component", componentName).Logger()
|
||||
|
||||
return &UnifiedAudioClient{
|
||||
logger: logger,
|
||||
socketPath: socketPath,
|
||||
magicNumber: magicNumber,
|
||||
bufferPool: NewAudioBufferPool(Config.MaxFrameSize),
|
||||
autoReconnect: true, // Enable automatic reconnection by default
|
||||
stopHealthCheck: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Connect connects the client to the server, retrying up to
// Config.MaxConnectionAttempts times with adaptive backoff between
// attempts. Idempotent: returns nil immediately when already connected.
// On success the counters are reset and, when autoReconnect is enabled,
// health-check monitoring starts.
//
// NOTE(review): c.mtx is held for the whole retry loop, including the
// sleeps, blocking all other client methods meanwhile — confirm intended.
func (c *UnifiedAudioClient) Connect() error {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	if c.running {
		return nil // Already connected
	}

	// Ensure clean state before connecting
	if c.conn != nil {
		c.conn.Close()
		c.conn = nil
	}

	// Try connecting multiple times as the server might not be ready
	// Use configurable retry parameters for better control
	maxAttempts := Config.MaxConnectionAttempts
	initialDelay := Config.ConnectionRetryDelay
	maxDelay := Config.MaxConnectionRetryDelay
	backoffFactor := Config.ConnectionBackoffFactor

	for i := 0; i < maxAttempts; i++ {
		// Set connection timeout for each attempt
		conn, err := net.DialTimeout("unix", c.socketPath, Config.ConnectionTimeoutDelay)
		if err == nil {
			c.conn = conn
			c.running = true
			// Reset frame counters on successful connection
			atomic.StoreInt64(&c.totalFrames, 0)
			atomic.StoreInt64(&c.droppedFrames, 0)
			atomic.StoreInt64(&c.connectionErrors, 0)
			c.lastHealthCheck = time.Now()
			// Start health check monitoring if auto-reconnect is enabled
			if c.autoReconnect {
				c.startHealthCheck()
			}
			c.logger.Info().Str("socket_path", c.socketPath).Int("attempt", i+1).Msg("Connected to server")
			return nil
		}

		// Log connection attempt failure
		c.logger.Debug().Err(err).Str("socket_path", c.socketPath).Int("attempt", i+1).Int("max_attempts", maxAttempts).Msg("Connection attempt failed")

		// Don't sleep after the last attempt
		if i < maxAttempts-1 {
			// Calculate adaptive delay based on connection failure patterns
			delay := c.calculateAdaptiveDelay(i, initialDelay, maxDelay, backoffFactor)
			time.Sleep(delay)
		}
	}

	// Ensure clean state on connection failure
	c.conn = nil
	c.running = false
	return fmt.Errorf("failed to connect to audio server after %d attempts", Config.MaxConnectionAttempts)
}
|
||||
|
||||
// Disconnect disconnects the client from the server, stopping health-check
// monitoring and closing the socket. No-op when already disconnected.
//
// NOTE(review): stopHealthCheckMonitoring uses a non-blocking send, so a
// health-check goroutine currently busy inside performHealthCheck (waiting
// on c.mtx, which is held here) can miss the stop signal and leak — verify.
func (c *UnifiedAudioClient) Disconnect() {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	if !c.running {
		return
	}

	c.running = false

	// Stop health check monitoring
	c.stopHealthCheckMonitoring()

	if c.conn != nil {
		c.conn.Close()
		c.conn = nil
	}

	c.logger.Info().Msg("Disconnected from server")
}
|
||||
|
||||
// IsConnected returns whether the client is connected
|
||||
func (c *UnifiedAudioClient) IsConnected() bool {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
return c.running && c.conn != nil
|
||||
}
|
||||
|
||||
// GetFrameStats returns frame statistics
|
||||
func (c *UnifiedAudioClient) GetFrameStats() (total, dropped int64) {
|
||||
total = atomic.LoadInt64(&c.totalFrames)
|
||||
dropped = atomic.LoadInt64(&c.droppedFrames)
|
||||
return
|
||||
}
|
||||
|
||||
// startHealthCheck starts the connection health monitoring
// Replaces any existing ticker and spawns a goroutine that runs
// performHealthCheck on every tick until stopHealthCheck fires.
// Callers (Connect, SetAutoReconnect) hold c.mtx.
//
// NOTE(review): a previous monitor goroutine that missed its stop signal
// keeps selecting alongside the new one, and the goroutine dereferences
// c.healthCheckTicker which stopHealthCheckMonitoring sets to nil — verify
// repeated connects cannot accumulate goroutines or panic.
func (c *UnifiedAudioClient) startHealthCheck() {
	if c.healthCheckTicker != nil {
		c.healthCheckTicker.Stop()
	}

	c.healthCheckTicker = time.NewTicker(Config.HealthCheckInterval)
	go func() {
		for {
			select {
			case <-c.healthCheckTicker.C:
				c.performHealthCheck()
			case <-c.stopHealthCheck:
				return
			}
		}
	}()
}
|
||||
|
||||
// stopHealthCheckMonitoring stops the health check monitoring
// Stops and clears the ticker, then signals the monitor goroutine with a
// non-blocking send so this never blocks while c.mtx is held.
//
// NOTE(review): the non-blocking send loses the signal when the goroutine
// is not currently parked in its select, so the goroutine can leak; the
// goroutine also reads c.healthCheckTicker, which is nilled here — verify.
func (c *UnifiedAudioClient) stopHealthCheckMonitoring() {
	if c.healthCheckTicker != nil {
		c.healthCheckTicker.Stop()
		c.healthCheckTicker = nil
	}
	select {
	case c.stopHealthCheck <- struct{}{}:
	default:
	}
}
|
||||
|
||||
// performHealthCheck checks the connection health and attempts reconnection if needed
|
||||
func (c *UnifiedAudioClient) performHealthCheck() {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
if !c.running || c.conn == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Simple health check: try to get connection info
|
||||
if tcpConn, ok := c.conn.(*net.UnixConn); ok {
|
||||
if _, err := tcpConn.File(); err != nil {
|
||||
// Connection is broken
|
||||
atomic.AddInt64(&c.connectionErrors, 1)
|
||||
c.logger.Warn().Err(err).Msg("Connection health check failed, attempting reconnection")
|
||||
|
||||
// Close the broken connection
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
c.running = false
|
||||
|
||||
// Attempt reconnection
|
||||
go func() {
|
||||
time.Sleep(Config.ReconnectionInterval)
|
||||
if err := c.Connect(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to reconnect during health check")
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
c.lastHealthCheck = time.Now()
|
||||
}
|
||||
|
||||
// SetAutoReconnect enables or disables automatic reconnection
|
||||
func (c *UnifiedAudioClient) SetAutoReconnect(enabled bool) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
c.autoReconnect = enabled
|
||||
if !enabled {
|
||||
c.stopHealthCheckMonitoring()
|
||||
} else if c.running {
|
||||
c.startHealthCheck()
|
||||
}
|
||||
}
|
||||
|
||||
// GetConnectionErrors returns the number of connection errors
|
||||
func (c *UnifiedAudioClient) GetConnectionErrors() int64 {
|
||||
return atomic.LoadInt64(&c.connectionErrors)
|
||||
}
|
||||
|
||||
// calculateAdaptiveDelay calculates retry delay based on system load and failure patterns
|
||||
func (c *UnifiedAudioClient) calculateAdaptiveDelay(attempt int, initialDelay, maxDelay time.Duration, backoffFactor float64) time.Duration {
|
||||
// Base exponential backoff
|
||||
baseDelay := time.Duration(float64(initialDelay.Nanoseconds()) * math.Pow(backoffFactor, float64(attempt)))
|
||||
|
||||
// Get connection error history for adaptive adjustment
|
||||
errorCount := atomic.LoadInt64(&c.connectionErrors)
|
||||
|
||||
// Adjust delay based on recent connection errors
|
||||
// More errors = longer delays to avoid overwhelming the server
|
||||
adaptiveFactor := 1.0
|
||||
if errorCount > 5 {
|
||||
adaptiveFactor = 1.5 // 50% longer delays after many errors
|
||||
} else if errorCount > 10 {
|
||||
adaptiveFactor = 2.0 // Double delays after excessive errors
|
||||
}
|
||||
|
||||
// Apply adaptive factor
|
||||
adaptiveDelay := time.Duration(float64(baseDelay.Nanoseconds()) * adaptiveFactor)
|
||||
|
||||
// Ensure we don't exceed maximum delay
|
||||
if adaptiveDelay > maxDelay {
|
||||
adaptiveDelay = maxDelay
|
||||
}
|
||||
|
||||
// Add small random jitter to avoid thundering herd
|
||||
jitter := time.Duration(float64(adaptiveDelay.Nanoseconds()) * 0.1 * (0.5 + float64(attempt%3)/6.0))
|
||||
adaptiveDelay += jitter
|
||||
|
||||
return adaptiveDelay
|
||||
}
|
||||
|
||||
// Helper functions for socket paths
|
||||
func getInputSocketPath() string {
|
||||
return filepath.Join("/var/run", inputSocketName)
|
||||
}
|
||||
|
||||
func getOutputSocketPath() string {
|
||||
return filepath.Join("/var/run", outputSocketName)
|
||||
}
|
||||
|
|
@ -1,97 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// BaseAudioMetrics provides common metrics fields for both input and output.
//
// Field order is load-bearing: the int64 counters are manipulated with
// sync/atomic and MUST come first so they are 8-byte aligned on ARM32
// (Go only guarantees 64-bit alignment for the first word of a struct).
// Do not reorder or insert fields before them.
type BaseAudioMetrics struct {
	// Atomic int64 counters — first for proper ARM32 alignment.
	FramesProcessed int64 `json:"frames_processed"` // frames successfully handled
	FramesDropped   int64 `json:"frames_dropped"`   // frames discarded (errors/overflow)
	BytesProcessed  int64 `json:"bytes_processed"`  // cumulative payload bytes
	ConnectionDrops int64 `json:"connection_drops"` // times the IPC connection was lost

	// Non-atomic fields after the atomic block; guarded by the owning manager.
	LastFrameTime  time.Time     `json:"last_frame_time"`
	AverageLatency time.Duration `json:"average_latency"`
}
|
||||
|
||||
// BaseAudioManager provides common functionality for audio managers:
// shared metrics, a component logger, and an atomically managed
// running flag. Intended to be embedded by concrete managers.
type BaseAudioManager struct {
	// metrics holds shared counters; the int64 fields inside are accessed
	// atomically, so this struct must stay first for ARM32 alignment.
	metrics BaseAudioMetrics
	logger  zerolog.Logger
	running int32 // 0 = stopped, 1 = running; use atomic ops only
}

// NewBaseAudioManager creates a new base audio manager with zeroed
// metrics and the supplied logger.
func NewBaseAudioManager(logger zerolog.Logger) *BaseAudioManager {
	return &BaseAudioManager{
		logger: logger,
	}
}
|
||||
|
||||
// IsRunning returns whether the manager is running
|
||||
func (bam *BaseAudioManager) IsRunning() bool {
|
||||
return atomic.LoadInt32(&bam.running) == 1
|
||||
}
|
||||
|
||||
// setRunning atomically sets the running state
|
||||
func (bam *BaseAudioManager) setRunning(running bool) bool {
|
||||
if running {
|
||||
return atomic.CompareAndSwapInt32(&bam.running, 0, 1)
|
||||
}
|
||||
return atomic.CompareAndSwapInt32(&bam.running, 1, 0)
|
||||
}
|
||||
|
||||
// resetMetrics resets all metrics to zero
|
||||
func (bam *BaseAudioManager) resetMetrics() {
|
||||
atomic.StoreInt64(&bam.metrics.FramesProcessed, 0)
|
||||
atomic.StoreInt64(&bam.metrics.FramesDropped, 0)
|
||||
atomic.StoreInt64(&bam.metrics.BytesProcessed, 0)
|
||||
atomic.StoreInt64(&bam.metrics.ConnectionDrops, 0)
|
||||
bam.metrics.LastFrameTime = time.Time{}
|
||||
bam.metrics.AverageLatency = 0
|
||||
}
|
||||
|
||||
// getBaseMetrics returns a copy of the base metrics
|
||||
func (bam *BaseAudioManager) getBaseMetrics() BaseAudioMetrics {
|
||||
return BaseAudioMetrics{
|
||||
FramesProcessed: atomic.LoadInt64(&bam.metrics.FramesProcessed),
|
||||
FramesDropped: atomic.LoadInt64(&bam.metrics.FramesDropped),
|
||||
BytesProcessed: atomic.LoadInt64(&bam.metrics.BytesProcessed),
|
||||
ConnectionDrops: atomic.LoadInt64(&bam.metrics.ConnectionDrops),
|
||||
LastFrameTime: bam.metrics.LastFrameTime,
|
||||
AverageLatency: bam.metrics.AverageLatency,
|
||||
}
|
||||
}
|
||||
|
||||
// logComponentStart logs component start with consistent format
|
||||
func (bam *BaseAudioManager) logComponentStart(component string) {
|
||||
bam.logger.Debug().Str("component", component).Msg("starting component")
|
||||
}
|
||||
|
||||
// logComponentStarted logs component started with consistent format
|
||||
func (bam *BaseAudioManager) logComponentStarted(component string) {
|
||||
bam.logger.Debug().Str("component", component).Msg("component started successfully")
|
||||
}
|
||||
|
||||
// logComponentStop logs component stop with consistent format
|
||||
func (bam *BaseAudioManager) logComponentStop(component string) {
|
||||
bam.logger.Debug().Str("component", component).Msg("stopping component")
|
||||
}
|
||||
|
||||
// logComponentStopped logs component stopped with consistent format
|
||||
func (bam *BaseAudioManager) logComponentStopped(component string) {
|
||||
bam.logger.Debug().Str("component", component).Msg("component stopped")
|
||||
}
|
||||
|
||||
// logComponentError logs component error with consistent format
|
||||
func (bam *BaseAudioManager) logComponentError(component string, err error, msg string) {
|
||||
bam.logger.Error().Err(err).Str("component", component).Msg(msg)
|
||||
}
|
||||
|
|
@ -1,342 +0,0 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// BaseSupervisor provides common functionality for audio supervisors:
// context/lifecycle management, subprocess bookkeeping, exit tracking, and
// safe (single-close) channel semantics. Embedded by concrete supervisors.
type BaseSupervisor struct {
	ctx     context.Context
	cancel  context.CancelFunc
	logger  *zerolog.Logger
	mutex   sync.RWMutex // guards cmd, processPID, exit info, channel flags
	running int32        // 0 = stopped, 1 = supervising; atomic access only

	// Process management
	cmd        *exec.Cmd
	processPID int // PID of the live child; 0 when none

	// Exit tracking (last observed child exit)
	lastExitCode int
	lastExitTime time.Time

	// Channel management. Channels are recreated per supervision cycle by
	// initializeChannels; the *Closed flags prevent a double close panic
	// when Stop is called more than once.
	stopChan          chan struct{}
	processDone       chan struct{}
	stopChanClosed    bool
	processDoneClosed bool
}
|
||||
|
||||
// NewBaseSupervisor creates a new base supervisor
|
||||
func NewBaseSupervisor(componentName string) *BaseSupervisor {
|
||||
logger := logging.GetDefaultLogger().With().Str("component", componentName).Logger()
|
||||
return &BaseSupervisor{
|
||||
logger: &logger,
|
||||
|
||||
stopChan: make(chan struct{}),
|
||||
processDone: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// IsRunning returns whether the supervisor is currently running
|
||||
func (bs *BaseSupervisor) IsRunning() bool {
|
||||
return atomic.LoadInt32(&bs.running) == 1
|
||||
}
|
||||
|
||||
// GetProcessPID returns the current process PID
|
||||
func (bs *BaseSupervisor) GetProcessPID() int {
|
||||
bs.mutex.RLock()
|
||||
defer bs.mutex.RUnlock()
|
||||
return bs.processPID
|
||||
}
|
||||
|
||||
// GetLastExitInfo returns the last exit code and time
|
||||
func (bs *BaseSupervisor) GetLastExitInfo() (exitCode int, exitTime time.Time) {
|
||||
bs.mutex.RLock()
|
||||
defer bs.mutex.RUnlock()
|
||||
return bs.lastExitCode, bs.lastExitTime
|
||||
}
|
||||
|
||||
// logSupervisorStart logs supervisor start event
|
||||
func (bs *BaseSupervisor) logSupervisorStart() {
|
||||
bs.logger.Info().Msg("Supervisor starting")
|
||||
}
|
||||
|
||||
// logSupervisorStop logs supervisor stop event
|
||||
func (bs *BaseSupervisor) logSupervisorStop() {
|
||||
bs.logger.Info().Msg("Supervisor stopping")
|
||||
}
|
||||
|
||||
// createContext creates a new context for the supervisor
|
||||
func (bs *BaseSupervisor) createContext() {
|
||||
bs.ctx, bs.cancel = context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// cancelContext cancels the supervisor context
|
||||
func (bs *BaseSupervisor) cancelContext() {
|
||||
if bs.cancel != nil {
|
||||
bs.cancel()
|
||||
}
|
||||
}
|
||||
|
||||
// initializeChannels recreates channels for a new supervision cycle
|
||||
func (bs *BaseSupervisor) initializeChannels() {
|
||||
bs.mutex.Lock()
|
||||
defer bs.mutex.Unlock()
|
||||
|
||||
bs.stopChan = make(chan struct{})
|
||||
bs.processDone = make(chan struct{})
|
||||
bs.stopChanClosed = false
|
||||
bs.processDoneClosed = false
|
||||
}
|
||||
|
||||
// closeStopChan safely closes the stop channel
|
||||
func (bs *BaseSupervisor) closeStopChan() {
|
||||
bs.mutex.Lock()
|
||||
defer bs.mutex.Unlock()
|
||||
|
||||
if !bs.stopChanClosed {
|
||||
close(bs.stopChan)
|
||||
bs.stopChanClosed = true
|
||||
}
|
||||
}
|
||||
|
||||
// closeProcessDone safely closes the process done channel
|
||||
func (bs *BaseSupervisor) closeProcessDone() {
|
||||
bs.mutex.Lock()
|
||||
defer bs.mutex.Unlock()
|
||||
|
||||
if !bs.processDoneClosed {
|
||||
close(bs.processDone)
|
||||
bs.processDoneClosed = true
|
||||
}
|
||||
}
|
||||
|
||||
// terminateProcess gracefully terminates the current child process:
// SIGTERM first, then SIGKILL via forceKillProcess if it has not exited
// within `timeout`. processType is used only in log messages.
func (bs *BaseSupervisor) terminateProcess(timeout time.Duration, processType string) {
	// Snapshot cmd/pid under the read lock; the fields may be rewritten
	// by the supervision loop while we wait below.
	bs.mutex.RLock()
	cmd := bs.cmd
	pid := bs.processPID
	bs.mutex.RUnlock()

	if cmd == nil || cmd.Process == nil {
		return
	}

	bs.logger.Info().Int("pid", pid).Msgf("terminating %s process", processType)

	// Send SIGTERM first; a failure here is logged but we still wait,
	// since the process may already be exiting.
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		bs.logger.Warn().Err(err).Int("pid", pid).Msgf("failed to send SIGTERM to %s process", processType)
	}

	// Wait for graceful shutdown in a helper goroutine so we can time out.
	// NOTE(review): os/exec documents Cmd.Wait as single-call-only; this
	// Wait can race with the Wait in waitForProcessExit if both run for
	// the same command — confirm callers never overlap the two paths.
	done := make(chan struct{})
	go func() {
		_ = cmd.Wait()
		close(done)
	}()

	select {
	case <-done:
		bs.logger.Info().Int("pid", pid).Msgf("%s process terminated gracefully", processType)
	case <-time.After(timeout):
		bs.logger.Warn().Int("pid", pid).Msg("process did not terminate gracefully, sending SIGKILL")
		bs.forceKillProcess(processType)
	}
}
|
||||
|
||||
// forceKillProcess forcefully kills the current process
|
||||
func (bs *BaseSupervisor) forceKillProcess(processType string) {
|
||||
bs.mutex.RLock()
|
||||
cmd := bs.cmd
|
||||
pid := bs.processPID
|
||||
bs.mutex.RUnlock()
|
||||
|
||||
if cmd == nil || cmd.Process == nil {
|
||||
return
|
||||
}
|
||||
|
||||
bs.logger.Warn().Int("pid", pid).Msgf("force killing %s process", processType)
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
bs.logger.Error().Err(err).Int("pid", pid).Msg("failed to kill process")
|
||||
}
|
||||
}
|
||||
|
||||
// waitForProcessExit blocks until the current child exits, records the
// exit code/time under the mutex, and logs the outcome. An exit code of
// -1 means the process was killed or Wait failed with a non-ExitError.
func (bs *BaseSupervisor) waitForProcessExit(processType string) {
	bs.mutex.RLock()
	cmd := bs.cmd
	pid := bs.processPID
	bs.mutex.RUnlock()

	if cmd == nil {
		return
	}

	// Block until the process exits. Deliberately outside the lock so
	// other accessors are not stalled while the child runs.
	err := cmd.Wait()

	bs.mutex.Lock()
	bs.lastExitTime = time.Now()
	bs.processPID = 0

	var exitCode int
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			exitCode = exitError.ExitCode()
		} else {
			// Process was killed or Wait itself failed.
			exitCode = -1
		}
	} else {
		exitCode = 0
	}

	bs.lastExitCode = exitCode
	bs.mutex.Unlock()

	if exitCode != 0 {
		bs.logger.Error().Int("pid", pid).Int("exit_code", exitCode).Msgf("%s process exited with error", processType)
	} else {
		bs.logger.Info().Int("pid", pid).Msgf("%s process exited gracefully", processType)
	}
}
|
||||
|
||||
// SupervisionConfig holds configuration for the supervision loop.
type SupervisionConfig struct {
	ProcessType        string        // human-readable name used in log messages
	Timeout            time.Duration // graceful-termination timeout before SIGKILL
	EnableRestart      bool          // whether crashed processes are restarted
	MaxRestartAttempts int           // max restarts tolerated within RestartWindow
	RestartWindow      time.Duration // sliding window for counting restart attempts
	RestartDelay       time.Duration // base delay before a restart
	MaxRestartDelay    time.Duration // upper bound for backoff-grown delays
}

// ProcessCallbacks holds optional hooks for process lifecycle events.
// Any field may be nil.
type ProcessCallbacks struct {
	OnProcessStart func(pid int)
	OnProcessExit  func(pid int, exitCode int, crashed bool)
	// OnRestart receives attempt==0 for a retry after a failed start and
	// attempt==1 for a restart after the process exited (see SupervisionLoop).
	OnRestart func(attempt int, delay time.Duration)
}
|
||||
|
||||
// SupervisionLoop is the template supervision loop shared by concrete
// supervisors. It repeatedly starts the process via startProcessFunc,
// waits for it to exit, and restarts it subject to config.EnableRestart
// and shouldRestartFunc, sleeping calculateDelayFunc between attempts.
// It returns when the stop channel fires, the context is cancelled, the
// running flag is cleared, or restart policy is exhausted.
func (bs *BaseSupervisor) SupervisionLoop(
	config SupervisionConfig,
	callbacks ProcessCallbacks,
	startProcessFunc func() error,
	shouldRestartFunc func() bool,
	calculateDelayFunc func() time.Duration,
) {
	// Always signal loop completion so Stop() waiting on processDone
	// cannot hang, regardless of which exit path is taken.
	defer func() {
		bs.closeProcessDone()
		bs.logger.Info().Msgf("%s supervision ended", config.ProcessType)
	}()

	for atomic.LoadInt32(&bs.running) == 1 {
		select {
		case <-bs.stopChan:
			bs.logger.Info().Msg("received stop signal")
			bs.terminateProcess(config.Timeout, config.ProcessType)
			return
		case <-bs.ctx.Done():
			bs.logger.Info().Msg("context cancelled")
			bs.terminateProcess(config.Timeout, config.ProcessType)
			return
		default:
			// Start or restart the process.
			if err := startProcessFunc(); err != nil {
				bs.logger.Error().Err(err).Msgf("failed to start %s process", config.ProcessType)

				// A failed start consumes a restart attempt like a crash does.
				if !config.EnableRestart || !shouldRestartFunc() {
					bs.logger.Error().Msgf("maximum restart attempts exceeded or restart disabled, stopping %s supervisor", config.ProcessType)
					return
				}

				delay := calculateDelayFunc()
				bs.logger.Warn().Dur("delay", delay).Msgf("retrying %s process start after delay", config.ProcessType)

				if callbacks.OnRestart != nil {
					callbacks.OnRestart(0, delay) // 0 indicates start failure, not exit restart
				}

				// Interruptible sleep: stop/cancel must preempt the delay.
				select {
				case <-time.After(delay):
				case <-bs.stopChan:
					return
				case <-bs.ctx.Done():
					return
				}
				continue
			}

			// Block until the process exits (fires OnProcessExit).
			bs.waitForProcessExitWithCallback(config.ProcessType, callbacks)

			// Normal completion with restart disabled ends supervision.
			if !config.EnableRestart {
				bs.logger.Info().Msgf("%s process completed, restart disabled", config.ProcessType)
				return
			}

			if !shouldRestartFunc() {
				bs.logger.Error().Msgf("maximum restart attempts exceeded, stopping %s supervisor", config.ProcessType)
				return
			}

			// Back off before restarting.
			delay := calculateDelayFunc()
			bs.logger.Info().Dur("delay", delay).Msgf("restarting %s process after delay", config.ProcessType)

			if callbacks.OnRestart != nil {
				callbacks.OnRestart(1, delay) // 1 indicates restart after exit
			}

			// Interruptible restart delay.
			select {
			case <-time.After(delay):
			case <-bs.stopChan:
				return
			case <-bs.ctx.Done():
				return
			}
		}
	}
}
|
||||
|
||||
// waitForProcessExitWithCallback wraps waitForProcessExit and then invokes
// the OnProcessExit callback (if any) with the pre-exit PID and the
// recorded exit code. `crashed` is defined as any non-zero exit code.
func (bs *BaseSupervisor) waitForProcessExitWithCallback(processType string, callbacks ProcessCallbacks) {
	// Capture the PID before waitForProcessExit zeroes it.
	bs.mutex.RLock()
	pid := bs.processPID
	bs.mutex.RUnlock()

	// Blocks until the process exits and exit info is recorded.
	bs.waitForProcessExit(processType)

	if callbacks.OnProcessExit != nil {
		bs.mutex.RLock()
		exitCode := bs.lastExitCode
		bs.mutex.RUnlock()

		crashed := exitCode != 0
		callbacks.OnProcessExit(pid, exitCode, crashed)
	}
}
|
||||
|
|
@ -1,316 +0,0 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Component name constants for logging
const (
	AudioOutputSupervisorComponent = "audio-output-supervisor"
)

// AudioOutputSupervisor manages the audio output server subprocess
// lifecycle: start/stop, crash detection, and bounded restarts. It embeds
// BaseSupervisor for the generic supervision machinery.
type AudioOutputSupervisor struct {
	*BaseSupervisor

	// restartAttempts records recent restart timestamps; entries older
	// than Config.RestartWindow are pruned in shouldRestart.
	restartAttempts []time.Time

	// opusEnv holds OPUS_*/ALSA_* environment variables passed to the
	// C subprocess (set by SetOpusConfig).
	opusEnv []string

	// Lifecycle callbacks; any may be nil. Guarded by the embedded mutex.
	onProcessStart func(pid int)
	onProcessExit  func(pid int, exitCode int, crashed bool)
	onRestart      func(attempt int, delay time.Duration)
}
|
||||
|
||||
// NewAudioOutputSupervisor creates a new audio output server supervisor
|
||||
func NewAudioOutputSupervisor() *AudioOutputSupervisor {
|
||||
return &AudioOutputSupervisor{
|
||||
BaseSupervisor: NewBaseSupervisor("audio-output-supervisor"),
|
||||
restartAttempts: make([]time.Time, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// SetCallbacks sets optional callbacks for process lifecycle events
|
||||
func (s *AudioOutputSupervisor) SetCallbacks(
|
||||
onStart func(pid int),
|
||||
onExit func(pid int, exitCode int, crashed bool),
|
||||
onRestart func(attempt int, delay time.Duration),
|
||||
) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
|
||||
s.onProcessStart = onStart
|
||||
|
||||
// Wrap the exit callback to include restart tracking
|
||||
if onExit != nil {
|
||||
s.onProcessExit = func(pid int, exitCode int, crashed bool) {
|
||||
if crashed {
|
||||
s.recordRestartAttempt()
|
||||
}
|
||||
onExit(pid, exitCode, crashed)
|
||||
}
|
||||
} else {
|
||||
s.onProcessExit = func(pid int, exitCode int, crashed bool) {
|
||||
if crashed {
|
||||
s.recordRestartAttempt()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s.onRestart = onRestart
|
||||
}
|
||||
|
||||
// SetOpusConfig sets OPUS configuration parameters as environment variables
|
||||
// for the audio output subprocess
|
||||
func (s *AudioOutputSupervisor) SetOpusConfig(bitrate, complexity, vbr, signalType, bandwidth, dtx int) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
|
||||
// Store OPUS parameters as environment variables for C binary
|
||||
s.opusEnv = []string{
|
||||
"OPUS_BITRATE=" + strconv.Itoa(bitrate),
|
||||
"OPUS_COMPLEXITY=" + strconv.Itoa(complexity),
|
||||
"OPUS_VBR=" + strconv.Itoa(vbr),
|
||||
"OPUS_SIGNAL_TYPE=" + strconv.Itoa(signalType),
|
||||
"OPUS_BANDWIDTH=" + strconv.Itoa(bandwidth),
|
||||
"OPUS_DTX=" + strconv.Itoa(dtx),
|
||||
"ALSA_CAPTURE_DEVICE=hw:0,0", // TC358743 HDMI audio capture
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins supervising the audio output server process. It is safe to
// call again after Stop (channels are recreated). Returns an error only
// when the supervisor is already running.
func (s *AudioOutputSupervisor) Start() error {
	// CAS guards against concurrent/double Start.
	if !atomic.CompareAndSwapInt32(&s.running, 0, 1) {
		return fmt.Errorf("audio output supervisor is already running")
	}

	s.logSupervisorStart()
	s.createContext()

	// Recreate channels in case they were closed by a previous Stop() call.
	s.initializeChannels()

	// Reset restart tracking on start.
	s.mutex.Lock()
	s.restartAttempts = s.restartAttempts[:0]
	s.mutex.Unlock()

	// Run the supervision loop in the background.
	go s.supervisionLoop()

	// Establish the IPC connection to the subprocess after a brief delay.
	// NOTE(review): the fixed 500ms is a heuristic — if the subprocess is
	// slower to create its socket, connectClient logs a warning and the
	// relay layer retries later.
	go func() {
		time.Sleep(500 * time.Millisecond) // wait for subprocess to start
		s.connectClient()
	}()

	s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component started successfully")
	return nil
}
|
||||
|
||||
// Stop gracefully stops the audio server and the supervisor. Idempotent:
// only the first caller after Start performs the shutdown. Blocks until
// the supervision loop ends or Config.OutputSupervisorTimeout elapses,
// then force-kills the child.
func (s *AudioOutputSupervisor) Stop() {
	if !atomic.CompareAndSwapInt32(&s.running, 1, 0) {
		return // already stopped
	}

	s.logSupervisorStop()

	// Signal stop (stopChan + context) and wait for loop cleanup.
	s.closeStopChan()
	s.cancelContext()

	// processDone is closed by the supervision loop's deferred cleanup.
	select {
	case <-s.processDone:
		s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component stopped gracefully")
	case <-time.After(Config.OutputSupervisorTimeout):
		s.logger.Warn().Str("component", AudioOutputSupervisorComponent).Msg("component did not stop gracefully, forcing termination")
		s.forceKillProcess("audio output server")
	}

	// Ensure socket file cleanup even if the subprocess didn't clean up
	// properly; prevents "address already in use" on restart.
	outputSocketPath := getOutputSocketPath()
	if err := os.Remove(outputSocketPath); err != nil && !os.IsNotExist(err) {
		s.logger.Warn().Err(err).Str("socket_path", outputSocketPath).Msg("failed to remove output socket file during supervisor stop")
	} else if err == nil {
		s.logger.Debug().Str("socket_path", outputSocketPath).Msg("cleaned up output socket file")
	}

	s.logger.Info().Str("component", AudioOutputSupervisorComponent).Msg("component stopped")
}
|
||||
|
||||
// supervisionLoop is the main loop that manages the audio output process
|
||||
func (s *AudioOutputSupervisor) supervisionLoop() {
|
||||
// Configure supervision parameters
|
||||
config := SupervisionConfig{
|
||||
ProcessType: "audio output server",
|
||||
Timeout: Config.OutputSupervisorTimeout,
|
||||
EnableRestart: true,
|
||||
MaxRestartAttempts: Config.MaxRestartAttempts,
|
||||
RestartWindow: Config.RestartWindow,
|
||||
RestartDelay: Config.RestartDelay,
|
||||
MaxRestartDelay: Config.MaxRestartDelay,
|
||||
}
|
||||
|
||||
// Configure callbacks
|
||||
callbacks := ProcessCallbacks{
|
||||
OnProcessStart: s.onProcessStart,
|
||||
OnProcessExit: s.onProcessExit,
|
||||
OnRestart: s.onRestart,
|
||||
}
|
||||
|
||||
// Use the base supervision loop template
|
||||
s.SupervisionLoop(
|
||||
config,
|
||||
callbacks,
|
||||
s.startProcess,
|
||||
s.shouldRestart,
|
||||
s.calculateRestartDelay,
|
||||
)
|
||||
}
|
||||
|
||||
// startProcess launches the audio output C binary with the configured
// OPUS/ALSA environment, records its PID, and fires the start callback.
// Called from the supervision loop; holds the mutex while mutating cmd.
func (s *AudioOutputSupervisor) startProcess() error {
	// Use embedded C binary path.
	binaryPath := GetAudioOutputBinaryPath()

	s.mutex.Lock()
	defer s.mutex.Unlock()

	// Fresh command per start; bound to the supervisor context so
	// cancellation kills the child. No args needed for the C binary.
	s.cmd = exec.CommandContext(s.ctx, binaryPath)
	s.cmd.Stdout = os.Stdout
	s.cmd.Stderr = os.Stderr

	// Set environment variables for OPUS configuration. The subprocess
	// also inherits all PION_LOG_* variables from os.Environ(), which
	// gives the audio scope the correct trace level.
	env := append(os.Environ(), s.opusEnv...)
	s.cmd.Env = env

	if err := s.cmd.Start(); err != nil {
		return fmt.Errorf("failed to start audio output server process: %w", err)
	}

	s.processPID = s.cmd.Process.Pid
	s.logger.Info().Int("pid", s.processPID).Str("binary", binaryPath).Strs("opus_env", s.opusEnv).Msg("audio server process started")

	// NOTE(review): the callback runs while s.mutex is held — confirm no
	// registered OnProcessStart re-enters this supervisor's locked methods.
	if s.onProcessStart != nil {
		s.onProcessStart(s.processPID)
	}

	return nil
}
|
||||
|
||||
// shouldRestart determines if the process should be restarted
|
||||
func (s *AudioOutputSupervisor) shouldRestart() bool {
|
||||
if atomic.LoadInt32(&s.running) == 0 {
|
||||
return false // Supervisor is stopping
|
||||
}
|
||||
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
// Clean up old restart attempts outside the window
|
||||
now := time.Now()
|
||||
var recentAttempts []time.Time
|
||||
for _, attempt := range s.restartAttempts {
|
||||
if now.Sub(attempt) < Config.RestartWindow {
|
||||
recentAttempts = append(recentAttempts, attempt)
|
||||
}
|
||||
}
|
||||
s.restartAttempts = recentAttempts
|
||||
|
||||
return len(s.restartAttempts) < Config.MaxRestartAttempts
|
||||
}
|
||||
|
||||
// recordRestartAttempt records a restart attempt
|
||||
func (s *AudioOutputSupervisor) recordRestartAttempt() {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
|
||||
s.restartAttempts = append(s.restartAttempts, time.Now())
|
||||
}
|
||||
|
||||
// calculateRestartDelay calculates the delay before next restart attempt
|
||||
func (s *AudioOutputSupervisor) calculateRestartDelay() time.Duration {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
// Exponential backoff based on recent restart attempts
|
||||
attempts := len(s.restartAttempts)
|
||||
if attempts == 0 {
|
||||
return Config.RestartDelay
|
||||
}
|
||||
|
||||
// Calculate exponential backoff: 2^attempts * base delay
|
||||
delay := Config.RestartDelay
|
||||
for i := 0; i < attempts && delay < Config.MaxRestartDelay; i++ {
|
||||
delay *= 2
|
||||
}
|
||||
|
||||
if delay > Config.MaxRestartDelay {
|
||||
delay = Config.MaxRestartDelay
|
||||
}
|
||||
|
||||
return delay
|
||||
}
|
||||
|
||||
// outputClient holds the IPC client for communicating with the subprocess.
// NOTE(review): this is a package-level global with no synchronization —
// concurrent connectClient calls could race on it; confirm only one
// supervisor instance exists per process.
var outputClient *AudioOutputClient

// IsConnected reports whether the supervisor has an active IPC connection
// to the subprocess.
func (s *AudioOutputSupervisor) IsConnected() bool {
	return outputClient != nil && outputClient.IsConnected()
}

// GetClient returns the shared IPC client for the subprocess (may be nil
// before the first connectClient call).
func (s *AudioOutputSupervisor) GetClient() *AudioOutputClient {
	return outputClient
}

// connectClient lazily creates the IPC client and attempts to connect to
// the audio output subprocess. A failure is logged, not returned — callers
// retry via the relay layer.
func (s *AudioOutputSupervisor) connectClient() {
	if outputClient == nil {
		outputClient = NewAudioOutputClient()
	}

	if err := outputClient.Connect(); err != nil {
		s.logger.Warn().Err(err).Msg("Failed to connect to audio output subprocess")
	} else {
		s.logger.Info().Msg("Connected to audio output subprocess")
	}
}
|
||||
|
||||
// SendOpusConfig sends Opus configuration to the audio output subprocess
|
||||
func (aos *AudioOutputSupervisor) SendOpusConfig(config UnifiedIPCOpusConfig) error {
|
||||
if outputClient == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
|
||||
if !outputClient.IsConnected() {
|
||||
return fmt.Errorf("client not connected")
|
||||
}
|
||||
|
||||
return outputClient.SendOpusConfig(config)
|
||||
}
|
||||
|
|
@ -0,0 +1,152 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/pion/webrtc/v4"
|
||||
"github.com/pion/webrtc/v4/pkg/media"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// OutputRelay forwards audio from the subprocess (HDMI capture) to
// WebRTC (browser). One relay goroutine reads IPC frames and writes them
// to the local WebRTC track.
type OutputRelay struct {
	client     *IPCClient
	audioTrack *webrtc.TrackLocalStaticSample
	ctx        context.Context
	cancel     context.CancelFunc
	logger     zerolog.Logger
	running    atomic.Bool
	// sample is reused for every frame to keep the hot path allocation-free;
	// only its Data field is rewritten per frame (Duration is constant).
	sample media.Sample

	// Stats (Uint32: overflows after 2.7 years @ 50fps, faster atomics on 32-bit ARM)
	framesRelayed atomic.Uint32
	framesDropped atomic.Uint32
}
|
||||
|
||||
// NewOutputRelay creates a relay for output audio (device → browser)
|
||||
func NewOutputRelay(client *IPCClient, audioTrack *webrtc.TrackLocalStaticSample) *OutputRelay {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-output-relay").Logger()
|
||||
|
||||
return &OutputRelay{
|
||||
client: client,
|
||||
audioTrack: audioTrack,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logger: logger,
|
||||
sample: media.Sample{
|
||||
Duration: 20 * time.Millisecond, // Constant for all Opus frames
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins relaying audio frames
|
||||
func (r *OutputRelay) Start() error {
|
||||
if r.running.Swap(true) {
|
||||
return fmt.Errorf("output relay already running")
|
||||
}
|
||||
|
||||
go r.relayLoop()
|
||||
r.logger.Debug().Msg("output relay started")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the relay
|
||||
func (r *OutputRelay) Stop() {
|
||||
if !r.running.Swap(false) {
|
||||
return
|
||||
}
|
||||
|
||||
r.cancel()
|
||||
r.logger.Debug().
|
||||
Uint32("frames_relayed", r.framesRelayed.Load()).
|
||||
Uint32("frames_dropped", r.framesDropped.Load()).
|
||||
Msg("output relay stopped")
|
||||
}
|
||||
|
||||
// relayLoop continuously reads Opus frames from IPC and writes them to the
// WebRTC track, reconnecting with a fixed delay after any connection error.
// Runs until Stop clears the running flag.
func (r *OutputRelay) relayLoop() {
	const reconnectDelay = 1 * time.Second

	for r.running.Load() {
		// Ensure connected; on failure back off and retry.
		if !r.client.IsConnected() {
			if err := r.client.Connect(); err != nil {
				r.logger.Debug().Err(err).Msg("failed to connect, will retry")
				time.Sleep(reconnectDelay)
				continue
			}
		}

		// Blocking read of the next message from the subprocess.
		msgType, payload, err := r.client.ReadMessage()
		if err != nil {
			// Connection error — drop the connection and reconnect, unless
			// we are shutting down (then just fall out of the loop).
			if r.running.Load() {
				r.logger.Warn().Err(err).Msg("read error, reconnecting")
				r.client.Disconnect()
				time.Sleep(reconnectDelay)
			}
			continue
		}

		// Only Opus payload frames are forwarded; other message types are
		// ignored here.
		if msgType == ipcMsgTypeOpus && len(payload) > 0 {
			// Reuse the sample struct (zero-allocation hot path).
			// NOTE(review): assumes WriteSample does not retain Data past the
			// call and that ReadMessage returns a fresh/stable payload slice —
			// confirm against IPCClient's buffer ownership.
			r.sample.Data = payload

			if err := r.audioTrack.WriteSample(r.sample); err != nil {
				r.framesDropped.Add(1)
				r.logger.Warn().Err(err).Msg("failed to write sample to WebRTC")
			} else {
				r.framesRelayed.Add(1)
			}
		}
	}
}
|
||||
|
||||
// InputRelay forwards audio from WebRTC (browser microphone) to subprocess (USB audio)
type InputRelay struct {
	client  *IPCClient         // IPC connection to the audio input subprocess
	ctx     context.Context    // lifecycle context, cancelled by Stop
	cancel  context.CancelFunc // cancels ctx
	logger  zerolog.Logger
	running atomic.Bool // true between Start and Stop; makes both idempotent
}
|
||||
|
||||
// NewInputRelay creates a relay for input audio (browser → device)
|
||||
func NewInputRelay(client *IPCClient) *InputRelay {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-input-relay").Logger()
|
||||
|
||||
return &InputRelay{
|
||||
client: client,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins relaying audio frames
|
||||
func (r *InputRelay) Start() error {
|
||||
if r.running.Swap(true) {
|
||||
return fmt.Errorf("input relay already running")
|
||||
}
|
||||
|
||||
r.logger.Debug().Msg("input relay started")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the relay
|
||||
func (r *InputRelay) Stop() {
|
||||
if !r.running.Swap(false) {
|
||||
return
|
||||
}
|
||||
|
||||
r.cancel()
|
||||
r.logger.Debug().Msg("input relay stopped")
|
||||
}
|
||||
|
|
@ -1,219 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Global relay instance for the main process
|
||||
var (
|
||||
globalRelay *AudioRelay
|
||||
relayMutex sync.RWMutex
|
||||
)
|
||||
|
||||
// StartAudioRelay starts the audio relay system for the main process
|
||||
// This replaces the CGO-based audio system when running in main process mode
|
||||
// audioTrack can be nil initially and updated later via UpdateAudioRelayTrack
|
||||
func StartAudioRelay(audioTrack AudioTrackWriter) error {
|
||||
relayMutex.Lock()
|
||||
defer relayMutex.Unlock()
|
||||
|
||||
if globalRelay != nil {
|
||||
return nil // Already running
|
||||
}
|
||||
|
||||
// Create new relay
|
||||
relay := NewAudioRelay()
|
||||
|
||||
// Retry starting the relay with exponential backoff
|
||||
// This handles cases where the subprocess hasn't created its socket yet
|
||||
maxAttempts := 5
|
||||
baseDelay := 200 * time.Millisecond
|
||||
maxDelay := 2 * time.Second
|
||||
|
||||
var lastErr error
|
||||
for i := 0; i < maxAttempts; i++ {
|
||||
if err := relay.Start(audioTrack); err != nil {
|
||||
lastErr = err
|
||||
if i < maxAttempts-1 {
|
||||
// Calculate exponential backoff delay
|
||||
delay := time.Duration(float64(baseDelay) * (1.5 * float64(i+1)))
|
||||
if delay > maxDelay {
|
||||
delay = maxDelay
|
||||
}
|
||||
time.Sleep(delay)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("failed to start audio relay after %d attempts: %w", maxAttempts, lastErr)
|
||||
}
|
||||
|
||||
// Success
|
||||
globalRelay = relay
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to start audio relay after %d attempts: %w", maxAttempts, lastErr)
|
||||
}
|
||||
|
||||
// StopAudioRelay stops the audio relay system
|
||||
func StopAudioRelay() {
|
||||
relayMutex.Lock()
|
||||
defer relayMutex.Unlock()
|
||||
|
||||
if globalRelay != nil {
|
||||
globalRelay.Stop()
|
||||
globalRelay = nil
|
||||
}
|
||||
}
|
||||
|
||||
// SetAudioRelayMuted sets the mute state for the audio relay
|
||||
func SetAudioRelayMuted(muted bool) {
|
||||
relayMutex.RLock()
|
||||
defer relayMutex.RUnlock()
|
||||
|
||||
if globalRelay != nil {
|
||||
globalRelay.SetMuted(muted)
|
||||
}
|
||||
}
|
||||
|
||||
// IsAudioRelayMuted returns the current mute state of the audio relay
|
||||
func IsAudioRelayMuted() bool {
|
||||
relayMutex.RLock()
|
||||
defer relayMutex.RUnlock()
|
||||
|
||||
if globalRelay != nil {
|
||||
return globalRelay.IsMuted()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetAudioRelayStats returns statistics from the audio relay
|
||||
func GetAudioRelayStats() (framesRelayed, framesDropped int64) {
|
||||
relayMutex.RLock()
|
||||
defer relayMutex.RUnlock()
|
||||
|
||||
if globalRelay != nil {
|
||||
return globalRelay.GetStats()
|
||||
}
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// IsAudioRelayRunning returns whether the audio relay is currently running
|
||||
func IsAudioRelayRunning() bool {
|
||||
relayMutex.RLock()
|
||||
defer relayMutex.RUnlock()
|
||||
|
||||
return globalRelay != nil
|
||||
}
|
||||
|
||||
// UpdateAudioRelayTrack updates the WebRTC audio track for the relay.
// If no relay is running it starts one with the provided track.
// This function is refactored to prevent mutex deadlocks during quality
// changes: the mutex is held only for the relay/track update, and the
// registered replacement callback is invoked afterwards, outside the lock,
// with a 5-second timeout.
func UpdateAudioRelayTrack(audioTrack AudioTrackWriter) error {
	var needsCallback bool
	var callbackFunc TrackReplacementCallback

	// Critical section: minimize time holding the mutex
	relayMutex.Lock()
	if globalRelay == nil {
		// No relay running, start one with the provided track
		relay := NewAudioRelay()
		if err := relay.Start(audioTrack); err != nil {
			relayMutex.Unlock()
			return err
		}
		globalRelay = relay
	} else {
		// Update the track in the existing relay
		globalRelay.UpdateTrack(audioTrack)
	}

	// Capture callback state while holding mutex
	needsCallback = trackReplacementCallback != nil
	if needsCallback {
		callbackFunc = trackReplacementCallback
	}
	relayMutex.Unlock()

	// Execute callback outside of mutex to prevent deadlock
	if needsCallback && callbackFunc != nil {
		// Use goroutine with timeout to prevent blocking. The channel is
		// buffered (size 1) so the goroutine can finish and exit even if
		// the timeout branch below wins — no goroutine leak.
		done := make(chan error, 1)
		go func() {
			done <- callbackFunc(audioTrack)
		}()

		// Wait for callback with timeout
		select {
		case err := <-done:
			if err != nil {
				// Log error but don't fail the relay operation
				// The relay can still work even if WebRTC track replacement fails
				_ = err // Suppress linter warning
			}
		case <-time.After(5 * time.Second):
			// Timeout: log warning but continue
			// This prevents indefinite blocking during quality changes
			_ = fmt.Errorf("track replacement callback timed out")
		}
	}

	return nil
}
|
||||
|
||||
// CurrentSessionCallback is a function type for getting the current session's audio track
type CurrentSessionCallback func() AudioTrackWriter

// TrackReplacementCallback is a function type for replacing the WebRTC audio track
type TrackReplacementCallback func(AudioTrackWriter) error

// currentSessionCallback holds the callback function to get the current session's audio track.
// NOTE(review): written by SetCurrentSessionCallback without relayMutex and
// read by connectRelayToCurrentSession also without it — this assumes
// registration happens once at startup, before concurrent use; confirm.
var currentSessionCallback CurrentSessionCallback

// trackReplacementCallback holds the callback function to replace the WebRTC audio track.
// NOTE(review): read under relayMutex in UpdateAudioRelayTrack but written
// below without it — same single-registration assumption as above; confirm.
var trackReplacementCallback TrackReplacementCallback

// SetCurrentSessionCallback sets the callback function to get the current session's audio track
func SetCurrentSessionCallback(callback CurrentSessionCallback) {
	currentSessionCallback = callback
}

// SetTrackReplacementCallback sets the callback function to replace the WebRTC audio track
func SetTrackReplacementCallback(callback TrackReplacementCallback) {
	trackReplacementCallback = callback
}
|
||||
|
||||
// UpdateAudioRelayTrackAsync performs async track update to prevent blocking
|
||||
// This is used during WebRTC session creation to avoid deadlocks
|
||||
func UpdateAudioRelayTrackAsync(audioTrack AudioTrackWriter) {
|
||||
go func() {
|
||||
if err := UpdateAudioRelayTrack(audioTrack); err != nil {
|
||||
// Log error but don't block session creation
|
||||
_ = err // Suppress linter warning
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// connectRelayToCurrentSession connects the audio relay to the current WebRTC session's audio track
|
||||
// This is used when restarting the relay during unmute operations
|
||||
func connectRelayToCurrentSession() error {
|
||||
if currentSessionCallback == nil {
|
||||
return errors.New("no current session callback set")
|
||||
}
|
||||
|
||||
track := currentSessionCallback()
|
||||
if track == nil {
|
||||
return errors.New("no current session audio track available")
|
||||
}
|
||||
|
||||
relayMutex.Lock()
|
||||
defer relayMutex.Unlock()
|
||||
|
||||
if globalRelay != nil {
|
||||
globalRelay.UpdateTrack(track)
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("no global relay running")
|
||||
}
|
||||
|
|
@ -1,102 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// RPC wrapper functions for audio control
|
||||
// These functions bridge the RPC layer to the AudioControlService
|
||||
|
||||
// This variable will be set by the main package to provide access to the global service
var (
	// getAudioControlServiceFunc returns the process-wide
	// AudioControlService, or nil when none has been registered yet.
	getAudioControlServiceFunc func() *AudioControlService
)

// SetRPCCallbacks sets the callback function for RPC operations
func SetRPCCallbacks(getService func() *AudioControlService) {
	getAudioControlServiceFunc = getService
}
|
||||
|
||||
// RPCAudioMute handles audio mute/unmute RPC requests
|
||||
func RPCAudioMute(muted bool) error {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.MuteAudio(muted)
|
||||
}
|
||||
|
||||
// RPCMicrophoneStart handles microphone start RPC requests
|
||||
func RPCMicrophoneStart() error {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.StartMicrophone()
|
||||
}
|
||||
|
||||
// RPCMicrophoneStop handles microphone stop RPC requests
|
||||
func RPCMicrophoneStop() error {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.StopMicrophone()
|
||||
}
|
||||
|
||||
// RPCAudioStatus handles audio status RPC requests (read-only)
|
||||
func RPCAudioStatus() (map[string]interface{}, error) {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return nil, fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return nil, fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.GetAudioStatus(), nil
|
||||
}
|
||||
|
||||
// RPCMicrophoneStatus handles microphone status RPC requests (read-only)
|
||||
func RPCMicrophoneStatus() (map[string]interface{}, error) {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return nil, fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return nil, fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.GetMicrophoneStatus(), nil
|
||||
}
|
||||
|
||||
// RPCMicrophoneReset handles microphone reset RPC requests
|
||||
func RPCMicrophoneReset() error {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.ResetMicrophone()
|
||||
}
|
||||
|
||||
// RPCMicrophoneMute handles microphone mute RPC requests
|
||||
func RPCMicrophoneMute(muted bool) error {
|
||||
if getAudioControlServiceFunc == nil {
|
||||
return fmt.Errorf("audio control service not available")
|
||||
}
|
||||
service := getAudioControlServiceFunc()
|
||||
if service == nil {
|
||||
return fmt.Errorf("audio control service not initialized")
|
||||
}
|
||||
return service.MuteMicrophone(muted)
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
package audio
|
||||
|
||||
// SessionProvider interface abstracts session management for audio events
type SessionProvider interface {
	// IsSessionActive reports whether a session is currently active.
	IsSessionActive() bool
	// GetAudioInputManager returns the session's audio input manager
	// (may be nil).
	GetAudioInputManager() *AudioInputManager
}

// DefaultSessionProvider is a no-op implementation used until the main
// package injects a real provider via SetSessionProvider.
type DefaultSessionProvider struct{}

// IsSessionActive always reports false for the no-op provider.
func (d *DefaultSessionProvider) IsSessionActive() bool {
	return false
}

// GetAudioInputManager always returns nil for the no-op provider.
func (d *DefaultSessionProvider) GetAudioInputManager() *AudioInputManager {
	return nil
}

// sessionProvider is the active provider; defaults to the no-op implementation.
var sessionProvider SessionProvider = &DefaultSessionProvider{}

// SetSessionProvider allows the main package to inject session management
func SetSessionProvider(provider SessionProvider) {
	sessionProvider = provider
}

// GetSessionProvider returns the current session provider
func GetSessionProvider() SessionProvider {
	return sessionProvider
}
|
||||
|
|
@ -0,0 +1,187 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Supervisor manages a subprocess lifecycle with automatic restart
type Supervisor struct {
	name       string   // component name, also used as the logger tag
	binaryPath string   // executable to supervise
	socketPath string   // unix socket path; removed on Stop
	env        []string // extra environment appended to os.Environ()

	cmd     *exec.Cmd          // current subprocess (set by startProcess)
	ctx     context.Context    // lifecycle context, cancelled by Stop
	cancel  context.CancelFunc // cancels ctx
	running atomic.Bool        // true between Start and Stop
	done    chan struct{}      // Closed when supervision loop exits
	logger  zerolog.Logger

	// Restart state (only touched by the supervision goroutine)
	restartCount   uint8         // restarts since the last stable run
	lastRestartAt  time.Time     // when the process was last restarted
	restartBackoff time.Duration // next delay before a restart
}

const (
	minRestartDelay = 1 * time.Second  // initial restart backoff
	maxRestartDelay = 30 * time.Second // backoff cap
	restartWindow   = 5 * time.Minute  // Reset backoff if process runs this long
)
|
||||
|
||||
// NewSupervisor creates a new subprocess supervisor
|
||||
func NewSupervisor(name, binaryPath, socketPath string, env []string) *Supervisor {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
logger := logging.GetDefaultLogger().With().Str("component", name).Logger()
|
||||
|
||||
return &Supervisor{
|
||||
name: name,
|
||||
binaryPath: binaryPath,
|
||||
socketPath: socketPath,
|
||||
env: env,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
done: make(chan struct{}),
|
||||
logger: logger,
|
||||
restartBackoff: minRestartDelay,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins supervising the subprocess
|
||||
func (s *Supervisor) Start() error {
|
||||
if s.running.Load() {
|
||||
return fmt.Errorf("%s: already running", s.name)
|
||||
}
|
||||
|
||||
s.running.Store(true)
|
||||
go s.supervisionLoop()
|
||||
s.logger.Debug().Msg("supervisor started")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully stops the subprocess: flips the running flag, cancels the
// context, kills the process if one is up, waits for the supervision loop
// to exit, and removes the socket file. Repeated calls are no-ops.
func (s *Supervisor) Stop() {
	if !s.running.Swap(false) {
		return // Already stopped
	}

	s.logger.Debug().Msg("stopping supervisor")
	s.cancel()

	// Kill process if running. Kill errors are deliberately ignored
	// (best-effort shutdown; the context cancellation above also signals
	// the process via exec.CommandContext).
	// NOTE(review): s.cmd is written by the supervision goroutine in
	// startProcess while Stop may run concurrently — assumes callers only
	// Stop after the loop has settled; confirm.
	if s.cmd != nil && s.cmd.Process != nil {
		s.cmd.Process.Kill()
	}

	// Wait for supervision loop to exit
	<-s.done

	// Clean up socket file
	os.Remove(s.socketPath)
	s.logger.Debug().Msg("supervisor stopped")
}
|
||||
|
||||
// supervisionLoop manages the subprocess lifecycle
|
||||
func (s *Supervisor) supervisionLoop() {
|
||||
defer close(s.done)
|
||||
|
||||
for s.running.Load() {
|
||||
// Check if we should reset backoff (process ran long enough)
|
||||
if !s.lastRestartAt.IsZero() && time.Since(s.lastRestartAt) > restartWindow {
|
||||
s.restartCount = 0
|
||||
s.restartBackoff = minRestartDelay
|
||||
s.logger.Debug().Msg("reset restart backoff after stable run")
|
||||
}
|
||||
|
||||
// Start the process
|
||||
if err := s.startProcess(); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to start process")
|
||||
} else {
|
||||
// Wait for process to exit
|
||||
err := s.cmd.Wait()
|
||||
|
||||
if s.running.Load() {
|
||||
// Process crashed (not intentional shutdown)
|
||||
s.logger.Warn().
|
||||
Err(err).
|
||||
Uint8("restart_count", s.restartCount).
|
||||
Dur("backoff", s.restartBackoff).
|
||||
Msg("process exited unexpectedly, will restart")
|
||||
|
||||
s.restartCount++
|
||||
s.lastRestartAt = time.Now()
|
||||
|
||||
// Calculate next backoff (exponential: 1s, 2s, 4s, 8s, 16s, 30s)
|
||||
s.restartBackoff *= 2
|
||||
if s.restartBackoff > maxRestartDelay {
|
||||
s.restartBackoff = maxRestartDelay
|
||||
}
|
||||
|
||||
// Wait before restart
|
||||
select {
|
||||
case <-time.After(s.restartBackoff):
|
||||
// Continue to next iteration
|
||||
case <-s.ctx.Done():
|
||||
return // Shutting down
|
||||
}
|
||||
} else {
|
||||
// Intentional shutdown
|
||||
s.logger.Debug().Msg("process exited cleanly")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// logPipe reads from a pipe and logs each line at debug level
|
||||
func (s *Supervisor) logPipe(reader io.ReadCloser, stream string) {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
s.logger.Debug().Str("stream", stream).Msg(line)
|
||||
}
|
||||
reader.Close()
|
||||
}
|
||||
|
||||
// startProcess starts the subprocess
|
||||
func (s *Supervisor) startProcess() error {
|
||||
s.cmd = exec.CommandContext(s.ctx, s.binaryPath)
|
||||
s.cmd.Env = append(os.Environ(), s.env...)
|
||||
|
||||
// Create pipes for subprocess output
|
||||
stdout, err := s.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create stdout pipe: %w", err)
|
||||
}
|
||||
stderr, err := s.cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create stderr pipe: %w", err)
|
||||
}
|
||||
|
||||
if err := s.cmd.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start %s: %w", s.name, err)
|
||||
}
|
||||
|
||||
// Start goroutines to log subprocess output at debug level
|
||||
go s.logPipe(stdout, "stdout")
|
||||
go s.logPipe(stderr, "stderr")
|
||||
|
||||
s.logger.Debug().
|
||||
Int("pid", s.cmd.Process.Pid).
|
||||
Str("binary", s.binaryPath).
|
||||
Strs("custom_env", s.env).
|
||||
Msg("process started")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,39 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
globalOutputSupervisor unsafe.Pointer // *AudioOutputSupervisor
|
||||
globalInputSupervisor unsafe.Pointer // *AudioInputSupervisor
|
||||
)
|
||||
|
||||
// SetAudioOutputSupervisor sets the global audio output supervisor
|
||||
func SetAudioOutputSupervisor(supervisor *AudioOutputSupervisor) {
|
||||
atomic.StorePointer(&globalOutputSupervisor, unsafe.Pointer(supervisor))
|
||||
}
|
||||
|
||||
// GetAudioOutputSupervisor returns the global audio output supervisor
|
||||
func GetAudioOutputSupervisor() *AudioOutputSupervisor {
|
||||
ptr := atomic.LoadPointer(&globalOutputSupervisor)
|
||||
if ptr == nil {
|
||||
return nil
|
||||
}
|
||||
return (*AudioOutputSupervisor)(ptr)
|
||||
}
|
||||
|
||||
// SetAudioInputSupervisor sets the global audio input supervisor
|
||||
func SetAudioInputSupervisor(supervisor *AudioInputSupervisor) {
|
||||
atomic.StorePointer(&globalInputSupervisor, unsafe.Pointer(supervisor))
|
||||
}
|
||||
|
||||
// GetAudioInputSupervisor returns the global audio input supervisor
|
||||
func GetAudioInputSupervisor() *AudioInputSupervisor {
|
||||
ptr := atomic.LoadPointer(&globalInputSupervisor)
|
||||
if ptr == nil {
|
||||
return nil
|
||||
}
|
||||
return (*AudioInputSupervisor)(ptr)
|
||||
}
|
||||
|
|
@ -1,141 +0,0 @@
|
|||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package audio
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// AudioBufferPool provides a simple buffer pool for audio processing
type AudioBufferPool struct {
	// Atomic counters
	hitCount  int64 // Pool hit counter (atomic)
	missCount int64 // Pool miss counter (atomic)

	// Pool configuration
	bufferSize int         // capacity of every pooled buffer
	pool       chan []byte // buffered channel acting as the free list
	maxSize    int         // channel capacity (max pooled buffers)
}
|
||||
|
||||
// NewAudioBufferPool creates a new simple audio buffer pool
|
||||
func NewAudioBufferPool(bufferSize int) *AudioBufferPool {
|
||||
maxSize := Config.MaxPoolSize
|
||||
if maxSize <= 0 {
|
||||
maxSize = Config.BufferPoolDefaultSize
|
||||
}
|
||||
|
||||
pool := &AudioBufferPool{
|
||||
bufferSize: bufferSize,
|
||||
pool: make(chan []byte, maxSize),
|
||||
maxSize: maxSize,
|
||||
}
|
||||
|
||||
// Pre-populate the pool
|
||||
for i := 0; i < maxSize/2; i++ {
|
||||
buf := make([]byte, bufferSize)
|
||||
select {
|
||||
case pool.pool <- buf:
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return pool
|
||||
}
|
||||
|
||||
// Get retrieves a buffer from the pool
|
||||
func (p *AudioBufferPool) Get() []byte {
|
||||
select {
|
||||
case buf := <-p.pool:
|
||||
atomic.AddInt64(&p.hitCount, 1)
|
||||
return buf[:0] // Reset length but keep capacity
|
||||
default:
|
||||
atomic.AddInt64(&p.missCount, 1)
|
||||
return make([]byte, 0, p.bufferSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Put returns a buffer to the pool
|
||||
func (p *AudioBufferPool) Put(buf []byte) {
|
||||
if buf == nil || cap(buf) != p.bufferSize {
|
||||
return // Invalid buffer
|
||||
}
|
||||
|
||||
// Reset the buffer
|
||||
buf = buf[:0]
|
||||
|
||||
// Try to return to pool
|
||||
select {
|
||||
case p.pool <- buf:
|
||||
// Successfully returned to pool
|
||||
default:
|
||||
// Pool is full, discard buffer
|
||||
}
|
||||
}
|
||||
|
||||
// GetStats returns pool statistics
|
||||
func (p *AudioBufferPool) GetStats() AudioBufferPoolStats {
|
||||
hitCount := atomic.LoadInt64(&p.hitCount)
|
||||
missCount := atomic.LoadInt64(&p.missCount)
|
||||
totalRequests := hitCount + missCount
|
||||
|
||||
var hitRate float64
|
||||
if totalRequests > 0 {
|
||||
hitRate = float64(hitCount) / float64(totalRequests) * Config.BufferPoolHitRateBase
|
||||
}
|
||||
|
||||
return AudioBufferPoolStats{
|
||||
BufferSize: p.bufferSize,
|
||||
MaxPoolSize: p.maxSize,
|
||||
CurrentSize: int64(len(p.pool)),
|
||||
HitCount: hitCount,
|
||||
MissCount: missCount,
|
||||
HitRate: hitRate,
|
||||
}
|
||||
}
|
||||
|
||||
// AudioBufferPoolStats represents pool statistics
type AudioBufferPoolStats struct {
	BufferSize  int     // capacity of buffers in this pool
	MaxPoolSize int     // maximum number of pooled buffers
	CurrentSize int64   // buffers currently held in the pool
	HitCount    int64   // Get calls served from the pool
	MissCount   int64   // Get calls that allocated a fresh buffer
	HitRate     float64 // hit fraction scaled by Config.BufferPoolHitRateBase
}

// Global buffer pools, sized from Config at package init.
var (
	audioFramePool   = NewAudioBufferPool(Config.AudioFramePoolSize)    // audio frame buffers
	audioControlPool = NewAudioBufferPool(Config.BufferPoolControlSize) // control-message buffers
)
|
||||
|
||||
// GetAudioFrameBuffer gets a buffer for audio frames.
// The returned slice has length 0; see AudioBufferPool.Get.
func GetAudioFrameBuffer() []byte {
	return audioFramePool.Get()
}

// PutAudioFrameBuffer returns a buffer to the frame pool.
// Buffers of the wrong capacity are discarded; see AudioBufferPool.Put.
func PutAudioFrameBuffer(buf []byte) {
	audioFramePool.Put(buf)
}

// GetAudioControlBuffer gets a buffer for control messages.
// The returned slice has length 0; see AudioBufferPool.Get.
func GetAudioControlBuffer() []byte {
	return audioControlPool.Get()
}

// PutAudioControlBuffer returns a buffer to the control pool.
func PutAudioControlBuffer(buf []byte) {
	audioControlPool.Put(buf)
}

// GetAudioBufferPoolStats returns statistics for all pools, keyed by
// pool name ("frame_pool", "control_pool").
func GetAudioBufferPoolStats() map[string]AudioBufferPoolStats {
	return map[string]AudioBufferPoolStats{
		"frame_pool":   audioFramePool.GetStats(),
		"control_pool": audioControlPool.GetStats(),
	}
}
|
||||
|
|
@ -1,247 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/pion/webrtc/v4/pkg/media"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// AudioRelay handles forwarding audio frames from the audio server subprocess
// to WebRTC without any CGO audio processing. This runs in the main process.
type AudioRelay struct {
	// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
	framesRelayed int64 // total frames forwarded to WebRTC (atomic)
	framesDropped int64 // total frames dropped on error (atomic)

	client     *AudioOutputClient // IPC connection to the audio subprocess
	ctx        context.Context    // lifecycle context for relayLoop
	cancel     context.CancelFunc // cancels ctx; called by Stop
	wg         sync.WaitGroup     // tracks the relayLoop goroutine
	logger     *zerolog.Logger
	running    bool         // guarded by mutex
	mutex      sync.RWMutex // protects running, client, audioTrack, muted
	bufferPool *AudioBufferPool // Buffer pool for memory optimization

	// WebRTC integration
	audioTrack AudioTrackWriter // destination track; may be swapped at runtime
	muted      bool             // when true, silence is sent instead of frames
}

// AudioTrackWriter interface for WebRTC audio track
type AudioTrackWriter interface {
	// WriteSample pushes one encoded audio sample to the track.
	WriteSample(sample media.Sample) error
}
|
||||
|
||||
// NewAudioRelay creates a new audio relay for the main process
|
||||
func NewAudioRelay() *AudioRelay {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
logger := logging.GetDefaultLogger().With().Str("component", "audio-relay").Logger()
|
||||
|
||||
return &AudioRelay{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
logger: &logger,
|
||||
bufferPool: NewAudioBufferPool(Config.MaxAudioFrameSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the audio relay process
|
||||
func (r *AudioRelay) Start(audioTrack AudioTrackWriter) error {
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
|
||||
if r.running {
|
||||
return nil // Already running
|
||||
}
|
||||
|
||||
// Create audio client to connect to subprocess
|
||||
client := NewAudioOutputClient()
|
||||
r.client = client
|
||||
r.audioTrack = audioTrack
|
||||
|
||||
// Connect to the audio output server
|
||||
if err := client.Connect(); err != nil {
|
||||
return fmt.Errorf("failed to connect to audio output server: %w", err)
|
||||
}
|
||||
|
||||
// Start relay goroutine
|
||||
r.wg.Add(1)
|
||||
go r.relayLoop()
|
||||
|
||||
r.running = true
|
||||
r.logger.Info().Msg("Audio relay connected to output server")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the audio relay
|
||||
func (r *AudioRelay) Stop() {
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
|
||||
if !r.running {
|
||||
return
|
||||
}
|
||||
|
||||
r.cancel()
|
||||
r.wg.Wait()
|
||||
|
||||
if r.client != nil {
|
||||
r.client.Disconnect()
|
||||
r.client = nil
|
||||
}
|
||||
|
||||
r.running = false
|
||||
r.logger.Info().Msgf("Audio relay stopped after relaying %d frames", r.framesRelayed)
|
||||
}
|
||||
|
||||
// SetMuted sets the mute state
|
||||
func (r *AudioRelay) SetMuted(muted bool) {
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
r.muted = muted
|
||||
}
|
||||
|
||||
// IsMuted returns the current mute state (checks both relay and global mute)
|
||||
func (r *AudioRelay) IsMuted() bool {
|
||||
r.mutex.RLock()
|
||||
defer r.mutex.RUnlock()
|
||||
return r.muted || IsAudioMuted()
|
||||
}
|
||||
|
||||
// GetStats returns relay statistics
|
||||
func (r *AudioRelay) GetStats() (framesRelayed, framesDropped int64) {
|
||||
return atomic.LoadInt64(&r.framesRelayed), atomic.LoadInt64(&r.framesDropped)
|
||||
}
|
||||
|
||||
// UpdateTrack updates the WebRTC audio track for the relay
|
||||
func (r *AudioRelay) UpdateTrack(audioTrack AudioTrackWriter) {
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
r.audioTrack = audioTrack
|
||||
}
|
||||
|
||||
// relayLoop runs in its own goroutine (tracked by r.wg): it pulls frames
// from the subprocess client and forwards them to WebRTC until the relay
// context is cancelled. Receive failures back off exponentially (10ms
// doubling up to 5s); after maxConsecutiveErrors failures it attempts one
// reconnection, and exits the loop if that fails.
func (r *AudioRelay) relayLoop() {
	defer r.wg.Done()

	var maxConsecutiveErrors = Config.MaxConsecutiveErrors
	consecutiveErrors := 0
	backoffDelay := time.Millisecond * 10
	maxBackoff := time.Second * 5

	for {
		select {
		case <-r.ctx.Done():
			return
		default:
			frame, err := r.client.ReceiveFrame()
			if err != nil {
				consecutiveErrors++
				r.incrementDropped()

				// Exponential backoff for stability
				if consecutiveErrors >= maxConsecutiveErrors {
					// Attempt reconnection
					if r.attemptReconnection() {
						consecutiveErrors = 0
						backoffDelay = time.Millisecond * 10
						continue
					}
					// Reconnection failed: give up and end the relay.
					return
				}

				time.Sleep(backoffDelay)
				if backoffDelay < maxBackoff {
					backoffDelay *= 2
				}
				continue
			}

			// Successful receive resets the error/backoff state.
			consecutiveErrors = 0
			backoffDelay = time.Millisecond * 10
			if err := r.forwardToWebRTC(frame); err != nil {
				r.incrementDropped()
			} else {
				r.incrementRelayed()
			}
		}
	}
}
|
||||
|
||||
// forwardToWebRTC forwards a frame to the WebRTC audio track
|
||||
func (r *AudioRelay) forwardToWebRTC(frame []byte) error {
|
||||
// Use ultra-fast validation for critical audio path
|
||||
if err := ValidateAudioFrame(frame); err != nil {
|
||||
r.incrementDropped()
|
||||
r.logger.Debug().Err(err).Msg("invalid frame data in relay")
|
||||
return err
|
||||
}
|
||||
|
||||
r.mutex.RLock()
|
||||
defer r.mutex.RUnlock()
|
||||
|
||||
audioTrack := r.audioTrack
|
||||
muted := r.muted
|
||||
|
||||
// Comprehensive nil check for audioTrack to prevent panic
|
||||
if audioTrack == nil {
|
||||
return nil // No audio track available
|
||||
}
|
||||
|
||||
// Check if interface contains nil pointer using reflection
|
||||
if reflect.ValueOf(audioTrack).IsNil() {
|
||||
return nil // Audio track interface contains nil pointer
|
||||
}
|
||||
|
||||
// Prepare sample data
|
||||
var sampleData []byte
|
||||
if muted {
|
||||
// Send silence when muted - use buffer pool to avoid allocation
|
||||
sampleData = r.bufferPool.Get()
|
||||
sampleData = sampleData[:len(frame)] // Resize to frame length
|
||||
// Clear the buffer to create silence
|
||||
for i := range sampleData {
|
||||
sampleData[i] = 0
|
||||
}
|
||||
defer r.bufferPool.Put(sampleData) // Return to pool after use
|
||||
} else {
|
||||
sampleData = frame
|
||||
}
|
||||
|
||||
// Write sample to WebRTC track while holding the read lock
|
||||
// Frame size is fixed at 20ms for HDMI audio
|
||||
return audioTrack.WriteSample(media.Sample{
|
||||
Data: sampleData,
|
||||
Duration: 20 * time.Millisecond,
|
||||
})
|
||||
}
|
||||
|
||||
// incrementRelayed atomically increments the relayed frames counter
// (read concurrently by GetStats).
func (r *AudioRelay) incrementRelayed() {
	atomic.AddInt64(&r.framesRelayed, 1)
}

// incrementDropped atomically increments the dropped frames counter
// (read concurrently by GetStats).
func (r *AudioRelay) incrementDropped() {
	atomic.AddInt64(&r.framesDropped, 1)
}
|
||||
|
||||
// attemptReconnection tries to reconnect the audio client for stability
|
||||
func (r *AudioRelay) attemptReconnection() bool {
|
||||
if r.client == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Disconnect and reconnect
|
||||
r.client.Disconnect()
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
|
||||
err := r.client.Connect()
|
||||
return err == nil
|
||||
}
|
||||
|
|
@ -1,244 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coder/websocket"
|
||||
"github.com/coder/websocket/wsjson"
|
||||
"github.com/jetkvm/kvm/internal/logging"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// AudioEventType represents different types of audio events delivered to
// WebSocket subscribers.
type AudioEventType string

const (
	// AudioEventMuteChanged signals that the audio output mute state flipped.
	AudioEventMuteChanged AudioEventType = "audio-mute-changed"
	// AudioEventMicrophoneState signals a microphone/session state change.
	AudioEventMicrophoneState AudioEventType = "microphone-state-changed"
	// AudioEventDeviceChanged signals an audio device configuration change.
	AudioEventDeviceChanged AudioEventType = "audio-device-changed"
)

// AudioEvent represents a WebSocket audio event envelope: a type tag plus a
// type-specific payload (one of the *Data structs below).
type AudioEvent struct {
	Type AudioEventType `json:"type"`
	Data interface{}    `json:"data"`
}

// AudioMuteData represents audio mute state change data.
type AudioMuteData struct {
	Muted bool `json:"muted"`
}

// MicrophoneStateData represents microphone state data.
type MicrophoneStateData struct {
	Running       bool `json:"running"`        // microphone input pipeline currently running
	SessionActive bool `json:"session_active"` // a client session is active
}

// AudioDeviceChangedData represents audio device configuration change data.
type AudioDeviceChangedData struct {
	Enabled bool   `json:"enabled"`
	Reason  string `json:"reason"` // human-readable cause of the change
}
|
||||
|
||||
// AudioEventSubscriber represents a WebSocket connection subscribed to audio
// events. The connection is shared with the signaling channel, so the
// broadcaster never closes it itself.
type AudioEventSubscriber struct {
	conn   *websocket.Conn
	ctx    context.Context // cancellation signals the connection is gone
	logger *zerolog.Logger
}

// AudioEventBroadcaster manages audio event subscriptions and broadcasting.
// All access to subscribers is guarded by mutex.
type AudioEventBroadcaster struct {
	subscribers map[string]*AudioEventSubscriber // keyed by connection ID
	mutex       sync.RWMutex
	logger      *zerolog.Logger
}

// Package-level singleton, created lazily via audioEventOnce.
var (
	audioEventBroadcaster *AudioEventBroadcaster
	audioEventOnce        sync.Once
)
|
||||
|
||||
// initializeBroadcaster creates and initializes the audio event broadcaster
|
||||
func initializeBroadcaster() {
|
||||
l := logging.GetDefaultLogger().With().Str("component", "audio-events").Logger()
|
||||
audioEventBroadcaster = &AudioEventBroadcaster{
|
||||
subscribers: make(map[string]*AudioEventSubscriber),
|
||||
logger: &l,
|
||||
}
|
||||
}
|
||||
|
||||
// InitializeAudioEventBroadcaster initializes the global audio event broadcaster
|
||||
func InitializeAudioEventBroadcaster() {
|
||||
audioEventOnce.Do(initializeBroadcaster)
|
||||
}
|
||||
|
||||
// GetAudioEventBroadcaster returns the singleton audio event broadcaster
|
||||
func GetAudioEventBroadcaster() *AudioEventBroadcaster {
|
||||
audioEventOnce.Do(initializeBroadcaster)
|
||||
return audioEventBroadcaster
|
||||
}
|
||||
|
||||
// Subscribe adds a WebSocket connection to receive audio events
|
||||
func (aeb *AudioEventBroadcaster) Subscribe(connectionID string, conn *websocket.Conn, ctx context.Context, logger *zerolog.Logger) {
|
||||
aeb.mutex.Lock()
|
||||
defer aeb.mutex.Unlock()
|
||||
|
||||
// Check if there's already a subscription for this connectionID
|
||||
if _, exists := aeb.subscribers[connectionID]; exists {
|
||||
aeb.logger.Debug().Str("connectionID", connectionID).Msg("duplicate audio events subscription detected; replacing existing entry")
|
||||
// Do NOT close the existing WebSocket connection here because it's shared
|
||||
// with the signaling channel. Just replace the subscriber map entry.
|
||||
delete(aeb.subscribers, connectionID)
|
||||
}
|
||||
|
||||
aeb.subscribers[connectionID] = &AudioEventSubscriber{
|
||||
conn: conn,
|
||||
ctx: ctx,
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
aeb.logger.Debug().Str("connectionID", connectionID).Msg("audio events subscription added")
|
||||
|
||||
// Send initial state to new subscriber
|
||||
go aeb.sendInitialState(connectionID)
|
||||
}
|
||||
|
||||
// Unsubscribe removes a WebSocket connection from audio events.
// Removing an unknown connectionID is a no-op. The underlying connection is
// not closed here (it is shared with signaling).
func (aeb *AudioEventBroadcaster) Unsubscribe(connectionID string) {
	aeb.mutex.Lock()
	defer aeb.mutex.Unlock()

	delete(aeb.subscribers, connectionID)
	aeb.logger.Debug().Str("connectionID", connectionID).Msg("audio events subscription removed")
}
|
||||
|
||||
// BroadcastAudioMuteChanged broadcasts audio mute state changes to every
// current subscriber.
func (aeb *AudioEventBroadcaster) BroadcastAudioMuteChanged(muted bool) {
	event := createAudioEvent(AudioEventMuteChanged, AudioMuteData{Muted: muted})
	aeb.broadcast(event)
}

// BroadcastMicrophoneStateChanged broadcasts microphone state changes to
// every current subscriber.
func (aeb *AudioEventBroadcaster) BroadcastMicrophoneStateChanged(running, sessionActive bool) {
	event := createAudioEvent(AudioEventMicrophoneState, MicrophoneStateData{
		Running:       running,
		SessionActive: sessionActive,
	})
	aeb.broadcast(event)
}

// BroadcastAudioDeviceChanged broadcasts audio device configuration changes
// (e.g. a source being enabled/disabled) to every current subscriber.
func (aeb *AudioEventBroadcaster) BroadcastAudioDeviceChanged(enabled bool, reason string) {
	event := createAudioEvent(AudioEventDeviceChanged, AudioDeviceChangedData{
		Enabled: enabled,
		Reason:  reason,
	})
	aeb.broadcast(event)
}
|
||||
|
||||
// sendInitialState pushes the current audio state (mute + microphone) to a
// newly registered subscriber so clients do not have to poll after
// connecting. Runs on its own goroutine (see Subscribe).
func (aeb *AudioEventBroadcaster) sendInitialState(connectionID string) {
	// Look the subscriber up under the read lock only; the actual sends
	// happen outside the lock so a slow socket cannot stall other callers.
	aeb.mutex.RLock()
	subscriber, exists := aeb.subscribers[connectionID]
	aeb.mutex.RUnlock()

	if !exists {
		return
	}

	// Send current audio mute state
	muteEvent := AudioEvent{
		Type: AudioEventMuteChanged,
		Data: AudioMuteData{Muted: IsAudioMuted()},
	}
	aeb.sendToSubscriber(subscriber, muteEvent)

	// Send current microphone state using session provider
	sessionProvider := GetSessionProvider()
	sessionActive := sessionProvider.IsSessionActive()
	var running bool
	if sessionActive {
		// The microphone can only be "running" while a session is active and
		// an input manager exists.
		if inputManager := sessionProvider.GetAudioInputManager(); inputManager != nil {
			running = inputManager.IsRunning()
		}
	}

	micStateEvent := AudioEvent{
		Type: AudioEventMicrophoneState,
		Data: MicrophoneStateData{
			Running:       running,
			SessionActive: sessionActive,
		},
	}
	aeb.sendToSubscriber(subscriber, micStateEvent)
}
|
||||
|
||||
// createAudioEvent creates an AudioEvent
|
||||
func createAudioEvent(eventType AudioEventType, data interface{}) AudioEvent {
|
||||
return AudioEvent{
|
||||
Type: eventType,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
// broadcast sends an event to all subscribers and prunes any subscriber
// whose send fails. The subscriber map is snapshotted under the read lock
// so no lock is held during network I/O.
func (aeb *AudioEventBroadcaster) broadcast(event AudioEvent) {
	aeb.mutex.RLock()
	// Create a copy of subscribers to avoid holding the lock during sending
	subscribersCopy := make(map[string]*AudioEventSubscriber)
	for id, sub := range aeb.subscribers {
		subscribersCopy[id] = sub
	}
	aeb.mutex.RUnlock()

	// Track failed subscribers to remove them after sending
	var failedSubscribers []string

	// Send to all subscribers without holding the lock
	for connectionID, subscriber := range subscribersCopy {
		if !aeb.sendToSubscriber(subscriber, event) {
			failedSubscribers = append(failedSubscribers, connectionID)
		}
	}

	// Remove failed subscribers if any. The write lock is reacquired here;
	// a subscriber replaced between snapshot and prune would be deleted by
	// ID regardless — NOTE(review): confirm replacement during broadcast is
	// acceptable to drop.
	if len(failedSubscribers) > 0 {
		aeb.mutex.Lock()
		for _, connectionID := range failedSubscribers {
			delete(aeb.subscribers, connectionID)
			aeb.logger.Warn().Str("connectionID", connectionID).Msg("removed failed audio events subscriber")
		}
		aeb.mutex.Unlock()
	}
}
|
||||
|
||||
// sendToSubscriber sends one event to a specific subscriber with a bounded
// timeout (Config.EventTimeoutSeconds). Returns false when the subscriber's
// context is already cancelled or the write fails, signalling the caller to
// drop the subscription.
func (aeb *AudioEventBroadcaster) sendToSubscriber(subscriber *AudioEventSubscriber, event AudioEvent) bool {
	// Check if subscriber context is already cancelled
	if subscriber.ctx.Err() != nil {
		return false
	}

	ctx, cancel := context.WithTimeout(subscriber.ctx, time.Duration(Config.EventTimeoutSeconds)*time.Second)
	defer cancel()

	err := wsjson.Write(ctx, subscriber.conn, event)
	if err != nil {
		// Don't log network errors for closed connections as warnings, they're expected
		if strings.Contains(err.Error(), "use of closed network connection") ||
			strings.Contains(err.Error(), "connection reset by peer") ||
			strings.Contains(err.Error(), "context canceled") {
			subscriber.logger.Debug().Err(err).Msg("websocket connection closed during audio event send")
		} else {
			subscriber.logger.Warn().Err(err).Msg("failed to send audio event to subscriber")
		}
		return false
	}

	return true
}
|
||||
|
|
@ -1,377 +0,0 @@
|
|||
package audio
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// ZeroCopyAudioFrame represents a reference-counted audio frame for zero-copy operations.
//
// Key Features:
//
//  1. Reference Counting: Multiple components can safely share the same frame data
//     without copying. The frame is returned to the pool when the last reference
//     is released (see Release).
//
//  2. Thread Safety: Data/length/capacity accesses are protected by an RWMutex,
//     allowing concurrent reads with exclusive access for modification.
//
//  3. Pool Integration: Frames are managed by ZeroCopyFramePool, enabling reuse
//     and reducing allocation churn.
//
//  4. Unsafe Pointer Access: For performance-critical CGO operations, direct
//     memory access is provided (UnsafePointer) while reference counting keeps
//     the frame alive.
//
// Usage Pattern:
//
//	frame := pool.Get()  // Acquire frame (refCount = 1)
//	frame.AddRef()       // Share with another component (refCount = 2)
//	data := frame.Data() // Access data safely
//	frame.Release()      // Release reference (refCount = 1)
//	frame.Release()      // Final release, returns to pool (refCount = 0)
//
// Memory Safety notes (as documented by the original author):
//   - Frames should not be modified while shared (refCount > 1) — NOTE(review):
//     SetData/SetDataDirect do not enforce this; callers must.
//   - Data access is bounds-limited to the recorded length.
//   - Pool management is intended to prevent use-after-free scenarios.
type ZeroCopyAudioFrame struct {
	data     []byte       // backing buffer (may exceed length)
	length   int          // valid bytes in data
	capacity int          // recorded capacity of the backing buffer
	refCount int32        // accessed atomically; see AddRef/Release
	mutex    sync.RWMutex // guards data/length/capacity/pooled
	pooled   bool         // false once the frame can no longer return to the pool
}
|
||||
|
||||
// ZeroCopyFramePool manages a pool of reusable zero-copy audio frames.
//
// Three-tier memory management strategy, optimized for real-time audio with
// minimal allocation overhead:
//
// Tier 1 - Pre-allocated Frames: a small number of frames are pre-allocated at
// startup and kept ready for immediate use — the fastest allocation path with
// no latency spikes.
//
// Tier 2 - sync.Pool Cache: standard Go sync.Pool provides efficient reuse
// with GC integration.
//
// Tier 3 - Memory Guard: a configurable limit caps the total number of
// allocated frames to prevent unbounded growth on memory-constrained devices.
//
// Performance characteristics (per the original author's measurements):
//   - Pre-allocated tier: ~10ns allocation time
//   - sync.Pool tier: ~50ns allocation time
//   - Metrics tracking: hit/miss rates for optimization
//
// The pool is designed for embedded systems with limited memory (256MB) where
// predictable memory usage matters more than absolute performance.
type ZeroCopyFramePool struct {
	// Atomic fields MUST be first for ARM32 alignment (int64 fields need 8-byte alignment)
	counter         int64 // frames returned to sync.Pool (atomic)
	hitCount        int64 // pool hit counter (atomic)
	missCount       int64 // pool miss counter (atomic)
	allocationCount int64 // total allocations counter (atomic)

	// Other fields
	pool    sync.Pool
	maxSize int          // maximum frame size in bytes
	mutex   sync.RWMutex // guards preallocated
	// Memory optimization fields
	preallocated []*ZeroCopyAudioFrame // pre-allocated frames for immediate use
	preallocSize int                   // number of pre-allocated frames
	maxPoolSize  int                   // maximum pool size to prevent memory bloat
}
|
||||
|
||||
// NewZeroCopyFramePool creates a new zero-copy frame pool.
//
// The number of pre-allocated frames is derived from a memory budget
// (Config.ZeroCopyPreallocSizeBytes / maxFrameSize), clamped to
// [Config.ZeroCopyMinPreallocFrames, Config.MaxPoolSize].
func NewZeroCopyFramePool(maxFrameSize int) *ZeroCopyFramePool {
	// Pre-allocate frames for immediate availability
	preallocSizeBytes := Config.ZeroCopyPreallocSizeBytes
	maxPoolSize := Config.MaxPoolSize // Limit total pool size

	// Calculate number of frames based on memory budget, not frame count
	preallocFrameCount := preallocSizeBytes / maxFrameSize
	if preallocFrameCount > maxPoolSize {
		preallocFrameCount = maxPoolSize
	}
	if preallocFrameCount < Config.ZeroCopyMinPreallocFrames {
		preallocFrameCount = Config.ZeroCopyMinPreallocFrames
	}

	preallocated := make([]*ZeroCopyAudioFrame, 0, preallocFrameCount)

	// Pre-allocate frames to reduce initial allocation overhead.
	// Buffers are zero-length with full capacity; length grows on SetData.
	for i := 0; i < preallocFrameCount; i++ {
		frame := &ZeroCopyAudioFrame{
			data:     make([]byte, 0, maxFrameSize),
			capacity: maxFrameSize,
			pooled:   true,
		}
		preallocated = append(preallocated, frame)
	}

	return &ZeroCopyFramePool{
		maxSize:      maxFrameSize,
		preallocated: preallocated,
		preallocSize: preallocFrameCount,
		maxPoolSize:  maxPoolSize,
		pool: sync.Pool{
			// New is invoked by sync.Pool on a cache miss.
			New: func() interface{} {
				return &ZeroCopyAudioFrame{
					data:     make([]byte, 0, maxFrameSize),
					capacity: maxFrameSize,
					pooled:   true,
				}
			},
		},
	}
}
|
||||
|
||||
// Get retrieves a zero-copy frame from the pool
|
||||
func (p *ZeroCopyFramePool) Get() *ZeroCopyAudioFrame {
|
||||
// Memory guard: Track allocation count to prevent excessive memory usage
|
||||
allocationCount := atomic.LoadInt64(&p.allocationCount)
|
||||
if allocationCount > int64(p.maxPoolSize*2) {
|
||||
// If we've allocated too many frames, force pool reuse
|
||||
frame := p.pool.Get().(*ZeroCopyAudioFrame)
|
||||
frame.mutex.Lock()
|
||||
atomic.StoreInt32(&frame.refCount, 1)
|
||||
frame.length = 0
|
||||
frame.data = frame.data[:0]
|
||||
frame.mutex.Unlock()
|
||||
|
||||
return frame
|
||||
}
|
||||
|
||||
// First try pre-allocated frames for fastest access
|
||||
p.mutex.Lock()
|
||||
if len(p.preallocated) > 0 {
|
||||
frame := p.preallocated[len(p.preallocated)-1]
|
||||
p.preallocated = p.preallocated[:len(p.preallocated)-1]
|
||||
p.mutex.Unlock()
|
||||
|
||||
frame.mutex.Lock()
|
||||
atomic.StoreInt32(&frame.refCount, 1)
|
||||
frame.length = 0
|
||||
frame.data = frame.data[:0]
|
||||
frame.mutex.Unlock()
|
||||
|
||||
atomic.AddInt64(&p.hitCount, 1)
|
||||
return frame
|
||||
}
|
||||
p.mutex.Unlock()
|
||||
|
||||
// Try sync.Pool next and track allocation
|
||||
frame := p.pool.Get().(*ZeroCopyAudioFrame)
|
||||
frame.mutex.Lock()
|
||||
atomic.StoreInt32(&frame.refCount, 1)
|
||||
frame.length = 0
|
||||
frame.data = frame.data[:0]
|
||||
frame.mutex.Unlock()
|
||||
|
||||
atomic.AddInt64(&p.hitCount, 1)
|
||||
|
||||
return frame
|
||||
}
|
||||
|
||||
// Put returns a zero-copy frame to the pool.
//
// Frames flagged non-pooled (after SetDataDirect, or after SetData grew the
// buffer) are dropped for the GC to reclaim. Returned frames go first to the
// pre-allocated tier, then to sync.Pool, subject to the size cap.
func (p *ZeroCopyFramePool) Put(frame *ZeroCopyAudioFrame) {
	if frame == nil || !frame.pooled {
		return
	}

	// Reset frame state for reuse
	frame.mutex.Lock()
	atomic.StoreInt32(&frame.refCount, 0)
	frame.length = 0
	frame.data = frame.data[:0]
	frame.mutex.Unlock()

	// First try to return to pre-allocated pool for fastest reuse
	p.mutex.Lock()
	if len(p.preallocated) < p.preallocSize {
		p.preallocated = append(p.preallocated, frame)
		p.mutex.Unlock()
		return
	}
	p.mutex.Unlock()

	// Check pool size limit to prevent excessive memory usage.
	// NOTE(review): counter is only ever incremented (below) and never
	// decremented when frames leave the pool, so once it reaches maxPoolSize
	// this check rejects all future sync.Pool returns — confirm intent.
	p.mutex.RLock()
	currentCount := atomic.LoadInt64(&p.counter)
	p.mutex.RUnlock()

	if currentCount >= int64(p.maxPoolSize) {
		return // Pool is full, let GC handle this frame
	}

	// Return to sync.Pool
	p.pool.Put(frame)
	atomic.AddInt64(&p.counter, 1)
}
|
||||
|
||||
// Data returns the frame data as a slice (zero-copy view).
// The returned slice aliases the frame's buffer — it is only valid while the
// caller holds a reference to the frame.
func (f *ZeroCopyAudioFrame) Data() []byte {
	f.mutex.RLock()
	defer f.mutex.RUnlock()
	return f.data[:f.length]
}
|
||||
|
||||
// SetData copies data into the frame's buffer ("zero-copy" only in the sense
// that no allocation occurs while data fits the existing capacity).
//
// If data exceeds the current capacity, the buffer is reallocated and the
// frame is permanently marked non-pooled. Always returns nil.
// NOTE(review): no refCount check is performed — callers must not mutate a
// frame that is currently shared (refCount > 1).
func (f *ZeroCopyAudioFrame) SetData(data []byte) error {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(data) > f.capacity {
		// Need to reallocate - not zero-copy but necessary
		f.data = make([]byte, len(data))
		f.capacity = len(data)
		f.pooled = false // Can't return to pool anymore
	}

	// Zero-copy assignment when data fits in existing buffer
	if cap(f.data) >= len(data) {
		f.data = f.data[:len(data)]
		copy(f.data, data)
	} else {
		f.data = append(f.data[:0], data...)
	}
	f.length = len(data)
	return nil
}
|
||||
|
||||
// SetDataDirect sets frame data using direct buffer assignment (true zero-copy).
// WARNING: The caller must ensure the buffer remains valid for the frame's
// lifetime; the frame is permanently excluded from pooling afterwards.
func (f *ZeroCopyAudioFrame) SetDataDirect(data []byte) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.data = data
	f.length = len(data)
	f.capacity = cap(data)
	f.pooled = false // Direct assignment means we can't pool this frame
}
|
||||
|
||||
// AddRef increments the reference count atomically. Call before sharing the
// frame with another component; pair with Release.
func (f *ZeroCopyAudioFrame) AddRef() {
	atomic.AddInt32(&f.refCount, 1)
}
|
||||
|
||||
// Release decrements the reference count atomically.
// Returns true if this was the final reference, in which case a pooled frame
// is returned to the global pool.
// NOTE(review): an unbalanced extra Release drives refCount negative and is
// not detected — callers must keep AddRef/Release strictly paired.
func (f *ZeroCopyAudioFrame) Release() bool {
	newCount := atomic.AddInt32(&f.refCount, -1)
	if newCount == 0 {
		// Final reference released, return to pool if pooled
		if f.pooled {
			globalZeroCopyPool.Put(f)
		}
		return true
	}
	return false
}
|
||||
|
||||
// RefCount returns the current reference count atomically.
// Intended for diagnostics; the value may be stale by the time it is used.
func (f *ZeroCopyAudioFrame) RefCount() int32 {
	return atomic.LoadInt32(&f.refCount)
}

// Length returns the current data length in bytes.
func (f *ZeroCopyAudioFrame) Length() int {
	f.mutex.RLock()
	defer f.mutex.RUnlock()
	return f.length
}

// Capacity returns the recorded buffer capacity in bytes.
func (f *ZeroCopyAudioFrame) Capacity() int {
	f.mutex.RLock()
	defer f.mutex.RUnlock()
	return f.capacity
}
|
||||
|
||||
// UnsafePointer returns an unsafe pointer to the first byte of the data for
// CGO calls, or nil when the frame is empty.
// WARNING: Only use this for CGO interop; the caller must hold a reference
// so the frame (and buffer) outlive the native call.
func (f *ZeroCopyAudioFrame) UnsafePointer() unsafe.Pointer {
	f.mutex.RLock()
	defer f.mutex.RUnlock()
	if len(f.data) == 0 {
		return nil
	}
	return unsafe.Pointer(&f.data[0])
}
|
||||
|
||||
// GetZeroCopyPoolStats returns detailed statistics about the zero-copy frame
// pool. HitRate is expressed as a percentage of total Get requests served by
// the pre-allocated tier.
func (p *ZeroCopyFramePool) GetZeroCopyPoolStats() ZeroCopyFramePoolStats {
	p.mutex.RLock()
	preallocatedCount := len(p.preallocated)
	currentCount := atomic.LoadInt64(&p.counter)
	p.mutex.RUnlock()

	hitCount := atomic.LoadInt64(&p.hitCount)
	missCount := atomic.LoadInt64(&p.missCount)
	allocationCount := atomic.LoadInt64(&p.allocationCount)
	totalRequests := hitCount + missCount

	// Guard against division by zero before any Get has been recorded.
	var hitRate float64
	if totalRequests > 0 {
		hitRate = float64(hitCount) / float64(totalRequests) * Config.PercentageMultiplier
	}

	return ZeroCopyFramePoolStats{
		MaxFrameSize:      p.maxSize,
		MaxPoolSize:       p.maxPoolSize,
		CurrentPoolSize:   currentCount,
		PreallocatedCount: int64(preallocatedCount),
		PreallocatedMax:   int64(p.preallocSize),
		HitCount:          hitCount,
		MissCount:         missCount,
		AllocationCount:   allocationCount,
		HitRate:           hitRate,
	}
}
|
||||
|
||||
// ZeroCopyFramePoolStats provides detailed zero-copy pool statistics,
// a point-in-time snapshot taken by GetZeroCopyPoolStats.
type ZeroCopyFramePoolStats struct {
	MaxFrameSize      int     // maximum frame size in bytes
	MaxPoolSize       int     // configured cap on pooled frames
	CurrentPoolSize   int64   // frames handed to sync.Pool so far
	PreallocatedCount int64   // frames currently in the pre-allocated tier
	PreallocatedMax   int64   // capacity of the pre-allocated tier
	HitCount          int64   // Get requests served by the pre-allocated tier
	MissCount         int64   // Get requests that fell through to sync.Pool
	AllocationCount   int64   // total allocations tracked by the memory guard
	HitRate           float64 // Percentage
}
|
||||
|
||||
var (
	// globalZeroCopyPool is the process-wide zero-copy frame pool, sized by
	// the configured maximum audio frame size.
	globalZeroCopyPool = NewZeroCopyFramePool(Config.MaxAudioFrameSize)
)

// GetZeroCopyFrame gets a frame from the global pool.
func GetZeroCopyFrame() *ZeroCopyAudioFrame {
	return globalZeroCopyPool.Get()
}

// GetGlobalZeroCopyPoolStats returns statistics for the global zero-copy pool.
func GetGlobalZeroCopyPoolStats() ZeroCopyFramePoolStats {
	return globalZeroCopyPool.GetZeroCopyPoolStats()
}

// PutZeroCopyFrame returns a frame to the global pool.
func PutZeroCopyFrame(frame *ZeroCopyAudioFrame) {
	globalZeroCopyPool.Put(frame)
}
|
||||
|
||||
|
|
@ -118,7 +118,6 @@ func uiInit(rotation uint16) {
|
|||
defer cgoLock.Unlock()
|
||||
|
||||
cRotation := C.u_int16_t(rotation)
|
||||
defer C.free(unsafe.Pointer(&cRotation))
|
||||
|
||||
C.jetkvm_ui_init(cRotation)
|
||||
}
|
||||
|
|
@ -350,7 +349,6 @@ func uiDispSetRotation(rotation uint16) (bool, error) {
|
|||
nativeLogger.Info().Uint16("rotation", rotation).Msg("setting rotation")
|
||||
|
||||
cRotation := C.u_int16_t(rotation)
|
||||
defer C.free(unsafe.Pointer(&cRotation))
|
||||
|
||||
C.jetkvm_ui_set_rotation(cRotation)
|
||||
return true, nil
|
||||
|
|
|
|||
|
|
@ -59,21 +59,21 @@ var defaultGadgetConfig = map[string]gadgetConfigItem{
|
|||
// mass storage
|
||||
"mass_storage_base": massStorageBaseConfig,
|
||||
"mass_storage_lun0": massStorageLun0Config,
|
||||
// audio
|
||||
// audio (UAC1 - USB Audio Class 1)
|
||||
"audio": {
|
||||
order: 4000,
|
||||
device: "uac1.usb0",
|
||||
path: []string{"functions", "uac1.usb0"},
|
||||
configPath: []string{"uac1.usb0"},
|
||||
attrs: gadgetAttributes{
|
||||
"p_chmask": "3",
|
||||
"p_srate": "48000",
|
||||
"p_ssize": "2",
|
||||
"p_volume_present": "0",
|
||||
"c_chmask": "3",
|
||||
"c_srate": "48000",
|
||||
"c_ssize": "2",
|
||||
"c_volume_present": "0",
|
||||
"p_chmask": "3", // Playback: stereo (2 channels)
|
||||
"p_srate": "48000", // Playback: 48kHz sample rate
|
||||
"p_ssize": "2", // Playback: 16-bit (2 bytes)
|
||||
"p_volume_present": "0", // Playback: no volume control
|
||||
"c_chmask": "3", // Capture: stereo (2 channels)
|
||||
"c_srate": "48000", // Capture: 48kHz sample rate
|
||||
"c_ssize": "2", // Capture: 16-bit (2 bytes)
|
||||
"c_volume_present": "0", // Capture: no volume control
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -95,7 +95,6 @@ func (u *UsbGadget) WithTransactionTimeout(fn func() error, timeout time.Duratio
|
|||
case err := <-done:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
u.log.Error().Dur("timeout", timeout).Msg("USB gadget transaction timed out")
|
||||
return fmt.Errorf("USB gadget transaction timed out after %v: %w", timeout, ctx.Err())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,12 +1,10 @@
|
|||
package usbgadget
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func getUdcs() []string {
|
||||
|
|
@ -28,44 +26,17 @@ func getUdcs() []string {
|
|||
}
|
||||
|
||||
// rebindUsb unbinds and rebinds the given UDC with a default 10s timeout.
// See rebindUsbWithTimeout for the ignoreUnbindError semantics.
func rebindUsb(udc string, ignoreUnbindError bool) error {
	return rebindUsbWithTimeout(udc, ignoreUnbindError, 10*time.Second)
}
|
||||
|
||||
func rebindUsbWithTimeout(udc string, ignoreUnbindError bool, timeout time.Duration) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
// Unbind with timeout
|
||||
err := writeFileWithTimeout(ctx, path.Join(dwc3Path, "unbind"), []byte(udc), 0644)
|
||||
err := os.WriteFile(path.Join(dwc3Path, "unbind"), []byte(udc), 0644)
|
||||
if err != nil && !ignoreUnbindError {
|
||||
return fmt.Errorf("failed to unbind UDC: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Small delay to allow unbind to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Bind with timeout
|
||||
err = writeFileWithTimeout(ctx, path.Join(dwc3Path, "bind"), []byte(udc), 0644)
|
||||
err = os.WriteFile(path.Join(dwc3Path, "bind"), []byte(udc), 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind UDC: %w", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeFileWithTimeout(ctx context.Context, filename string, data []byte, perm os.FileMode) error {
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- os.WriteFile(filename, data, perm)
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-done:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("write operation timed out: %w", ctx.Err())
|
||||
}
|
||||
}
|
||||
|
||||
func (u *UsbGadget) rebindUsb(ignoreUnbindError bool) error {
|
||||
u.log.Info().Str("udc", u.udc).Msg("rebinding USB gadget to UDC")
|
||||
return rebindUsb(u.udc, ignoreUnbindError)
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@ var defaultUsbGadgetDevices = Devices{
|
|||
RelativeMouse: true,
|
||||
Keyboard: true,
|
||||
MassStorage: true,
|
||||
Audio: true,
|
||||
}
|
||||
|
||||
type KeysDownState struct {
|
||||
|
|
@ -107,66 +108,6 @@ func NewUsbGadget(name string, enabledDevices *Devices, config *Config, logger *
|
|||
return newUsbGadget(name, defaultGadgetConfig, enabledDevices, config, logger)
|
||||
}
|
||||
|
||||
// CloseHidFiles closes all open HID files
|
||||
func (u *UsbGadget) CloseHidFiles() {
|
||||
u.log.Debug().Msg("closing HID files")
|
||||
|
||||
// Close keyboard HID file
|
||||
if u.keyboardHidFile != nil {
|
||||
if err := u.keyboardHidFile.Close(); err != nil {
|
||||
u.log.Debug().Err(err).Msg("failed to close keyboard HID file")
|
||||
}
|
||||
u.keyboardHidFile = nil
|
||||
}
|
||||
|
||||
// Close absolute mouse HID file
|
||||
if u.absMouseHidFile != nil {
|
||||
if err := u.absMouseHidFile.Close(); err != nil {
|
||||
u.log.Debug().Err(err).Msg("failed to close absolute mouse HID file")
|
||||
}
|
||||
u.absMouseHidFile = nil
|
||||
}
|
||||
|
||||
// Close relative mouse HID file
|
||||
if u.relMouseHidFile != nil {
|
||||
if err := u.relMouseHidFile.Close(); err != nil {
|
||||
u.log.Debug().Err(err).Msg("failed to close relative mouse HID file")
|
||||
}
|
||||
u.relMouseHidFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
// PreOpenHidFiles opens all HID files to reduce input latency
|
||||
func (u *UsbGadget) PreOpenHidFiles() {
|
||||
// Add a small delay to allow USB gadget reconfiguration to complete
|
||||
// This prevents "no such device or address" errors when trying to open HID files
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
if u.enabledDevices.Keyboard {
|
||||
if err := u.openKeyboardHidFile(); err != nil {
|
||||
u.log.Debug().Err(err).Msg("failed to pre-open keyboard HID file")
|
||||
}
|
||||
}
|
||||
if u.enabledDevices.AbsoluteMouse {
|
||||
if u.absMouseHidFile == nil {
|
||||
var err error
|
||||
u.absMouseHidFile, err = os.OpenFile("/dev/hidg1", os.O_RDWR, 0666)
|
||||
if err != nil {
|
||||
u.log.Debug().Err(err).Msg("failed to pre-open absolute mouse HID file")
|
||||
}
|
||||
}
|
||||
}
|
||||
if u.enabledDevices.RelativeMouse {
|
||||
if u.relMouseHidFile == nil {
|
||||
var err error
|
||||
u.relMouseHidFile, err = os.OpenFile("/dev/hidg2", os.O_RDWR, 0666)
|
||||
if err != nil {
|
||||
u.log.Debug().Err(err).Msg("failed to pre-open relative mouse HID file")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newUsbGadget(name string, configMap map[string]gadgetConfigItem, enabledDevices *Devices, config *Config, logger *zerolog.Logger) *UsbGadget {
|
||||
if logger == nil {
|
||||
logger = defaultLogger
|
||||
|
|
@ -249,3 +190,63 @@ func (u *UsbGadget) Close() error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseHidFiles closes all open HID files (keyboard, absolute mouse,
// relative mouse). Close errors are logged at debug level only; each handle
// is nilled regardless so the files can be reopened later.
func (u *UsbGadget) CloseHidFiles() {
	u.log.Debug().Msg("closing HID files")

	// Close keyboard HID file
	if u.keyboardHidFile != nil {
		if err := u.keyboardHidFile.Close(); err != nil {
			u.log.Debug().Err(err).Msg("failed to close keyboard HID file")
		}
		u.keyboardHidFile = nil
	}

	// Close absolute mouse HID file
	if u.absMouseHidFile != nil {
		if err := u.absMouseHidFile.Close(); err != nil {
			u.log.Debug().Err(err).Msg("failed to close absolute mouse HID file")
		}
		u.absMouseHidFile = nil
	}

	// Close relative mouse HID file
	if u.relMouseHidFile != nil {
		if err := u.relMouseHidFile.Close(); err != nil {
			u.log.Debug().Err(err).Msg("failed to close relative mouse HID file")
		}
		u.relMouseHidFile = nil
	}
}
|
||||
|
||||
// PreOpenHidFiles opens the HID device files for every enabled device ahead
// of first use, to reduce input latency. Open failures are logged at debug
// level and left for the write path to retry.
func (u *UsbGadget) PreOpenHidFiles() {
	// Add a small delay to allow USB gadget reconfiguration to complete
	// This prevents "no such device or address" errors when trying to open HID files
	time.Sleep(100 * time.Millisecond)

	if u.enabledDevices.Keyboard {
		if err := u.openKeyboardHidFile(); err != nil {
			u.log.Debug().Err(err).Msg("failed to pre-open keyboard HID file")
		}
	}
	// Absolute mouse is exposed as /dev/hidg1.
	if u.enabledDevices.AbsoluteMouse {
		if u.absMouseHidFile == nil {
			var err error
			u.absMouseHidFile, err = os.OpenFile("/dev/hidg1", os.O_RDWR, 0666)
			if err != nil {
				u.log.Debug().Err(err).Msg("failed to pre-open absolute mouse HID file")
			}
		}
	}
	// Relative mouse is exposed as /dev/hidg2.
	if u.enabledDevices.RelativeMouse {
		if u.relMouseHidFile == nil {
			var err error
			u.relMouseHidFile, err = os.OpenFile("/dev/hidg2", os.O_RDWR, 0666)
			if err != nil {
				u.log.Debug().Err(err).Msg("failed to pre-open relative mouse HID file")
			}
		}
	}
}
|
||||
|
|
|
|||
259
jsonrpc.go
259
jsonrpc.go
|
|
@ -18,7 +18,6 @@ import (
|
|||
"github.com/rs/zerolog"
|
||||
"go.bug.st/serial"
|
||||
|
||||
"github.com/jetkvm/kvm/internal/audio"
|
||||
"github.com/jetkvm/kvm/internal/hidrpc"
|
||||
"github.com/jetkvm/kvm/internal/usbgadget"
|
||||
"github.com/jetkvm/kvm/internal/utils"
|
||||
|
|
@ -701,7 +700,8 @@ func rpcSetUsbConfig(usbConfig usbgadget.Config) error {
|
|||
LoadConfig()
|
||||
config.UsbConfig = &usbConfig
|
||||
gadget.SetGadgetConfig(config.UsbConfig)
|
||||
return updateUsbRelatedConfig()
|
||||
wasAudioEnabled := config.UsbDevices != nil && config.UsbDevices.Audio
|
||||
return updateUsbRelatedConfig(wasAudioEnabled)
|
||||
}
|
||||
|
||||
func rpcGetWakeOnLanDevices() ([]WakeOnLanDevice, error) {
|
||||
|
|
@ -912,101 +912,67 @@ func rpcGetUsbDevices() (usbgadget.Devices, error) {
|
|||
return *config.UsbDevices, nil
|
||||
}
|
||||
|
||||
func updateUsbRelatedConfig() error {
|
||||
func updateUsbRelatedConfig(wasAudioEnabled bool) error {
|
||||
ensureConfigLoaded()
|
||||
|
||||
audioSourceChanged := false
|
||||
|
||||
// If USB audio is being disabled and audio output source is USB, switch to HDMI
|
||||
if config.UsbDevices != nil && !config.UsbDevices.Audio && config.AudioOutputSource == "usb" {
|
||||
audioMutex.Lock()
|
||||
config.AudioOutputSource = "hdmi"
|
||||
useUSBForAudioOutput = false
|
||||
audioSourceChanged = true
|
||||
audioMutex.Unlock()
|
||||
}
|
||||
|
||||
// If USB audio is being enabled (was disabled, now enabled), switch to USB
|
||||
if config.UsbDevices != nil && config.UsbDevices.Audio && !wasAudioEnabled {
|
||||
audioMutex.Lock()
|
||||
config.AudioOutputSource = "usb"
|
||||
useUSBForAudioOutput = true
|
||||
audioSourceChanged = true
|
||||
audioMutex.Unlock()
|
||||
}
|
||||
|
||||
// Stop audio subprocesses before USB reconfiguration
|
||||
// Input always uses USB, output depends on audioSourceChanged
|
||||
audioMutex.Lock()
|
||||
stopInputSubprocessLocked()
|
||||
if audioSourceChanged {
|
||||
stopOutputSubprocessLocked()
|
||||
}
|
||||
audioMutex.Unlock()
|
||||
|
||||
if err := gadget.UpdateGadgetConfig(); err != nil {
|
||||
return fmt.Errorf("failed to write gadget config: %w", err)
|
||||
}
|
||||
|
||||
if err := SaveConfig(); err != nil {
|
||||
return fmt.Errorf("failed to save config: %w", err)
|
||||
}
|
||||
|
||||
// Restart audio if source changed or USB audio is enabled with active connections
|
||||
// The subprocess supervisor and relay handle device readiness via retry logic
|
||||
if activeConnections.Load() > 0 && (audioSourceChanged || (config.UsbDevices != nil && config.UsbDevices.Audio)) {
|
||||
if err := startAudioSubprocesses(); err != nil {
|
||||
logger.Warn().Err(err).Msg("Failed to restart audio after USB reconfiguration")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func rpcSetUsbDevices(usbDevices usbgadget.Devices) error {
|
||||
// Check if audio state is changing
|
||||
previousAudioEnabled := config.UsbDevices != nil && config.UsbDevices.Audio
|
||||
newAudioEnabled := usbDevices.Audio
|
||||
|
||||
// Handle audio process management if state is changing
|
||||
if previousAudioEnabled != newAudioEnabled {
|
||||
if !newAudioEnabled {
|
||||
// Stop audio processes when audio is disabled
|
||||
logger.Info().Msg("stopping audio processes due to audio device being disabled")
|
||||
|
||||
// Stop audio input manager if active
|
||||
if currentSession != nil && currentSession.AudioInputManager != nil && currentSession.AudioInputManager.IsRunning() {
|
||||
logger.Info().Msg("stopping audio input manager")
|
||||
currentSession.AudioInputManager.Stop()
|
||||
// Wait for audio input to fully stop
|
||||
for i := 0; i < 50; i++ { // Wait up to 5 seconds
|
||||
if !currentSession.AudioInputManager.IsRunning() {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
logger.Info().Msg("audio input manager stopped")
|
||||
}
|
||||
|
||||
// Stop audio output supervisor
|
||||
if audioSupervisor != nil && audioSupervisor.IsRunning() {
|
||||
logger.Info().Msg("stopping audio output supervisor")
|
||||
audioSupervisor.Stop()
|
||||
// Wait for audio processes to fully stop before proceeding
|
||||
for i := 0; i < 50; i++ { // Wait up to 5 seconds
|
||||
if !audioSupervisor.IsRunning() {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
logger.Info().Msg("audio output supervisor stopped")
|
||||
}
|
||||
|
||||
logger.Info().Msg("audio processes stopped, proceeding with USB gadget reconfiguration")
|
||||
} else if newAudioEnabled && audioSupervisor != nil && !audioSupervisor.IsRunning() {
|
||||
// Start audio processes when audio is enabled (after USB reconfiguration)
|
||||
logger.Info().Msg("audio will be started after USB gadget reconfiguration")
|
||||
}
|
||||
}
|
||||
|
||||
wasAudioEnabled := config.UsbDevices != nil && config.UsbDevices.Audio
|
||||
config.UsbDevices = &usbDevices
|
||||
gadget.SetGadgetDevices(config.UsbDevices)
|
||||
|
||||
// Apply USB gadget configuration changes
|
||||
err := updateUsbRelatedConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start audio processes after successful USB reconfiguration if needed
|
||||
if previousAudioEnabled != newAudioEnabled && newAudioEnabled && audioSupervisor != nil {
|
||||
// Ensure supervisor is fully stopped before starting
|
||||
for i := 0; i < 50; i++ { // Wait up to 5 seconds
|
||||
if !audioSupervisor.IsRunning() {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
logger.Info().Msg("starting audio processes after USB gadget reconfiguration")
|
||||
if err := audioSupervisor.Start(); err != nil {
|
||||
logger.Error().Err(err).Msg("failed to start audio supervisor")
|
||||
// Don't return error here as USB reconfiguration was successful
|
||||
} else {
|
||||
// Broadcast audio device change event to notify WebRTC session
|
||||
broadcaster := audio.GetAudioEventBroadcaster()
|
||||
broadcaster.BroadcastAudioDeviceChanged(true, "usb_reconfiguration")
|
||||
logger.Info().Msg("broadcasted audio device change event after USB reconfiguration")
|
||||
}
|
||||
} else if previousAudioEnabled != newAudioEnabled {
|
||||
// Broadcast audio device change event for disabling audio
|
||||
broadcaster := audio.GetAudioEventBroadcaster()
|
||||
broadcaster.BroadcastAudioDeviceChanged(newAudioEnabled, "usb_reconfiguration")
|
||||
logger.Info().Bool("enabled", newAudioEnabled).Msg("broadcasted audio device change event after USB reconfiguration")
|
||||
}
|
||||
|
||||
return nil
|
||||
return updateUsbRelatedConfig(wasAudioEnabled)
|
||||
}
|
||||
|
||||
func rpcSetUsbDeviceState(device string, enabled bool) error {
|
||||
wasAudioEnabled := config.UsbDevices != nil && config.UsbDevices.Audio
|
||||
|
||||
switch device {
|
||||
case "absoluteMouse":
|
||||
config.UsbDevices.AbsoluteMouse = enabled
|
||||
|
|
@ -1017,67 +983,42 @@ func rpcSetUsbDeviceState(device string, enabled bool) error {
|
|||
case "massStorage":
|
||||
config.UsbDevices.MassStorage = enabled
|
||||
case "audio":
|
||||
// Handle audio process management
|
||||
if !enabled {
|
||||
// Stop audio processes when audio is disabled
|
||||
logger.Info().Msg("stopping audio processes due to audio device being disabled")
|
||||
|
||||
// Stop audio input manager if active
|
||||
if currentSession != nil && currentSession.AudioInputManager != nil && currentSession.AudioInputManager.IsRunning() {
|
||||
logger.Info().Msg("stopping audio input manager")
|
||||
currentSession.AudioInputManager.Stop()
|
||||
// Wait for audio input to fully stop
|
||||
for i := 0; i < 50; i++ { // Wait up to 5 seconds
|
||||
if !currentSession.AudioInputManager.IsRunning() {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
logger.Info().Msg("audio input manager stopped")
|
||||
}
|
||||
|
||||
// Stop audio output supervisor
|
||||
if audioSupervisor != nil && audioSupervisor.IsRunning() {
|
||||
logger.Info().Msg("stopping audio output supervisor")
|
||||
audioSupervisor.Stop()
|
||||
// Wait for audio processes to fully stop
|
||||
for i := 0; i < 50; i++ { // Wait up to 5 seconds
|
||||
if !audioSupervisor.IsRunning() {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
logger.Info().Msg("audio output supervisor stopped")
|
||||
}
|
||||
} else if enabled && audioSupervisor != nil {
|
||||
// Ensure supervisor is fully stopped before starting
|
||||
for i := 0; i < 50; i++ { // Wait up to 5 seconds
|
||||
if !audioSupervisor.IsRunning() {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
// Start audio processes when audio is enabled
|
||||
logger.Info().Msg("starting audio processes due to audio device being enabled")
|
||||
if err := audioSupervisor.Start(); err != nil {
|
||||
logger.Error().Err(err).Msg("failed to start audio supervisor")
|
||||
} else {
|
||||
// Broadcast audio device change event to notify WebRTC session
|
||||
broadcaster := audio.GetAudioEventBroadcaster()
|
||||
broadcaster.BroadcastAudioDeviceChanged(true, "device_enabled")
|
||||
logger.Info().Msg("broadcasted audio device change event after enabling audio device")
|
||||
}
|
||||
// Always broadcast the audio device change event regardless of enable/disable
|
||||
broadcaster := audio.GetAudioEventBroadcaster()
|
||||
broadcaster.BroadcastAudioDeviceChanged(enabled, "device_state_changed")
|
||||
logger.Info().Bool("enabled", enabled).Msg("broadcasted audio device state change event")
|
||||
}
|
||||
config.UsbDevices.Audio = enabled
|
||||
default:
|
||||
return fmt.Errorf("invalid device: %s", device)
|
||||
}
|
||||
gadget.SetGadgetDevices(config.UsbDevices)
|
||||
return updateUsbRelatedConfig()
|
||||
return updateUsbRelatedConfig(wasAudioEnabled)
|
||||
}
|
||||
|
||||
func rpcGetAudioOutputSource() (string, error) {
|
||||
ensureConfigLoaded()
|
||||
return config.AudioOutputSource, nil
|
||||
}
|
||||
|
||||
func rpcSetAudioOutputSource(source string) error {
|
||||
if source != "hdmi" && source != "usb" {
|
||||
return fmt.Errorf("invalid audio output source: %s (must be 'hdmi' or 'usb')", source)
|
||||
}
|
||||
|
||||
useUSB := source == "usb"
|
||||
return SetAudioOutputSource(useUSB)
|
||||
}
|
||||
|
||||
func rpcGetAudioOutputEnabled() (bool, error) {
|
||||
return audioOutputEnabled.Load(), nil
|
||||
}
|
||||
|
||||
func rpcSetAudioOutputEnabled(enabled bool) error {
|
||||
return SetAudioOutputEnabled(enabled)
|
||||
}
|
||||
|
||||
func rpcGetAudioInputEnabled() (bool, error) {
|
||||
return audioInputEnabled.Load(), nil
|
||||
}
|
||||
|
||||
func rpcSetAudioInputEnabled(enabled bool) error {
|
||||
return SetAudioInputEnabled(enabled)
|
||||
}
|
||||
|
||||
func rpcSetCloudUrl(apiUrl string, appUrl string) error {
|
||||
|
|
@ -1317,35 +1258,6 @@ func rpcDoExecuteKeyboardMacro(ctx context.Context, macro []hidrpc.KeyboardMacro
|
|||
return nil
|
||||
}
|
||||
|
||||
// Audio control RPC handlers - delegated to audio package
|
||||
func rpcAudioMute(muted bool) error {
|
||||
return audio.RPCAudioMute(muted)
|
||||
}
|
||||
|
||||
func rpcMicrophoneStart() error {
|
||||
return audio.RPCMicrophoneStart()
|
||||
}
|
||||
|
||||
func rpcMicrophoneStop() error {
|
||||
return audio.RPCMicrophoneStop()
|
||||
}
|
||||
|
||||
func rpcAudioStatus() (map[string]interface{}, error) {
|
||||
return audio.RPCAudioStatus()
|
||||
}
|
||||
|
||||
func rpcMicrophoneStatus() (map[string]interface{}, error) {
|
||||
return audio.RPCMicrophoneStatus()
|
||||
}
|
||||
|
||||
func rpcMicrophoneReset() error {
|
||||
return audio.RPCMicrophoneReset()
|
||||
}
|
||||
|
||||
func rpcMicrophoneMute(muted bool) error {
|
||||
return audio.RPCMicrophoneMute(muted)
|
||||
}
|
||||
|
||||
var rpcHandlers = map[string]RPCHandler{
|
||||
"ping": {Func: rpcPing},
|
||||
"reboot": {Func: rpcReboot, Params: []string{"force"}},
|
||||
|
|
@ -1396,13 +1308,6 @@ var rpcHandlers = map[string]RPCHandler{
|
|||
"isUpdatePending": {Func: rpcIsUpdatePending},
|
||||
"getUsbEmulationState": {Func: rpcGetUsbEmulationState},
|
||||
"setUsbEmulationState": {Func: rpcSetUsbEmulationState, Params: []string{"enabled"}},
|
||||
"audioMute": {Func: rpcAudioMute, Params: []string{"muted"}},
|
||||
"audioStatus": {Func: rpcAudioStatus},
|
||||
"microphoneStart": {Func: rpcMicrophoneStart},
|
||||
"microphoneStop": {Func: rpcMicrophoneStop},
|
||||
"microphoneStatus": {Func: rpcMicrophoneStatus},
|
||||
"microphoneReset": {Func: rpcMicrophoneReset},
|
||||
"microphoneMute": {Func: rpcMicrophoneMute, Params: []string{"muted"}},
|
||||
"getUsbConfig": {Func: rpcGetUsbConfig},
|
||||
"setUsbConfig": {Func: rpcSetUsbConfig, Params: []string{"usbConfig"}},
|
||||
"checkMountUrl": {Func: rpcCheckMountUrl, Params: []string{"url"}},
|
||||
|
|
@ -1432,6 +1337,12 @@ var rpcHandlers = map[string]RPCHandler{
|
|||
"getUsbDevices": {Func: rpcGetUsbDevices},
|
||||
"setUsbDevices": {Func: rpcSetUsbDevices, Params: []string{"devices"}},
|
||||
"setUsbDeviceState": {Func: rpcSetUsbDeviceState, Params: []string{"device", "enabled"}},
|
||||
"getAudioOutputSource": {Func: rpcGetAudioOutputSource},
|
||||
"setAudioOutputSource": {Func: rpcSetAudioOutputSource, Params: []string{"source"}},
|
||||
"getAudioOutputEnabled": {Func: rpcGetAudioOutputEnabled},
|
||||
"setAudioOutputEnabled": {Func: rpcSetAudioOutputEnabled, Params: []string{"enabled"}},
|
||||
"getAudioInputEnabled": {Func: rpcGetAudioInputEnabled},
|
||||
"setAudioInputEnabled": {Func: rpcSetAudioInputEnabled, Params: []string{"enabled"}},
|
||||
"setCloudUrl": {Func: rpcSetCloudUrl, Params: []string{"apiUrl", "appUrl"}},
|
||||
"getKeyboardLayout": {Func: rpcGetKeyboardLayout},
|
||||
"setKeyboardLayout": {Func: rpcSetKeyboardLayout, Params: []string{"layout"}},
|
||||
|
|
|
|||
138
main.go
138
main.go
|
|
@ -2,7 +2,6 @@ package kvm
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
|
@ -10,123 +9,11 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/gwatts/rootcerts"
|
||||
"github.com/jetkvm/kvm/internal/audio"
|
||||
"github.com/pion/webrtc/v4"
|
||||
)
|
||||
|
||||
var (
|
||||
appCtx context.Context
|
||||
audioProcessDone chan struct{}
|
||||
audioSupervisor *audio.AudioOutputSupervisor
|
||||
)
|
||||
|
||||
func startAudioSubprocess() error {
|
||||
// Initialize validation cache for optimal performance
|
||||
audio.InitValidationCache()
|
||||
|
||||
// Create audio server supervisor
|
||||
audioSupervisor = audio.NewAudioOutputSupervisor()
|
||||
|
||||
// Set the global supervisor for access from audio package
|
||||
audio.SetAudioOutputSupervisor(audioSupervisor)
|
||||
|
||||
// Create and register audio input supervisor (but don't start it)
|
||||
// Audio input will be started on-demand through the UI
|
||||
audioInputSupervisor := audio.NewAudioInputSupervisor()
|
||||
audio.SetAudioInputSupervisor(audioInputSupervisor)
|
||||
|
||||
// Set optimal OPUS configuration for audio input supervisor (48 kbps mono mic)
|
||||
audioConfig := audio.Config
|
||||
audioInputSupervisor.SetOpusConfig(
|
||||
audioConfig.OptimalInputBitrate*1000, // Convert kbps to bps (48 kbps)
|
||||
audioConfig.OptimalOpusComplexity, // Complexity 1 for minimal CPU
|
||||
audioConfig.OptimalOpusVBR, // VBR enabled
|
||||
audioConfig.OptimalOpusSignalType, // MUSIC signal type
|
||||
audioConfig.OptimalOpusBandwidth, // WIDEBAND for 48kHz
|
||||
audioConfig.OptimalOpusDTX, // DTX disabled
|
||||
)
|
||||
|
||||
// Note: Audio input supervisor is NOT started here - it will be started on-demand
|
||||
// when the user activates microphone input through the UI
|
||||
|
||||
// Set up callbacks for process lifecycle events
|
||||
audioSupervisor.SetCallbacks(
|
||||
// onProcessStart
|
||||
func(pid int) {
|
||||
logger.Info().Int("pid", pid).Msg("audio server process started")
|
||||
|
||||
// Wait for audio output server to be fully ready before starting relay
|
||||
// This prevents "no client connected" errors during quality changes
|
||||
go func() {
|
||||
// Give the audio output server time to initialize and start listening
|
||||
// Increased delay to reduce frame drops during connection establishment
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Start audio relay system for main process
|
||||
// If there's an active WebRTC session, use its audio track
|
||||
var audioTrack *webrtc.TrackLocalStaticSample
|
||||
if currentSession != nil && currentSession.AudioTrack != nil {
|
||||
audioTrack = currentSession.AudioTrack
|
||||
logger.Info().Msg("restarting audio relay with existing WebRTC audio track")
|
||||
} else {
|
||||
logger.Info().Msg("starting audio relay without WebRTC track (will be updated when session is created)")
|
||||
}
|
||||
|
||||
if err := audio.StartAudioRelay(audioTrack); err != nil {
|
||||
logger.Error().Err(err).Msg("failed to start audio relay")
|
||||
// Retry once after additional delay if initial attempt fails
|
||||
time.Sleep(1 * time.Second)
|
||||
if err := audio.StartAudioRelay(audioTrack); err != nil {
|
||||
logger.Error().Err(err).Msg("failed to start audio relay after retry")
|
||||
}
|
||||
}
|
||||
}()
|
||||
},
|
||||
// onProcessExit
|
||||
func(pid int, exitCode int, crashed bool) {
|
||||
if crashed {
|
||||
logger.Error().Int("pid", pid).Int("exit_code", exitCode).Msg("audio server process crashed")
|
||||
} else {
|
||||
logger.Info().Int("pid", pid).Msg("audio server process exited gracefully")
|
||||
}
|
||||
|
||||
// Stop audio relay when process exits
|
||||
audio.StopAudioRelay()
|
||||
},
|
||||
// onRestart
|
||||
func(attempt int, delay time.Duration) {
|
||||
logger.Warn().Int("attempt", attempt).Dur("delay", delay).Msg("restarting audio server process")
|
||||
},
|
||||
)
|
||||
|
||||
// Check if USB audio device is enabled before starting audio processes
|
||||
if config.UsbDevices == nil || !config.UsbDevices.Audio {
|
||||
logger.Info().Msg("USB audio device disabled - skipping audio supervisor startup")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start the supervisor
|
||||
if err := audioSupervisor.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start audio supervisor: %w", err)
|
||||
}
|
||||
|
||||
// Monitor supervisor and handle cleanup
|
||||
go func() {
|
||||
defer close(audioProcessDone)
|
||||
|
||||
// Wait for supervisor to stop
|
||||
for audioSupervisor.IsRunning() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
logger.Info().Msg("audio supervisor stopped")
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
var appCtx context.Context
|
||||
|
||||
func Main() {
|
||||
audioProcessDone = make(chan struct{})
|
||||
LoadConfig()
|
||||
|
||||
var cancel context.CancelFunc
|
||||
|
|
@ -147,6 +34,7 @@ func Main() {
|
|||
go confirmCurrentSystem()
|
||||
|
||||
initNative(systemVersionLocal, appVersionLocal)
|
||||
initAudio()
|
||||
|
||||
http.DefaultClient.Timeout = 1 * time.Minute
|
||||
|
||||
|
|
@ -178,20 +66,6 @@ func Main() {
|
|||
|
||||
// initialize usb gadget
|
||||
initUsbGadget()
|
||||
|
||||
// Start audio subprocess
|
||||
err = startAudioSubprocess()
|
||||
if err != nil {
|
||||
logger.Warn().Err(err).Msg("failed to start audio subprocess")
|
||||
}
|
||||
|
||||
// Initialize session provider for audio events
|
||||
initializeAudioSessionProvider()
|
||||
|
||||
// Initialize audio event broadcaster for WebSocket-based real-time updates
|
||||
audio.InitializeAudioEventBroadcaster()
|
||||
logger.Info().Msg("audio event broadcaster initialized")
|
||||
|
||||
if err := setInitialVirtualMediaState(); err != nil {
|
||||
logger.Warn().Err(err).Msg("failed to set initial virtual media state")
|
||||
}
|
||||
|
|
@ -251,12 +125,8 @@ func Main() {
|
|||
<-sigs
|
||||
logger.Info().Msg("JetKVM Shutting Down")
|
||||
|
||||
// Stop audio supervisor and wait for cleanup
|
||||
if audioSupervisor != nil {
|
||||
logger.Info().Msg("stopping audio supervisor")
|
||||
audioSupervisor.Stop()
|
||||
}
|
||||
<-audioProcessDone
|
||||
stopAudioSubprocesses()
|
||||
|
||||
//if fuseServer != nil {
|
||||
// err := setMassStorageImage(" ")
|
||||
// if err != nil {
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
package kvm
|
||||
|
||||
import (
|
||||
"github.com/jetkvm/kvm/internal/audio"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
|
||||
"github.com/prometheus/common/version"
|
||||
|
|
@ -11,7 +10,4 @@ func initPrometheus() {
|
|||
// A Prometheus metrics endpoint.
|
||||
version.Version = builtAppVersion
|
||||
prometheus.MustRegister(versioncollector.NewCollector("jetkvm"))
|
||||
|
||||
// Start audio metrics collection
|
||||
audio.StartMetricsUpdater()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/bin/sh
|
||||
JSON_OUTPUT=false
|
||||
GET_COMMANDS=false
|
||||
if [ "$1" = "-json" ]; then
|
||||
|
|
|
|||
|
|
@ -15,52 +15,27 @@ if [ "$CLEAN_ALL" -eq 1 ]; then
|
|||
fi
|
||||
|
||||
TMP_DIR=$(mktemp -d)
|
||||
# Ensure temp directory persists and is cleaned up properly
|
||||
# Also handle SIGINT (CTRL+C) and SIGTERM - kill all child processes
|
||||
trap 'pkill -P $$; rm -rf "${TMP_DIR}"; exit 1' INT TERM
|
||||
pushd "${CGO_PATH}" > /dev/null
|
||||
|
||||
msg_info "▶ Generating UI index"
|
||||
./ui_index.gen.sh
|
||||
|
||||
msg_info "▶ Building native library"
|
||||
|
||||
# Fix clock skew issues by resetting file timestamps
|
||||
find "${CGO_PATH}" -type f -exec touch {} +
|
||||
|
||||
# Only clean CMake cache if the build configuration files don't exist
|
||||
# This prevents re-running expensive compiler detection on every build
|
||||
if [ ! -f "${BUILD_DIR}/CMakeCache.txt" ]; then
|
||||
msg_info "First build - CMake will configure the project"
|
||||
fi
|
||||
|
||||
VERBOSE=1 cmake -B "${BUILD_DIR}" \
|
||||
-DCMAKE_SYSTEM_PROCESSOR=armv7l \
|
||||
-DCMAKE_SYSTEM_NAME=Linux \
|
||||
-DCMAKE_CROSSCOMPILING=1 \
|
||||
-DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_FILE \
|
||||
-DCMAKE_C_COMPILER_WORKS=1 \
|
||||
-DCMAKE_CXX_COMPILER_WORKS=1 \
|
||||
-DCMAKE_C_ABI_COMPILED=1 \
|
||||
-DCMAKE_CXX_ABI_COMPILED=1 \
|
||||
-DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY \
|
||||
-DLV_BUILD_USE_KCONFIG=ON \
|
||||
-DLV_BUILD_DEFCONFIG_PATH=${CGO_PATH}/lvgl_defconfig \
|
||||
-DCONFIG_LV_BUILD_EXAMPLES=OFF \
|
||||
-DCONFIG_LV_BUILD_DEMOS=OFF \
|
||||
-DSKIP_GLIBC_NAMES=ON \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_INSTALL_PREFIX="${TMP_DIR}"
|
||||
|
||||
msg_info "▶ Copying built library and header files"
|
||||
# Clock skew can cause make to return 1 even when build succeeds
|
||||
# We verify success by checking if the output file exists
|
||||
cmake --build "${BUILD_DIR}" --target install || true
|
||||
|
||||
if [ ! -f "${TMP_DIR}/lib/libjknative.a" ]; then
|
||||
msg_err "Build failed - libjknative.a not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cmake --build "${BUILD_DIR}" --target install
|
||||
cp -r "${TMP_DIR}/include" "${CGO_PATH}"
|
||||
cp -r "${TMP_DIR}/lib" "${CGO_PATH}"
|
||||
rm -rf "${TMP_DIR}"
|
||||
|
|
|
|||
|
|
@ -16,8 +16,7 @@ show_help() {
|
|||
echo " --run-go-tests-only Run go tests and exit"
|
||||
echo " --skip-ui-build Skip frontend/UI build"
|
||||
echo " --skip-native-build Skip native build"
|
||||
echo " --skip-audio-binaries Skip audio binaries build if they exist"
|
||||
echo " --disable-docker Disable docker build (auto-detected if Docker unavailable)"
|
||||
echo " --disable-docker Disable docker build"
|
||||
echo " -i, --install Build for release and install the app"
|
||||
echo " --help Display this help message"
|
||||
echo
|
||||
|
|
@ -33,9 +32,8 @@ REMOTE_PATH="/userdata/jetkvm/bin"
|
|||
SKIP_UI_BUILD=false
|
||||
SKIP_UI_BUILD_RELEASE=0
|
||||
SKIP_NATIVE_BUILD=0
|
||||
SKIP_AUDIO_BINARIES=0
|
||||
RESET_USB_HID_DEVICE=false
|
||||
LOG_TRACE_SCOPES="${LOG_TRACE_SCOPES:-jetkvm,cloud,websocket,native,jsonrpc,audio}"
|
||||
LOG_TRACE_SCOPES="${LOG_TRACE_SCOPES:-jetkvm,cloud,websocket,native,jsonrpc}"
|
||||
RUN_GO_TESTS=false
|
||||
RUN_GO_TESTS_ONLY=false
|
||||
INSTALL_APP=false
|
||||
|
|
@ -62,10 +60,6 @@ while [[ $# -gt 0 ]]; do
|
|||
SKIP_NATIVE_BUILD=1
|
||||
shift
|
||||
;;
|
||||
--skip-audio-binaries)
|
||||
SKIP_AUDIO_BINARIES=1
|
||||
shift
|
||||
;;
|
||||
--reset-usb-hid)
|
||||
RESET_USB_HID_DEVICE=true
|
||||
shift
|
||||
|
|
@ -112,38 +106,14 @@ if [ -z "$REMOTE_HOST" ]; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
# Auto-detect architecture requirements
|
||||
# check if the current CPU architecture is x86_64
|
||||
if [ "$(uname -m)" != "x86_64" ]; then
|
||||
msg_warn "Warning: This script is only supported on x86_64 architecture"
|
||||
BUILD_IN_DOCKER=true
|
||||
fi
|
||||
|
||||
# Auto-detect Docker availability and fallback if not available
|
||||
# This is especially useful in devcontainers where Docker-in-Docker might not be available
|
||||
if [ "$BUILD_IN_DOCKER" = true ]; then
|
||||
# Check if Docker is available and accessible
|
||||
if ! command -v docker &> /dev/null; then
|
||||
msg_warn "Docker command not found, disabling Docker build"
|
||||
msg_info "Building on host instead (equivalent to --disable-docker)"
|
||||
BUILD_IN_DOCKER=false
|
||||
elif ! docker info &> /dev/null; then
|
||||
msg_warn "Docker daemon not accessible (possibly in devcontainer without Docker socket), disabling Docker build"
|
||||
msg_info "Building on host instead (equivalent to --disable-docker)"
|
||||
BUILD_IN_DOCKER=false
|
||||
else
|
||||
msg_info "Docker is available and accessible"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$BUILD_IN_DOCKER" = true ]; then
|
||||
# Double-check Docker availability before building image
|
||||
if ! docker info &> /dev/null; then
|
||||
msg_warn "Docker daemon became unavailable, switching to host build"
|
||||
BUILD_IN_DOCKER=false
|
||||
else
|
||||
build_docker_image
|
||||
fi
|
||||
build_docker_image
|
||||
fi
|
||||
|
||||
# Build the development version on the host
|
||||
|
|
@ -154,13 +124,10 @@ if [[ "$SKIP_UI_BUILD" = true && ! -f "static/index.html" ]]; then
|
|||
SKIP_UI_BUILD=false
|
||||
fi
|
||||
|
||||
if [[ "$SKIP_UI_BUILD" = false && "$JETKVM_INSIDE_DOCKER" != 1 ]]; then
|
||||
if [[ "$SKIP_UI_BUILD" = false && "$JETKVM_INSIDE_DOCKER" != 1 ]]; then
|
||||
msg_info "▶ Building frontend"
|
||||
make frontend SKIP_UI_BUILD=0
|
||||
SKIP_UI_BUILD_RELEASE=1
|
||||
elif [[ "$SKIP_UI_BUILD" = true ]]; then
|
||||
# User explicitly requested to skip UI build and static files exist
|
||||
SKIP_UI_BUILD_RELEASE=1
|
||||
fi
|
||||
|
||||
if [[ "$SKIP_UI_BUILD_RELEASE" = 0 && "$BUILD_IN_DOCKER" = true ]]; then
|
||||
|
|
@ -213,16 +180,16 @@ fi
|
|||
if [ "$INSTALL_APP" = true ]
|
||||
then
|
||||
msg_info "▶ Building release binary"
|
||||
do_make build_release SKIP_NATIVE_IF_EXISTS=${SKIP_NATIVE_BUILD} SKIP_UI_BUILD=${SKIP_UI_BUILD_RELEASE} SKIP_AUDIO_BINARIES_IF_EXISTS=${SKIP_AUDIO_BINARIES}
|
||||
|
||||
do_make build_release SKIP_NATIVE_IF_EXISTS=${SKIP_NATIVE_BUILD} SKIP_UI_BUILD=${SKIP_UI_BUILD_RELEASE}
|
||||
|
||||
# Copy the binary to the remote host as if we were the OTA updater.
|
||||
ssh "${REMOTE_USER}@${REMOTE_HOST}" "cat > /userdata/jetkvm/jetkvm_app.update" < bin/jetkvm_app
|
||||
|
||||
|
||||
# Reboot the device, the new app will be deployed by the startup process.
|
||||
ssh "${REMOTE_USER}@${REMOTE_HOST}" "reboot"
|
||||
else
|
||||
msg_info "▶ Building development binary"
|
||||
do_make build_dev SKIP_NATIVE_IF_EXISTS=${SKIP_NATIVE_BUILD} SKIP_UI_BUILD=${SKIP_UI_BUILD_RELEASE} SKIP_AUDIO_BINARIES_IF_EXISTS=${SKIP_AUDIO_BINARIES}
|
||||
do_make build_dev SKIP_NATIVE_IF_EXISTS=${SKIP_NATIVE_BUILD} SKIP_UI_BUILD=${SKIP_UI_BUILD_RELEASE}
|
||||
|
||||
# Kill any existing instances of the application
|
||||
ssh "${REMOTE_USER}@${REMOTE_HOST}" "killall jetkvm_app_debug || true"
|
||||
|
|
|
|||
|
|
@ -1,24 +0,0 @@
|
|||
package kvm
|
||||
|
||||
import "github.com/jetkvm/kvm/internal/audio"
|
||||
|
||||
// KVMSessionProvider implements the audio.SessionProvider interface
|
||||
type KVMSessionProvider struct{}
|
||||
|
||||
// IsSessionActive returns whether there's an active session
|
||||
func (k *KVMSessionProvider) IsSessionActive() bool {
|
||||
return currentSession != nil
|
||||
}
|
||||
|
||||
// GetAudioInputManager returns the current session's audio input manager
|
||||
func (k *KVMSessionProvider) GetAudioInputManager() *audio.AudioInputManager {
|
||||
if currentSession == nil {
|
||||
return nil
|
||||
}
|
||||
return currentSession.AudioInputManager
|
||||
}
|
||||
|
||||
// initializeAudioSessionProvider sets up the session provider for the audio package
|
||||
func initializeAudioSessionProvider() {
|
||||
audio.SetSessionProvider(&KVMSessionProvider{})
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
"name": "kvm-ui",
|
||||
"private": true,
|
||||
"version": "2025.09.26.01300",
|
||||
"version": "2025.10.01.1900",
|
||||
"type": "module",
|
||||
"engines": {
|
||||
"node": "^22.15.0"
|
||||
|
|
@ -42,7 +42,7 @@
|
|||
"react-hot-toast": "^2.6.0",
|
||||
"react-icons": "^5.5.0",
|
||||
"react-router": "^7.9.3",
|
||||
"react-simple-keyboard": "^3.8.122",
|
||||
"react-simple-keyboard": "^3.8.125",
|
||||
"react-use-websocket": "^4.13.0",
|
||||
"react-xtermjs": "^1.0.10",
|
||||
"recharts": "^3.2.1",
|
||||
|
|
@ -56,15 +56,15 @@
|
|||
"@eslint/eslintrc": "^3.3.1",
|
||||
"@eslint/js": "^9.36.0",
|
||||
"@tailwindcss/forms": "^0.5.10",
|
||||
"@tailwindcss/postcss": "^4.1.13",
|
||||
"@tailwindcss/postcss": "^4.1.14",
|
||||
"@tailwindcss/typography": "^0.5.19",
|
||||
"@tailwindcss/vite": "^4.1.13",
|
||||
"@types/react": "^19.1.14",
|
||||
"@types/react-dom": "^19.1.9",
|
||||
"@tailwindcss/vite": "^4.1.14",
|
||||
"@types/react": "^19.1.17",
|
||||
"@types/react-dom": "^19.1.10",
|
||||
"@types/semver": "^7.7.1",
|
||||
"@types/validator": "^13.15.3",
|
||||
"@typescript-eslint/eslint-plugin": "^8.44.1",
|
||||
"@typescript-eslint/parser": "^8.44.1",
|
||||
"@typescript-eslint/eslint-plugin": "^8.45.0",
|
||||
"@typescript-eslint/parser": "^8.45.0",
|
||||
"@vitejs/plugin-react-swc": "^4.1.0",
|
||||
"autoprefixer": "^10.4.21",
|
||||
"eslint": "^9.36.0",
|
||||
|
|
@ -77,8 +77,8 @@
|
|||
"postcss": "^8.5.6",
|
||||
"prettier": "^3.6.2",
|
||||
"prettier-plugin-tailwindcss": "^0.6.14",
|
||||
"tailwindcss": "^4.1.13",
|
||||
"typescript": "^5.9.2",
|
||||
"tailwindcss": "^4.1.14",
|
||||
"typescript": "^5.9.3",
|
||||
"vite": "^7.1.7",
|
||||
"vite-tsconfig-paths": "^5.1.4"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,58 +1,30 @@
|
|||
import { MdOutlineContentPasteGo, MdVolumeOff, MdVolumeUp, MdGraphicEq } from "react-icons/md";
|
||||
import { LuCable, LuHardDrive, LuMaximize, LuSettings, LuSignal } from "react-icons/lu";
|
||||
import { MdOutlineContentPasteGo } from "react-icons/md";
|
||||
import { LuCable, LuHardDrive, LuMaximize, LuSettings, LuSignal, LuVolume2 } from "react-icons/lu";
|
||||
import { FaKeyboard } from "react-icons/fa6";
|
||||
import { Popover, PopoverButton, PopoverPanel } from "@headlessui/react";
|
||||
import { Fragment, useCallback, useRef } from "react";
|
||||
import { CommandLineIcon } from "@heroicons/react/20/solid";
|
||||
|
||||
import { Button } from "@components/Button";
|
||||
import Container from "@components/Container";
|
||||
import {
|
||||
useHidStore,
|
||||
useMountMediaStore,
|
||||
useSettingsStore,
|
||||
useUiStore,
|
||||
} from "@/hooks/stores";
|
||||
import Container from "@components/Container";
|
||||
import { cx } from "@/cva.config";
|
||||
import PasteModal from "@/components/popovers/PasteModal";
|
||||
import WakeOnLanModal from "@/components/popovers/WakeOnLan/Index";
|
||||
import MountPopopover from "@/components/popovers/MountPopover";
|
||||
import ExtensionPopover from "@/components/popovers/ExtensionPopover";
|
||||
import AudioControlPopover from "@/components/popovers/AudioControlPopover";
|
||||
import AudioPopover from "@/components/popovers/AudioPopover";
|
||||
import { useDeviceUiNavigation } from "@/hooks/useAppNavigation";
|
||||
import { useAudioEvents } from "@/hooks/useAudioEvents";
|
||||
import { useUsbDeviceConfig } from "@/hooks/useUsbDeviceConfig";
|
||||
|
||||
|
||||
// Type for microphone error
|
||||
interface MicrophoneError {
|
||||
type: 'permission' | 'device' | 'network' | 'unknown';
|
||||
message: string;
|
||||
}
|
||||
|
||||
// Type for microphone hook return value
|
||||
interface MicrophoneHookReturn {
|
||||
isMicrophoneActive: boolean;
|
||||
isMicrophoneMuted: boolean;
|
||||
microphoneStream: MediaStream | null;
|
||||
startMicrophone: (deviceId?: string) => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
stopMicrophone: () => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
toggleMicrophoneMute: () => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
syncMicrophoneState: () => Promise<void>;
|
||||
// Loading states
|
||||
isStarting: boolean;
|
||||
isStopping: boolean;
|
||||
isToggling: boolean;
|
||||
// HTTP/HTTPS detection
|
||||
isHttpsRequired: boolean;
|
||||
}
|
||||
|
||||
export default function Actionbar({
|
||||
requestFullscreen,
|
||||
microphone,
|
||||
}: {
|
||||
requestFullscreen: () => Promise<void>;
|
||||
microphone: MicrophoneHookReturn;
|
||||
}) {
|
||||
const { navigateTo } = useDeviceUiNavigation();
|
||||
const { isVirtualKeyboardEnabled, setVirtualKeyboardEnabled } = useHidStore();
|
||||
|
|
@ -81,17 +53,6 @@ export default function Actionbar({
|
|||
[setDisableVideoFocusTrap],
|
||||
);
|
||||
|
||||
// Use WebSocket-based audio events for real-time updates
|
||||
const { audioMuted } = useAudioEvents();
|
||||
|
||||
// Use WebSocket data exclusively - no polling fallback
|
||||
const isMuted = audioMuted ?? false; // Default to false if WebSocket data not available yet
|
||||
|
||||
// Get USB device configuration to check if audio is enabled
|
||||
const { usbDeviceConfig, loading: usbConfigLoading } = useUsbDeviceConfig();
|
||||
// Default to false while loading to prevent premature access when audio hasn't been enabled yet
|
||||
const isAudioEnabledInUsb = usbDeviceConfig?.audio ?? false;
|
||||
|
||||
return (
|
||||
<Container className="border-b border-b-slate-800/20 bg-white dark:border-b-slate-300/20 dark:bg-slate-900">
|
||||
<div
|
||||
|
|
@ -129,7 +90,7 @@ export default function Actionbar({
|
|||
"flex origin-top flex-col transition duration-300 ease-out data-closed:translate-y-8 data-closed:opacity-0",
|
||||
)}
|
||||
>
|
||||
{({ open }: { open: boolean }) => {
|
||||
{({ open }) => {
|
||||
checkIfStateChanged(open);
|
||||
return (
|
||||
<div className="mx-auto w-full max-w-xl">
|
||||
|
|
@ -171,7 +132,7 @@ export default function Actionbar({
|
|||
"flex origin-top flex-col transition duration-300 ease-out data-closed:translate-y-8 data-closed:opacity-0",
|
||||
)}
|
||||
>
|
||||
{({ open }: { open: boolean }) => {
|
||||
{({ open }) => {
|
||||
checkIfStateChanged(open);
|
||||
return (
|
||||
<div className="mx-auto w-full max-w-xl">
|
||||
|
|
@ -223,7 +184,7 @@ export default function Actionbar({
|
|||
"flex origin-top flex-col transition duration-300 ease-out data-closed:translate-y-8 data-closed:opacity-0",
|
||||
)}
|
||||
>
|
||||
{({ open }: { open: boolean }) => {
|
||||
{({ open }) => {
|
||||
checkIfStateChanged(open);
|
||||
return (
|
||||
<div className="mx-auto w-full max-w-xl">
|
||||
|
|
@ -243,6 +204,36 @@ export default function Actionbar({
|
|||
onClick={() => setVirtualKeyboardEnabled(!isVirtualKeyboardEnabled)}
|
||||
/>
|
||||
</div>
|
||||
<Popover>
|
||||
<PopoverButton as={Fragment}>
|
||||
<Button
|
||||
size="XS"
|
||||
theme="light"
|
||||
text="Audio"
|
||||
LeadingIcon={LuVolume2}
|
||||
onClick={() => {
|
||||
setDisableVideoFocusTrap(true);
|
||||
}}
|
||||
/>
|
||||
</PopoverButton>
|
||||
<PopoverPanel
|
||||
anchor="bottom start"
|
||||
transition
|
||||
className={cx(
|
||||
"z-10 flex w-[420px] flex-col overflow-visible!",
|
||||
"flex origin-top flex-col transition duration-300 ease-out data-closed:translate-y-8 data-closed:opacity-0",
|
||||
)}
|
||||
>
|
||||
{({ open }) => {
|
||||
checkIfStateChanged(open);
|
||||
return (
|
||||
<div className="mx-auto w-full max-w-xl">
|
||||
<AudioPopover />
|
||||
</div>
|
||||
);
|
||||
}}
|
||||
</PopoverPanel>
|
||||
</Popover>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-wrap items-center gap-x-2 gap-y-2">
|
||||
|
|
@ -266,7 +257,7 @@ export default function Actionbar({
|
|||
"flex origin-top flex-col transition duration-300 ease-out data-closed:translate-y-8 data-closed:opacity-0",
|
||||
)}
|
||||
>
|
||||
{({ open }: { open: boolean }) => {
|
||||
{({ open }) => {
|
||||
checkIfStateChanged(open);
|
||||
return <ExtensionPopover />;
|
||||
}}
|
||||
|
|
@ -298,7 +289,6 @@ export default function Actionbar({
|
|||
}}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<Button
|
||||
size="XS"
|
||||
|
|
@ -322,64 +312,6 @@ export default function Actionbar({
|
|||
onClick={() => requestFullscreen()}
|
||||
/>
|
||||
</div>
|
||||
<Popover>
|
||||
<PopoverButton as={Fragment} disabled={!isAudioEnabledInUsb || usbConfigLoading}>
|
||||
<div title={
|
||||
usbConfigLoading
|
||||
? "Loading audio configuration..."
|
||||
: !isAudioEnabledInUsb
|
||||
? "Audio needs to be enabled in USB device settings"
|
||||
: undefined
|
||||
}>
|
||||
<Button
|
||||
size="XS"
|
||||
theme="light"
|
||||
text="Audio"
|
||||
disabled={!isAudioEnabledInUsb || usbConfigLoading}
|
||||
LeadingIcon={({ className }) => (
|
||||
<div className="flex items-center">
|
||||
{usbConfigLoading ? (
|
||||
<div className={cx(className, "animate-spin rounded-full border border-gray-400 border-t-gray-600")} />
|
||||
) : !isAudioEnabledInUsb ? (
|
||||
<MdVolumeOff className={cx(className, "text-gray-400")} />
|
||||
) : isMuted ? (
|
||||
<MdVolumeOff className={cx(className, "text-red-500")} />
|
||||
) : (
|
||||
<MdVolumeUp className={cx(className, "text-green-500")} />
|
||||
)}
|
||||
<MdGraphicEq className={cx(className, "ml-1",
|
||||
usbConfigLoading ? "text-gray-400" :
|
||||
!isAudioEnabledInUsb ? "text-gray-400" :
|
||||
"text-blue-500"
|
||||
)} />
|
||||
</div>
|
||||
)}
|
||||
onClick={() => {
|
||||
if (isAudioEnabledInUsb && !usbConfigLoading) {
|
||||
setDisableVideoFocusTrap(true);
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</PopoverButton>
|
||||
<PopoverPanel
|
||||
anchor="bottom end"
|
||||
transition
|
||||
className={cx(
|
||||
"z-10 flex origin-top flex-col overflow-visible!",
|
||||
"flex origin-top flex-col transition duration-300 ease-out data-closed:translate-y-8 data-closed:opacity-0",
|
||||
)}
|
||||
>
|
||||
{({ open }: { open: boolean }) => {
|
||||
checkIfStateChanged(open);
|
||||
return (
|
||||
<div className="mx-auto">
|
||||
<AudioControlPopover microphone={microphone} />
|
||||
</div>
|
||||
);
|
||||
}}
|
||||
</PopoverPanel>
|
||||
</Popover>
|
||||
</div>
|
||||
</div>
|
||||
</Container>
|
||||
|
|
|
|||
|
|
@ -11,8 +11,6 @@ import { cva } from "@/cva.config";
|
|||
|
||||
import Card from "./Card";
|
||||
|
||||
|
||||
|
||||
export interface ComboboxOption {
|
||||
value: string;
|
||||
label: string;
|
||||
|
|
|
|||
|
|
@ -4,8 +4,6 @@ import { GridCard } from "@/components/Card";
|
|||
|
||||
import { cx } from "../cva.config";
|
||||
|
||||
|
||||
|
||||
interface Props {
|
||||
IconElm?: React.FC<{ className: string | undefined }>;
|
||||
headline: string;
|
||||
|
|
|
|||
|
|
@ -4,22 +4,20 @@ import { ArrowLeftEndOnRectangleIcon, ChevronDownIcon } from "@heroicons/react/1
|
|||
import { Button, Menu, MenuButton, MenuItem, MenuItems } from "@headlessui/react";
|
||||
import { LuMonitorSmartphone } from "react-icons/lu";
|
||||
|
||||
import USBStateStatus from "@components/USBStateStatus";
|
||||
import PeerConnectionStatusCard from "@components/PeerConnectionStatusCard";
|
||||
import Container from "@/components/Container";
|
||||
import Card from "@/components/Card";
|
||||
import { useHidStore, useRTCStore, useUserStore } from "@/hooks/stores";
|
||||
import LogoBlueIcon from "@/assets/logo-blue.svg";
|
||||
import LogoWhiteIcon from "@/assets/logo-white.svg";
|
||||
import USBStateStatus from "@components/USBStateStatus";
|
||||
import PeerConnectionStatusCard from "@components/PeerConnectionStatusCard";
|
||||
import { CLOUD_API, DEVICE_API } from "@/ui.config";
|
||||
|
||||
import { isOnDevice } from "../main";
|
||||
import api from "../api";
|
||||
import { isOnDevice } from "../main";
|
||||
|
||||
import { LinkButton } from "./Button";
|
||||
|
||||
|
||||
|
||||
interface NavbarProps {
|
||||
isLoggedIn: boolean;
|
||||
primaryLinks?: { title: string; to: string }[];
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
|||
import { InputFieldWithLabel } from "./InputField";
|
||||
import { SelectMenuBasic } from "./SelectMenuBasic";
|
||||
|
||||
|
||||
export interface JigglerConfig {
|
||||
inactivity_limit_seconds: number;
|
||||
jitter_percentage: number;
|
||||
|
|
|
|||
|
|
@ -1,14 +1,12 @@
|
|||
import React, { JSX } from "react";
|
||||
import clsx from "clsx";
|
||||
|
||||
|
||||
import FieldLabel from "@/components/FieldLabel";
|
||||
import { cva } from "@/cva.config";
|
||||
|
||||
import Card from "./Card";
|
||||
|
||||
|
||||
|
||||
type SelectMenuProps = Pick<
|
||||
JSX.IntrinsicElements["select"],
|
||||
"disabled" | "onChange" | "name" | "value"
|
||||
|
|
|
|||
|
|
@ -8,13 +8,11 @@ import { WebglAddon } from "@xterm/addon-webgl";
|
|||
import { Unicode11Addon } from "@xterm/addon-unicode11";
|
||||
import { ClipboardAddon } from "@xterm/addon-clipboard";
|
||||
|
||||
|
||||
import { cx } from "@/cva.config";
|
||||
import { AvailableTerminalTypes, useUiStore } from "@/hooks/stores";
|
||||
|
||||
import { Button } from "./Button";
|
||||
|
||||
|
||||
const isWebGl2Supported = !!document.createElement("canvas").getContext("webgl2");
|
||||
|
||||
// Terminal theme configuration
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
import React from "react";
|
||||
|
||||
import LoadingSpinner from "@components/LoadingSpinner";
|
||||
import StatusCard from "@components/StatusCards";
|
||||
import { cx } from "@/cva.config";
|
||||
import KeyboardAndMouseConnectedIcon from "@/assets/keyboard-and-mouse-connected.png";
|
||||
import LoadingSpinner from "@components/LoadingSpinner";
|
||||
import StatusCard from "@components/StatusCards";
|
||||
import { USBStates } from "@/hooks/stores";
|
||||
|
||||
type StatusProps = Record<
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
import { cx } from "@/cva.config";
|
||||
|
||||
import { useDeviceUiNavigation } from "../hooks/useAppNavigation";
|
||||
|
|
@ -7,7 +6,6 @@ import { Button } from "./Button";
|
|||
import { GridCard } from "./Card";
|
||||
import LoadingSpinner from "./LoadingSpinner";
|
||||
|
||||
|
||||
export default function UpdateInProgressStatusCard() {
|
||||
const { navigateTo } = useDeviceUiNavigation();
|
||||
|
||||
|
|
|
|||
|
|
@ -46,17 +46,6 @@ const usbPresets = [
|
|||
audio: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Keyboard, Mouse and Mass Storage",
|
||||
value: "no_audio",
|
||||
config: {
|
||||
keyboard: true,
|
||||
absolute_mouse: true,
|
||||
relative_mouse: true,
|
||||
mass_storage: true,
|
||||
audio: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Keyboard Only",
|
||||
value: "keyboard_only",
|
||||
|
|
@ -235,8 +224,8 @@ export function UsbDeviceSetting() {
|
|||
</div>
|
||||
<div className="space-y-4">
|
||||
<SettingsItem
|
||||
title="Enable Audio Input/Output"
|
||||
description="Enable USB audio input and output devices"
|
||||
title="Enable USB Audio"
|
||||
description="Enable bidirectional audio (HDMI capture and microphone input)"
|
||||
>
|
||||
<Checkbox
|
||||
checked={usbDeviceConfig.audio}
|
||||
|
|
|
|||
|
|
@ -4,10 +4,12 @@ import { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
|||
import Keyboard from "react-simple-keyboard";
|
||||
import { LuKeyboard } from "react-icons/lu";
|
||||
|
||||
import Card from "@components/Card";
|
||||
// eslint-disable-next-line import/order
|
||||
import { Button, LinkButton } from "@components/Button";
|
||||
|
||||
import "react-simple-keyboard/build/css/index.css";
|
||||
|
||||
import Card from "@components/Card";
|
||||
import { Button, LinkButton } from "@components/Button";
|
||||
import DetachIconRaw from "@/assets/detach-icon.svg";
|
||||
import { cx } from "@/cva.config";
|
||||
import { useHidStore, useUiStore } from "@/hooks/stores";
|
||||
|
|
|
|||
|
|
@ -3,8 +3,8 @@ import { useResizeObserver } from "usehooks-ts";
|
|||
|
||||
import VirtualKeyboard from "@components/VirtualKeyboard";
|
||||
import Actionbar from "@components/ActionBar";
|
||||
import InfoBar from "@components/InfoBar";
|
||||
import MacroBar from "@/components/MacroBar";
|
||||
import InfoBar from "@components/InfoBar";
|
||||
import notifications from "@/notifications";
|
||||
import useKeyboard from "@/hooks/useKeyboard";
|
||||
import { cx } from "@/cva.config";
|
||||
|
|
@ -23,35 +23,7 @@ import {
|
|||
PointerLockBar,
|
||||
} from "./VideoOverlay";
|
||||
|
||||
|
||||
// Type for microphone error
|
||||
interface MicrophoneError {
|
||||
type: 'permission' | 'device' | 'network' | 'unknown';
|
||||
message: string;
|
||||
}
|
||||
|
||||
// Interface for microphone hook return type
|
||||
interface MicrophoneHookReturn {
|
||||
isMicrophoneActive: boolean;
|
||||
isMicrophoneMuted: boolean;
|
||||
microphoneStream: MediaStream | null;
|
||||
startMicrophone: (deviceId?: string) => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
stopMicrophone: () => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
toggleMicrophoneMute: () => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
syncMicrophoneState: () => Promise<void>;
|
||||
// Loading states
|
||||
isStarting: boolean;
|
||||
isStopping: boolean;
|
||||
isToggling: boolean;
|
||||
// HTTP/HTTPS detection
|
||||
isHttpsRequired: boolean;
|
||||
}
|
||||
|
||||
interface WebRTCVideoProps {
|
||||
microphone: MicrophoneHookReturn;
|
||||
}
|
||||
|
||||
export default function WebRTCVideo({ microphone }: WebRTCVideoProps) {
|
||||
export default function WebRTCVideo() {
|
||||
// Video and stream related refs and states
|
||||
const videoElm = useRef<HTMLVideoElement>(null);
|
||||
const { mediaStream, peerConnectionState } = useRTCStore();
|
||||
|
|
@ -346,15 +318,20 @@ export default function WebRTCVideo({ microphone }: WebRTCVideoProps) {
|
|||
if (!peerConnection) return;
|
||||
const abortController = new AbortController();
|
||||
const signal = abortController.signal;
|
||||
const audioElements: HTMLAudioElement[] = [];
|
||||
|
||||
peerConnection.addEventListener(
|
||||
"track",
|
||||
(_e: RTCTrackEvent) => {
|
||||
// The combined MediaStream is now managed in the main component
|
||||
// We'll use the mediaStream from the store instead of individual track streams
|
||||
const { mediaStream } = useRTCStore.getState();
|
||||
if (mediaStream) {
|
||||
addStreamToVideoElm(mediaStream);
|
||||
(e: RTCTrackEvent) => {
|
||||
if (e.track.kind === "video") {
|
||||
addStreamToVideoElm(e.streams[0]);
|
||||
} else if (e.track.kind === "audio") {
|
||||
const audioElm = document.createElement("audio");
|
||||
audioElm.autoplay = true;
|
||||
audioElm.srcObject = e.streams[0];
|
||||
audioElm.style.display = "none";
|
||||
document.body.appendChild(audioElm);
|
||||
audioElements.push(audioElm);
|
||||
}
|
||||
},
|
||||
{ signal },
|
||||
|
|
@ -362,6 +339,10 @@ export default function WebRTCVideo({ microphone }: WebRTCVideoProps) {
|
|||
|
||||
return () => {
|
||||
abortController.abort();
|
||||
audioElements.forEach((audioElm) => {
|
||||
audioElm.srcObject = null;
|
||||
audioElm.remove();
|
||||
});
|
||||
};
|
||||
},
|
||||
[addStreamToVideoElm, peerConnection],
|
||||
|
|
@ -521,7 +502,7 @@ export default function WebRTCVideo({ microphone }: WebRTCVideoProps) {
|
|||
disabled={peerConnection?.connectionState !== "connected"}
|
||||
className="contents"
|
||||
>
|
||||
<Actionbar requestFullscreen={requestFullscreen} microphone={microphone} />
|
||||
<Actionbar requestFullscreen={requestFullscreen} />
|
||||
<MacroBar />
|
||||
</fieldset>
|
||||
</div>
|
||||
|
|
@ -551,7 +532,6 @@ export default function WebRTCVideo({ microphone }: WebRTCVideoProps) {
|
|||
controls={false}
|
||||
onPlaying={onVideoPlaying}
|
||||
onPlay={onVideoPlaying}
|
||||
muted={false}
|
||||
playsInline
|
||||
disablePictureInPicture
|
||||
controlsList="nofullscreen"
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ import LoadingSpinner from "@/components/LoadingSpinner";
|
|||
|
||||
import { JsonRpcResponse, useJsonRpc } from "../../hooks/useJsonRpc";
|
||||
|
||||
|
||||
const LONG_PRESS_DURATION = 3000; // 3 seconds for long press
|
||||
|
||||
interface ATXState {
|
||||
|
|
|
|||
|
|
@ -4,11 +4,11 @@ import { useCallback, useEffect, useState } from "react";
|
|||
import { Button } from "@components/Button";
|
||||
import Card from "@components/Card";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import notifications from "@/notifications";
|
||||
import FieldLabel from "@components/FieldLabel";
|
||||
import LoadingSpinner from "@components/LoadingSpinner";
|
||||
import {SelectMenuBasic} from "@components/SelectMenuBasic";
|
||||
import notifications from "@/notifications";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
|
||||
interface DCPowerState {
|
||||
isOn: boolean;
|
||||
|
|
|
|||
|
|
@ -4,10 +4,10 @@ import { useEffect, useState } from "react";
|
|||
import { Button } from "@components/Button";
|
||||
import Card from "@components/Card";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { SelectMenuBasic } from "@components/SelectMenuBasic";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import notifications from "@/notifications";
|
||||
import { useUiStore } from "@/hooks/stores";
|
||||
import { SelectMenuBasic } from "@components/SelectMenuBasic";
|
||||
|
||||
interface SerialSettings {
|
||||
baudRate: string;
|
||||
|
|
|
|||
|
|
@ -1,450 +0,0 @@
|
|||
import { useEffect, useState } from "react";
|
||||
import { MdVolumeOff, MdVolumeUp, MdGraphicEq, MdMic, MdMicOff, MdRefresh } from "react-icons/md";
|
||||
|
||||
import { Button } from "@components/Button";
|
||||
import { cx } from "@/cva.config";
|
||||
import { useAudioDevices } from "@/hooks/useAudioDevices";
|
||||
import { useAudioEvents } from "@/hooks/useAudioEvents";
|
||||
import { useJsonRpc, JsonRpcResponse } from "@/hooks/useJsonRpc";
|
||||
import { useRTCStore } from "@/hooks/stores";
|
||||
import notifications from "@/notifications";
|
||||
|
||||
// Type for microphone error
|
||||
interface MicrophoneError {
|
||||
type: 'permission' | 'device' | 'network' | 'unknown';
|
||||
message: string;
|
||||
}
|
||||
|
||||
// Type for microphone hook return value
|
||||
interface MicrophoneHookReturn {
|
||||
isMicrophoneActive: boolean;
|
||||
isMicrophoneMuted: boolean;
|
||||
microphoneStream: MediaStream | null;
|
||||
startMicrophone: (deviceId?: string) => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
stopMicrophone: () => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
toggleMicrophoneMute: () => Promise<{ success: boolean; error?: MicrophoneError }>;
|
||||
syncMicrophoneState: () => Promise<void>;
|
||||
// Loading states
|
||||
isStarting: boolean;
|
||||
isStopping: boolean;
|
||||
isToggling: boolean;
|
||||
// HTTP/HTTPS detection
|
||||
isHttpsRequired: boolean;
|
||||
}
|
||||
|
||||
interface AudioConfig {
|
||||
Quality: number;
|
||||
Bitrate: number;
|
||||
SampleRate: number;
|
||||
Channels: number;
|
||||
FrameSize: string;
|
||||
}
|
||||
|
||||
interface AudioControlPopoverProps {
|
||||
microphone: MicrophoneHookReturn;
|
||||
}
|
||||
|
||||
export default function AudioControlPopover({ microphone }: AudioControlPopoverProps) {
|
||||
const [currentConfig, setCurrentConfig] = useState<AudioConfig | null>(null);
|
||||
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
|
||||
// Add cache flags to prevent unnecessary API calls
|
||||
const [configsLoaded, setConfigsLoaded] = useState(false);
|
||||
|
||||
// Add cooldown to prevent rapid clicking
|
||||
const [lastClickTime, setLastClickTime] = useState(0);
|
||||
const CLICK_COOLDOWN = 500; // 500ms cooldown between clicks
|
||||
|
||||
// Use WebSocket-based audio events for real-time updates
|
||||
const {
|
||||
audioMuted,
|
||||
// microphoneState - now using hook state instead
|
||||
isConnected: wsConnected
|
||||
} = useAudioEvents();
|
||||
|
||||
// RPC for device communication (works both locally and via cloud)
|
||||
const { rpcDataChannel } = useRTCStore();
|
||||
const { send } = useJsonRpc();
|
||||
|
||||
// Initialize audio quality service with RPC for cloud compatibility
|
||||
// Audio quality service removed - using fixed optimal configuration
|
||||
|
||||
// WebSocket-only implementation - no fallback polling
|
||||
|
||||
// Microphone state from props (keeping hook for legacy device operations)
|
||||
const {
|
||||
isMicrophoneActive: isMicrophoneActiveFromHook,
|
||||
startMicrophone,
|
||||
stopMicrophone,
|
||||
syncMicrophoneState,
|
||||
// Loading states
|
||||
isStarting,
|
||||
isStopping,
|
||||
isToggling,
|
||||
// HTTP/HTTPS detection
|
||||
isHttpsRequired,
|
||||
} = microphone;
|
||||
|
||||
// Use WebSocket data exclusively - no polling fallback
|
||||
const isMuted = audioMuted ?? false;
|
||||
const isConnected = wsConnected;
|
||||
|
||||
|
||||
|
||||
// Audio devices
|
||||
const {
|
||||
audioInputDevices,
|
||||
audioOutputDevices,
|
||||
selectedInputDevice,
|
||||
selectedOutputDevice,
|
||||
setSelectedInputDevice,
|
||||
setSelectedOutputDevice,
|
||||
isLoading: devicesLoading,
|
||||
error: devicesError,
|
||||
refreshDevices
|
||||
} = useAudioDevices();
|
||||
|
||||
|
||||
|
||||
// Load initial configurations once - cache to prevent repeated calls
|
||||
useEffect(() => {
|
||||
if (!configsLoaded) {
|
||||
loadAudioConfigurations();
|
||||
}
|
||||
}, [configsLoaded]);
|
||||
|
||||
// WebSocket-only implementation - sync microphone state when needed
|
||||
useEffect(() => {
|
||||
// Always sync microphone state, but debounce it
|
||||
const syncTimeout = setTimeout(() => {
|
||||
syncMicrophoneState();
|
||||
}, 500);
|
||||
|
||||
return () => clearTimeout(syncTimeout);
|
||||
}, [syncMicrophoneState]);
|
||||
|
||||
const loadAudioConfigurations = async () => {
|
||||
try {
|
||||
// Load audio configuration directly via RPC
|
||||
if (!send) return;
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
send("audioStatus", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
reject(new Error(resp.error.message));
|
||||
} else if ("result" in resp && resp.result) {
|
||||
const result = resp.result as any;
|
||||
if (result.config) {
|
||||
setCurrentConfig(result.config);
|
||||
}
|
||||
resolve();
|
||||
} else {
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
setConfigsLoaded(true);
|
||||
} catch {
|
||||
// Failed to load audio configurations
|
||||
}
|
||||
};
|
||||
|
||||
const handleToggleMute = async () => {
|
||||
const now = Date.now();
|
||||
|
||||
// Prevent rapid clicking
|
||||
if (isLoading || (now - lastClickTime < CLICK_COOLDOWN)) {
|
||||
return;
|
||||
}
|
||||
|
||||
setLastClickTime(now);
|
||||
setIsLoading(true);
|
||||
|
||||
try {
|
||||
// Use RPC for device communication - works for both local and cloud
|
||||
if (rpcDataChannel?.readyState !== "open") {
|
||||
throw new Error("Device connection not available");
|
||||
}
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
send("audioMute", { muted: !isMuted }, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
reject(new Error(resp.error.message));
|
||||
} else {
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// WebSocket will handle the state update automatically
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : "Failed to toggle audio mute";
|
||||
notifications.error(errorMessage);
|
||||
} finally {
|
||||
setIsLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Quality change handler removed - quality is now fixed at optimal settings
|
||||
|
||||
const handleToggleMicrophoneEnable = async () => {
|
||||
const now = Date.now();
|
||||
|
||||
// Prevent rapid clicking - if any operation is in progress or within cooldown, ignore the click
|
||||
if (isStarting || isStopping || isToggling || (now - lastClickTime < CLICK_COOLDOWN)) {
|
||||
return;
|
||||
}
|
||||
|
||||
setLastClickTime(now);
|
||||
setIsLoading(true);
|
||||
|
||||
try {
|
||||
if (isMicrophoneActiveFromHook) {
|
||||
// Disable: Use the hook's stopMicrophone which handles both RPC and local cleanup
|
||||
const result = await stopMicrophone();
|
||||
if (!result.success) {
|
||||
throw new Error(result.error?.message || "Failed to stop microphone");
|
||||
}
|
||||
} else {
|
||||
// Enable: Use the hook's startMicrophone which handles both RPC and local setup
|
||||
const result = await startMicrophone();
|
||||
if (!result.success) {
|
||||
throw new Error(result.error?.message || "Failed to start microphone");
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : "Failed to toggle microphone";
|
||||
notifications.error(errorMessage);
|
||||
} finally {
|
||||
setIsLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Handle microphone device change
|
||||
const handleMicrophoneDeviceChange = async (deviceId: string) => {
|
||||
// Don't process device changes for HTTPS-required placeholder
|
||||
if (deviceId === 'https-required') {
|
||||
return;
|
||||
}
|
||||
|
||||
setSelectedInputDevice(deviceId);
|
||||
|
||||
// If microphone is currently active, restart it with the new device
|
||||
if (isMicrophoneActiveFromHook) {
|
||||
try {
|
||||
// Stop current microphone
|
||||
await stopMicrophone();
|
||||
// Start with new device
|
||||
const result = await startMicrophone(deviceId);
|
||||
if (!result.success && result.error) {
|
||||
notifications.error(result.error.message);
|
||||
}
|
||||
} catch {
|
||||
// Failed to change microphone device
|
||||
notifications.error("Failed to change microphone device");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const handleAudioOutputDeviceChange = async (deviceId: string) => {
|
||||
setSelectedOutputDevice(deviceId);
|
||||
|
||||
// Find the video element and set the audio output device
|
||||
const videoElement = document.querySelector('video');
|
||||
if (videoElement && 'setSinkId' in videoElement) {
|
||||
try {
|
||||
await (videoElement as HTMLVideoElement & { setSinkId: (deviceId: string) => Promise<void> }).setSinkId(deviceId);
|
||||
} catch {
|
||||
// Failed to change audio output device
|
||||
}
|
||||
} else {
|
||||
// setSinkId not supported or video element not found
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
return (
|
||||
<div className="w-full max-w-md rounded-lg border border-slate-200 bg-white p-4 shadow-lg dark:border-slate-700 dark:bg-slate-800">
|
||||
<div className="space-y-4">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between">
|
||||
<h3 className="text-lg font-semibold text-slate-900 dark:text-slate-100">
|
||||
Audio Controls
|
||||
</h3>
|
||||
<div className="flex items-center gap-2">
|
||||
<div className={cx(
|
||||
"h-2 w-2 rounded-full",
|
||||
isConnected ? "bg-green-500" : "bg-red-500"
|
||||
)} />
|
||||
<span className="text-xs text-slate-500 dark:text-slate-400">
|
||||
{isConnected ? "Connected" : "Disconnected"}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Mute Control */}
|
||||
<div className="flex items-center justify-between rounded-lg bg-slate-50 p-3 dark:bg-slate-700">
|
||||
<div className="flex items-center gap-3">
|
||||
{isMuted ? (
|
||||
<MdVolumeOff className="h-5 w-5 text-red-500" />
|
||||
) : (
|
||||
<MdVolumeUp className="h-5 w-5 text-green-500" />
|
||||
)}
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{isMuted ? "Muted" : "Unmuted"}
|
||||
</span>
|
||||
</div>
|
||||
<Button
|
||||
size="SM"
|
||||
theme={isMuted ? "primary" : "danger"}
|
||||
text={isMuted ? "Enable" : "Disable"}
|
||||
onClick={handleToggleMute}
|
||||
disabled={isLoading}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Microphone Control */}
|
||||
<div className="space-y-3">
|
||||
<div className="flex items-center gap-2">
|
||||
<MdMic className="h-4 w-4 text-slate-600 dark:text-slate-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Microphone Input
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center justify-between rounded-lg bg-slate-50 p-3 dark:bg-slate-700">
|
||||
<div className="flex items-center gap-3">
|
||||
{isMicrophoneActiveFromHook ? (
|
||||
<MdMic className="h-5 w-5 text-green-500" />
|
||||
) : (
|
||||
<MdMicOff className="h-5 w-5 text-red-500" />
|
||||
)}
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
{isMicrophoneActiveFromHook ? "Enabled" : "Disabled"}
|
||||
</span>
|
||||
</div>
|
||||
<Button
|
||||
size="SM"
|
||||
theme={isMicrophoneActiveFromHook ? "danger" : "primary"}
|
||||
text={isMicrophoneActiveFromHook ? "Disable" : "Enable"}
|
||||
onClick={handleToggleMicrophoneEnable}
|
||||
disabled={isLoading || isHttpsRequired}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* HTTPS requirement notice */}
|
||||
{isHttpsRequired && (
|
||||
<div className="text-xs text-amber-600 dark:text-amber-400 bg-amber-50 dark:bg-amber-900/20 p-2 rounded-md">
|
||||
<p className="font-medium mb-1">HTTPS Required for Microphone Input</p>
|
||||
<p>
|
||||
Microphone access requires a secure connection due to browser security policies. Audio output works fine on HTTP, but microphone input needs HTTPS.
|
||||
</p>
|
||||
<p className="mt-1">
|
||||
<span className="font-medium">Current:</span> {window.location.protocol + '//' + window.location.host}
|
||||
<br />
|
||||
<span className="font-medium">Secure:</span> {'https://' + window.location.host}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
</div>
|
||||
|
||||
{/* Device Selection */}
|
||||
<div className="space-y-3">
|
||||
<div className="flex items-center gap-2">
|
||||
<MdMic className="h-4 w-4 text-slate-600 dark:text-slate-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Audio Devices
|
||||
</span>
|
||||
{devicesLoading && (
|
||||
<div className="h-3 w-3 animate-spin rounded-full border border-slate-300 border-t-slate-600 dark:border-slate-600 dark:border-t-slate-300" />
|
||||
)}
|
||||
</div>
|
||||
|
||||
{devicesError && (
|
||||
<div className="rounded-md bg-red-50 p-2 text-xs text-red-600 dark:bg-red-900/20 dark:text-red-400">
|
||||
{devicesError}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Microphone Selection */}
|
||||
<div className="space-y-2">
|
||||
<label className="text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||
Microphone
|
||||
</label>
|
||||
<select
|
||||
value={selectedInputDevice}
|
||||
onChange={(e) => handleMicrophoneDeviceChange(e.target.value)}
|
||||
disabled={devicesLoading || isHttpsRequired}
|
||||
className="w-full rounded-md border border-slate-200 bg-white px-3 py-2 text-sm text-slate-700 focus:border-blue-500 focus:outline-none focus:ring-1 focus:ring-blue-500 disabled:bg-slate-50 disabled:text-slate-500 dark:border-slate-600 dark:bg-slate-700 dark:text-slate-300 dark:focus:border-blue-400 dark:disabled:bg-slate-800"
|
||||
>
|
||||
{audioInputDevices.map((device) => (
|
||||
<option key={device.deviceId} value={device.deviceId}>
|
||||
{device.label}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{isHttpsRequired ? (
|
||||
<p className="text-xs text-amber-600 dark:text-amber-400">
|
||||
HTTPS connection required for microphone device selection
|
||||
</p>
|
||||
) : isMicrophoneActiveFromHook ? (
|
||||
<p className="text-xs text-slate-500 dark:text-slate-400">
|
||||
Changing device will restart the microphone
|
||||
</p>
|
||||
) : null}
|
||||
</div>
|
||||
|
||||
{/* Speaker Selection */}
|
||||
<div className="space-y-2">
|
||||
<label className="text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||
Speaker
|
||||
</label>
|
||||
<select
|
||||
value={selectedOutputDevice}
|
||||
onChange={(e) => handleAudioOutputDeviceChange(e.target.value)}
|
||||
disabled={devicesLoading}
|
||||
className="w-full rounded-md border border-slate-200 bg-white px-3 py-2 text-sm text-slate-700 focus:border-blue-500 focus:outline-none focus:ring-1 focus:ring-blue-500 disabled:bg-slate-50 disabled:text-slate-500 dark:border-slate-600 dark:bg-slate-700 dark:text-slate-300 dark:focus:border-blue-400 dark:disabled:bg-slate-800"
|
||||
>
|
||||
{audioOutputDevices.map((device) => (
|
||||
<option key={device.deviceId} value={device.deviceId}>
|
||||
{device.label}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<button
|
||||
onClick={refreshDevices}
|
||||
disabled={devicesLoading}
|
||||
className="flex w-full items-center justify-center gap-2 rounded-md border border-slate-200 px-3 py-2 text-sm font-medium text-slate-700 hover:bg-slate-50 disabled:opacity-50 dark:border-slate-600 dark:text-slate-300 dark:hover:bg-slate-700"
|
||||
>
|
||||
<MdRefresh className={cx("h-4 w-4", devicesLoading && "animate-spin")} />
|
||||
Refresh Devices
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Audio Quality Info (fixed optimal configuration) */}
|
||||
{currentConfig && (
|
||||
<div className="space-y-2 rounded-md bg-slate-50 p-3 dark:bg-slate-800">
|
||||
<div className="flex items-center gap-2">
|
||||
<MdGraphicEq className="h-4 w-4 text-slate-600 dark:text-slate-400" />
|
||||
<span className="font-medium text-slate-900 dark:text-slate-100">
|
||||
Audio Configuration
|
||||
</span>
|
||||
</div>
|
||||
<div className="text-sm text-slate-600 dark:text-slate-400">
|
||||
Optimized for S16_LE @ 48kHz stereo HDMI audio
|
||||
</div>
|
||||
<div className="text-xs text-slate-500 dark:text-slate-500">
|
||||
Bitrate: {currentConfig.Bitrate} kbps | Sample Rate: {currentConfig.SampleRate} Hz | Channels: {currentConfig.Channels}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
|
@ -0,0 +1,182 @@
|
|||
import { useCallback, useEffect, useState } from "react";
|
||||
import { LuVolume2 } from "react-icons/lu";
|
||||
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import { GridCard } from "@components/Card";
|
||||
import { SettingsItem } from "@components/SettingsItem";
|
||||
import { SelectMenuBasic } from "@components/SelectMenuBasic";
|
||||
import { Button } from "@components/Button";
|
||||
import notifications from "@/notifications";
|
||||
|
||||
export default function AudioPopover() {
|
||||
const { send } = useJsonRpc();
|
||||
const [audioOutputSource, setAudioOutputSource] = useState<string>("hdmi");
|
||||
const [audioOutputEnabled, setAudioOutputEnabled] = useState<boolean>(true);
|
||||
const [audioInputEnabled, setAudioInputEnabled] = useState<boolean>(true);
|
||||
const [usbAudioEnabled, setUsbAudioEnabled] = useState<boolean>(false);
|
||||
const [loading, setLoading] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
// Load current audio settings
|
||||
send("getAudioOutputSource", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
console.error("Failed to load audio output source:", resp.error);
|
||||
} else {
|
||||
setAudioOutputSource(resp.result as string);
|
||||
}
|
||||
});
|
||||
|
||||
send("getAudioOutputEnabled", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
console.error("Failed to load audio output enabled:", resp.error);
|
||||
} else {
|
||||
setAudioOutputEnabled(resp.result as boolean);
|
||||
}
|
||||
});
|
||||
|
||||
send("getAudioInputEnabled", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
console.error("Failed to load audio input enabled:", resp.error);
|
||||
} else {
|
||||
setAudioInputEnabled(resp.result as boolean);
|
||||
}
|
||||
});
|
||||
|
||||
send("getUsbDevices", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
console.error("Failed to load USB devices:", resp.error);
|
||||
} else {
|
||||
const usbDevices = resp.result as { audio: boolean };
|
||||
setUsbAudioEnabled(usbDevices.audio || false);
|
||||
}
|
||||
});
|
||||
}, [send]);
|
||||
|
||||
const handleAudioOutputSourceChange = useCallback(
|
||||
(e: React.ChangeEvent<HTMLSelectElement>) => {
|
||||
const newSource = e.target.value;
|
||||
setLoading(true);
|
||||
send("setAudioOutputSource", { source: newSource }, (resp: JsonRpcResponse) => {
|
||||
setLoading(false);
|
||||
if ("error" in resp) {
|
||||
notifications.error(
|
||||
`Failed to set audio output source: ${resp.error.data || "Unknown error"}`,
|
||||
);
|
||||
} else {
|
||||
setAudioOutputSource(newSource);
|
||||
notifications.success(`Audio output source set to ${newSource.toUpperCase()}`);
|
||||
}
|
||||
});
|
||||
},
|
||||
[send],
|
||||
);
|
||||
|
||||
const handleAudioOutputEnabledToggle = useCallback(
|
||||
(e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const enabled = e.target.checked;
|
||||
setLoading(true);
|
||||
send("setAudioOutputEnabled", { enabled }, (resp: JsonRpcResponse) => {
|
||||
setLoading(false);
|
||||
if ("error" in resp) {
|
||||
notifications.error(
|
||||
`Failed to ${enabled ? "enable" : "disable"} audio output: ${resp.error.data || "Unknown error"}`,
|
||||
);
|
||||
} else {
|
||||
setAudioOutputEnabled(enabled);
|
||||
notifications.success(`Audio output ${enabled ? "enabled" : "disabled"}`);
|
||||
}
|
||||
});
|
||||
},
|
||||
[send],
|
||||
);
|
||||
|
||||
const handleAudioInputEnabledToggle = useCallback(
|
||||
(e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const enabled = e.target.checked;
|
||||
setLoading(true);
|
||||
send("setAudioInputEnabled", { enabled }, (resp: JsonRpcResponse) => {
|
||||
setLoading(false);
|
||||
if ("error" in resp) {
|
||||
notifications.error(
|
||||
`Failed to ${enabled ? "enable" : "disable"} audio input: ${resp.error.data || "Unknown error"}`,
|
||||
);
|
||||
} else {
|
||||
setAudioInputEnabled(enabled);
|
||||
notifications.success(`Audio input ${enabled ? "enabled" : "disabled"}`);
|
||||
}
|
||||
});
|
||||
},
|
||||
[send],
|
||||
);
|
||||
|
||||
return (
|
||||
<GridCard>
|
||||
<div className="space-y-4 p-4 py-3">
|
||||
<div className="space-y-4">
|
||||
<div className="flex items-center gap-2 text-slate-900 dark:text-slate-100">
|
||||
<LuVolume2 className="h-5 w-5" />
|
||||
<h3 className="font-semibold">Audio Settings</h3>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<SettingsItem
|
||||
loading={loading}
|
||||
title="Audio Output"
|
||||
description="Enable audio from target to speakers"
|
||||
>
|
||||
<Button
|
||||
size="SM"
|
||||
theme={audioOutputEnabled ? "light" : "primary"}
|
||||
text={audioOutputEnabled ? "Disable" : "Enable"}
|
||||
onClick={() => handleAudioOutputEnabledToggle({ target: { checked: !audioOutputEnabled } } as any)}
|
||||
/>
|
||||
</SettingsItem>
|
||||
|
||||
<SettingsItem
|
||||
loading={loading}
|
||||
title="Audio Output Source"
|
||||
description={usbAudioEnabled ? "Select where to capture audio from" : "Enable USB Audio to use USB as source"}
|
||||
>
|
||||
<SelectMenuBasic
|
||||
size="SM"
|
||||
label=""
|
||||
className="max-w-[180px]"
|
||||
value={audioOutputSource}
|
||||
fullWidth
|
||||
disabled={!audioOutputEnabled}
|
||||
onChange={handleAudioOutputSourceChange}
|
||||
options={
|
||||
usbAudioEnabled
|
||||
? [
|
||||
{ label: "HDMI", value: "hdmi" },
|
||||
{ label: "USB", value: "usb" },
|
||||
]
|
||||
: [{ label: "HDMI", value: "hdmi" }]
|
||||
}
|
||||
/>
|
||||
</SettingsItem>
|
||||
|
||||
{usbAudioEnabled && (
|
||||
<>
|
||||
<div className="h-px w-full bg-slate-800/10 dark:bg-slate-300/20" />
|
||||
|
||||
<SettingsItem
|
||||
loading={loading}
|
||||
title="Audio Input (Microphone)"
|
||||
description="Enable microphone input to target"
|
||||
>
|
||||
<Button
|
||||
size="SM"
|
||||
theme={audioInputEnabled ? "light" : "primary"}
|
||||
text={audioInputEnabled ? "Disable" : "Enable"}
|
||||
onClick={() => handleAudioInputEnabledToggle({ target: { checked: !audioInputEnabled } } as any)}
|
||||
/>
|
||||
</SettingsItem>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</GridCard>
|
||||
);
|
||||
}
|
||||
|
|
@ -1,13 +1,13 @@
|
|||
import { useEffect, useState } from "react";
|
||||
import { LuPower, LuTerminal, LuPlugZap } from "react-icons/lu";
|
||||
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import Card, { GridCard } from "@components/Card";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { ATXPowerControl } from "@components/extensions/ATXPowerControl";
|
||||
import { DCPowerControl } from "@components/extensions/DCPowerControl";
|
||||
import { SerialConsole } from "@components/extensions/SerialConsole";
|
||||
import { Button } from "@components/Button";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import notifications from "@/notifications";
|
||||
|
||||
interface Extension {
|
||||
|
|
|
|||
|
|
@ -10,9 +10,9 @@ import { useLocation } from "react-router";
|
|||
|
||||
import { Button } from "@components/Button";
|
||||
import Card, { GridCard } from "@components/Card";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { formatters } from "@/utils";
|
||||
import { RemoteVirtualMediaState, useMountMediaStore } from "@/hooks/stores";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import { useDeviceUiNavigation } from "@/hooks/useAppNavigation";
|
||||
import notifications from "@/notifications";
|
||||
|
|
|
|||
|
|
@ -3,17 +3,17 @@ import { ExclamationCircleIcon } from "@heroicons/react/16/solid";
|
|||
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
||||
import { LuCornerDownLeft } from "react-icons/lu";
|
||||
|
||||
import { Button } from "@components/Button";
|
||||
import { GridCard } from "@components/Card";
|
||||
import { InputFieldWithLabel } from "@components/InputField";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { TextAreaWithLabel } from "@components/TextArea";
|
||||
import { cx } from "@/cva.config";
|
||||
import { useHidStore, useSettingsStore, useUiStore } from "@/hooks/stores";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import useKeyboard, { type MacroStep } from "@/hooks/useKeyboard";
|
||||
import useKeyboardLayout from "@/hooks/useKeyboardLayout";
|
||||
import notifications from "@/notifications";
|
||||
import { Button } from "@components/Button";
|
||||
import { GridCard } from "@components/Card";
|
||||
import { InputFieldWithLabel } from "@components/InputField";
|
||||
import { SettingsPageHeader } from "@components/SettingsPageheader";
|
||||
import { TextAreaWithLabel } from "@components/TextArea";
|
||||
|
||||
// uint32 max value / 4
|
||||
const pasteMaxLength = 1073741824;
|
||||
|
|
|
|||
|
|
@ -11,8 +11,6 @@ import EmptyStateCard from "./EmptyStateCard";
|
|||
import DeviceList, { StoredDevice } from "./DeviceList";
|
||||
import AddDeviceForm from "./AddDeviceForm";
|
||||
|
||||
|
||||
|
||||
export default function WakeOnLanModal() {
|
||||
const [storedDevices, setStoredDevices] = useState<StoredDevice[]>([]);
|
||||
const [showAddForm, setShowAddForm] = useState(false);
|
||||
|
|
|
|||
|
|
@ -1,13 +1,11 @@
|
|||
import { useInterval } from "usehooks-ts";
|
||||
|
||||
|
||||
import SidebarHeader from "@/components/SidebarHeader";
|
||||
import { useRTCStore, useUiStore } from "@/hooks/stores";
|
||||
import { someIterable } from "@/utils";
|
||||
|
||||
import { SettingsSectionHeader } from "../SettingsSectionHeader";
|
||||
import { createChartArray, Metric } from "../Metric";
|
||||
|
||||
import { SettingsSectionHeader } from "../SettingsSectionHeader";
|
||||
|
||||
export default function ConnectionStatsSidebar() {
|
||||
const { sidebarView, setSidebarView } = useUiStore();
|
||||
|
|
|
|||
|
|
@ -1,113 +0,0 @@
|
|||
// Centralized configuration constants
|
||||
|
||||
// Network and API Configuration
|
||||
export const NETWORK_CONFIG = {
|
||||
WEBSOCKET_RECONNECT_INTERVAL: 3000,
|
||||
LONG_PRESS_DURATION: 3000,
|
||||
ERROR_MESSAGE_TIMEOUT: 3000,
|
||||
AUDIO_TEST_DURATION: 5000,
|
||||
BACKEND_RETRY_DELAY: 500,
|
||||
RESET_DELAY: 200,
|
||||
STATE_CHECK_DELAY: 100,
|
||||
VERIFICATION_DELAY: 1000,
|
||||
} as const;
|
||||
|
||||
// Default URLs and Endpoints
|
||||
export const DEFAULT_URLS = {
|
||||
JETKVM_PROD_API: "https://api.jetkvm.com",
|
||||
JETKVM_PROD_APP: "https://app.jetkvm.com",
|
||||
JETKVM_DOCS_TROUBLESHOOTING: "https://jetkvm.com/docs/getting-started/troubleshooting",
|
||||
JETKVM_DOCS_REMOTE_ACCESS: "https://jetkvm.com/docs/networking/remote-access",
|
||||
JETKVM_DOCS_LOCAL_ACCESS_RESET: "https://jetkvm.com/docs/networking/local-access#reset-password",
|
||||
JETKVM_GITHUB: "https://github.com/jetkvm",
|
||||
CRONTAB_GURU: "https://crontab.guru/examples.html",
|
||||
} as const;
|
||||
|
||||
// Sample ISO URLs for mounting
|
||||
export const SAMPLE_ISOS = {
|
||||
UBUNTU_24_04: {
|
||||
name: "Ubuntu 24.04.2 Desktop",
|
||||
url: "https://releases.ubuntu.com/24.04.2/ubuntu-24.04.2-desktop-amd64.iso",
|
||||
},
|
||||
DEBIAN_13: {
|
||||
name: "Debian 13.0.0 (Testing)",
|
||||
url: "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-13.0.0-amd64-netinst.iso",
|
||||
},
|
||||
DEBIAN_12: {
|
||||
name: "Debian 12.11.0 (Stable)",
|
||||
url: "https://cdimage.debian.org/mirror/cdimage/archive/12.11.0/amd64/iso-cd/debian-12.11.0-amd64-netinst.iso",
|
||||
},
|
||||
FEDORA_41: {
|
||||
name: "Fedora 41 Workstation",
|
||||
url: "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Workstation/x86_64/iso/Fedora-Workstation-Live-x86_64-41-1.4.iso",
|
||||
},
|
||||
OPENSUSE_LEAP: {
|
||||
name: "openSUSE Leap 15.6",
|
||||
url: "https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-x86_64-Media.iso",
|
||||
},
|
||||
OPENSUSE_TUMBLEWEED: {
|
||||
name: "openSUSE Tumbleweed",
|
||||
url: "https://download.opensuse.org/tumbleweed/iso/openSUSE-Tumbleweed-NET-x86_64-Current.iso",
|
||||
},
|
||||
ARCH_LINUX: {
|
||||
name: "Arch Linux",
|
||||
url: "https://archlinux.doridian.net/iso/2025.02.01/archlinux-2025.02.01-x86_64.iso",
|
||||
},
|
||||
NETBOOT_XYZ: {
|
||||
name: "netboot.xyz",
|
||||
url: "https://boot.netboot.xyz/ipxe/netboot.xyz.iso",
|
||||
},
|
||||
} as const;
|
||||
|
||||
// Security and Access Configuration
|
||||
export const SECURITY_CONFIG = {
|
||||
LOCALHOST_ONLY_IP: "127.0.0.1",
|
||||
LOCALHOST_HOSTNAME: "localhost",
|
||||
HTTPS_PROTOCOL: "https:",
|
||||
} as const;
|
||||
|
||||
// Default Hardware Configuration
|
||||
export const HARDWARE_CONFIG = {
|
||||
DEFAULT_OFF_AFTER: 50000,
|
||||
SAMPLE_EDID: "00FFFFFFFFFFFF00047265058A3F6101101E0104A53420783FC125A8554EA0260D5054BFEF80714F8140818081C081008B009500B300283C80A070B023403020360006442100001A000000FD00304C575716010A202020202020000000FC0042323436574C0A202020202020000000FF0054384E4545303033383532320A01F802031CF14F90020304050607011112131415161F2309070783010000011D8018711C1620582C250006442100009E011D007251D01E206E28550006442100001E8C0AD08A20E02D10103E9600064421000018C344806E70B028401720A80406442100001E00000000000000000000000000000000000000000000000000000096",
|
||||
} as const;
|
||||
|
||||
// Audio Configuration
|
||||
export const AUDIO_CONFIG = {
|
||||
// Audio Level Analysis
|
||||
LEVEL_UPDATE_INTERVAL: 100, // ms - throttle audio level updates for performance
|
||||
FFT_SIZE: 128, // reduced from 256 for better performance
|
||||
SMOOTHING_TIME_CONSTANT: 0.8,
|
||||
RELEVANT_FREQUENCY_BINS: 32, // focus on lower frequencies for voice
|
||||
RMS_SCALING_FACTOR: 180, // for converting RMS to percentage
|
||||
MAX_LEVEL_PERCENTAGE: 100,
|
||||
|
||||
// Microphone Configuration
|
||||
SAMPLE_RATE: 48000, // Hz - high quality audio sampling
|
||||
CHANNEL_COUNT: 1, // mono for microphone input
|
||||
OPERATION_DEBOUNCE_MS: 1000, // debounce microphone operations
|
||||
SYNC_DEBOUNCE_MS: 1000, // debounce state synchronization
|
||||
AUDIO_TEST_TIMEOUT: 100, // ms - timeout for audio testing
|
||||
|
||||
// Audio quality is fixed at optimal settings (96 kbps @ 48kHz stereo)
|
||||
// No quality presets needed - single optimal configuration for all use cases
|
||||
|
||||
// Audio Analysis
|
||||
ANALYSIS_FFT_SIZE: 256, // for detailed audio analysis
|
||||
ANALYSIS_UPDATE_INTERVAL: 100, // ms - 10fps for audio level updates
|
||||
LEVEL_SCALING_FACTOR: 255, // for RMS to percentage conversion
|
||||
|
||||
// Audio Metrics Thresholds
|
||||
DROP_RATE_WARNING_THRESHOLD: 1, // percentage - yellow warning
|
||||
DROP_RATE_CRITICAL_THRESHOLD: 5, // percentage - red critical
|
||||
PERCENTAGE_MULTIPLIER: 100, // for converting ratios to percentages
|
||||
PERCENTAGE_DECIMAL_PLACES: 2, // decimal places for percentage display
|
||||
} as const;
|
||||
|
||||
// Placeholder URLs
|
||||
export const PLACEHOLDERS = {
|
||||
ISO_URL: "https://example.com/image.iso",
|
||||
PROXY_URL: "http://proxy.example.com:8080/",
|
||||
API_URL: "https://api.example.com",
|
||||
APP_URL: "https://app.example.com",
|
||||
} as const;
|
||||
|
|
@ -129,16 +129,6 @@ export interface RTCState {
|
|||
mediaStream: MediaStream | null;
|
||||
setMediaStream: (stream: MediaStream) => void;
|
||||
|
||||
// Microphone stream management
|
||||
microphoneStream: MediaStream | null;
|
||||
setMicrophoneStream: (stream: MediaStream | null) => void;
|
||||
microphoneSender: RTCRtpSender | null;
|
||||
setMicrophoneSender: (sender: RTCRtpSender | null) => void;
|
||||
isMicrophoneActive: boolean;
|
||||
setMicrophoneActive: (active: boolean) => void;
|
||||
isMicrophoneMuted: boolean;
|
||||
setMicrophoneMuted: (muted: boolean) => void;
|
||||
|
||||
videoStreamStats: RTCInboundRtpStreamStats | null;
|
||||
appendVideoStreamStats: (stats: RTCInboundRtpStreamStats) => void;
|
||||
videoStreamStatsHistory: Map<number, RTCInboundRtpStreamStats>;
|
||||
|
|
@ -200,16 +190,6 @@ export const useRTCStore = create<RTCState>(set => ({
|
|||
mediaStream: null,
|
||||
setMediaStream: (stream: MediaStream) => set({ mediaStream: stream }),
|
||||
|
||||
// Microphone stream management
|
||||
microphoneStream: null,
|
||||
setMicrophoneStream: stream => set({ microphoneStream: stream }),
|
||||
microphoneSender: null,
|
||||
setMicrophoneSender: sender => set({ microphoneSender: sender }),
|
||||
isMicrophoneActive: false,
|
||||
setMicrophoneActive: active => set({ isMicrophoneActive: active }),
|
||||
isMicrophoneMuted: false,
|
||||
setMicrophoneMuted: muted => set({ isMicrophoneMuted: muted }),
|
||||
|
||||
videoStreamStats: null,
|
||||
appendVideoStreamStats: (stats: RTCInboundRtpStreamStats) => set({ videoStreamStats: stats }),
|
||||
videoStreamStatsHistory: new Map(),
|
||||
|
|
@ -371,10 +351,6 @@ export interface SettingsState {
|
|||
setVideoBrightness: (value: number) => void;
|
||||
videoContrast: number;
|
||||
setVideoContrast: (value: number) => void;
|
||||
|
||||
// Microphone persistence settings
|
||||
microphoneWasEnabled: boolean;
|
||||
setMicrophoneWasEnabled: (enabled: boolean) => void;
|
||||
}
|
||||
|
||||
export const useSettingsStore = create(
|
||||
|
|
@ -420,10 +396,6 @@ export const useSettingsStore = create(
|
|||
setVideoBrightness: (value: number) => set({ videoBrightness: value }),
|
||||
videoContrast: 1.0,
|
||||
setVideoContrast: (value: number) => set({ videoContrast: value }),
|
||||
|
||||
// Microphone persistence settings
|
||||
microphoneWasEnabled: false,
|
||||
setMicrophoneWasEnabled: (enabled: boolean) => set({ microphoneWasEnabled: enabled }),
|
||||
}),
|
||||
{
|
||||
name: "settings",
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ import type { NavigateOptions } from "react-router";
|
|||
import { useCallback, useMemo } from "react";
|
||||
|
||||
import { isOnDevice } from "../main";
|
||||
import { devError } from '../utils/debug';
|
||||
|
||||
/**
|
||||
* Generates the correct path based on whether the app is running on device or in cloud mode
|
||||
|
|
@ -23,7 +22,7 @@ export function getDeviceUiPath(path: string, deviceId?: string): string {
|
|||
return normalizedPath;
|
||||
} else {
|
||||
if (!deviceId) {
|
||||
devError("No device ID provided when generating path in cloud mode");
|
||||
console.error("No device ID provided when generating path in cloud mode");
|
||||
throw new Error("Device ID is required for cloud mode path generation");
|
||||
}
|
||||
return `/devices/${deviceId}${normalizedPath}`;
|
||||
|
|
|
|||
|
|
@ -1,187 +0,0 @@
|
|||
import { useState, useEffect, useCallback } from 'react';
|
||||
|
||||
import { devError } from '../utils/debug';
|
||||
|
||||
export interface AudioDevice {
|
||||
deviceId: string;
|
||||
label: string;
|
||||
kind: 'audioinput' | 'audiooutput';
|
||||
}
|
||||
|
||||
export interface UseAudioDevicesReturn {
|
||||
audioInputDevices: AudioDevice[];
|
||||
audioOutputDevices: AudioDevice[];
|
||||
selectedInputDevice: string;
|
||||
selectedOutputDevice: string;
|
||||
isLoading: boolean;
|
||||
error: string | null;
|
||||
refreshDevices: () => Promise<void>;
|
||||
setSelectedInputDevice: (deviceId: string) => void;
|
||||
setSelectedOutputDevice: (deviceId: string) => void;
|
||||
}
|
||||
|
||||
export function useAudioDevices(): UseAudioDevicesReturn {
|
||||
const [audioInputDevices, setAudioInputDevices] = useState<AudioDevice[]>([]);
|
||||
const [audioOutputDevices, setAudioOutputDevices] = useState<AudioDevice[]>([]);
|
||||
const [selectedInputDevice, setSelectedInputDevice] = useState<string>('default');
|
||||
const [selectedOutputDevice, setSelectedOutputDevice] = useState<string>('default');
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
const refreshDevices = useCallback(async () => {
|
||||
setIsLoading(true);
|
||||
setError(null);
|
||||
|
||||
try {
|
||||
// Check if we're on HTTP (microphone requires HTTPS, but speakers can work)
|
||||
const isHttp = window.location.protocol === 'http:';
|
||||
const hasMediaDevices = !!navigator.mediaDevices;
|
||||
const hasGetUserMedia = !!navigator.mediaDevices?.getUserMedia;
|
||||
const hasEnumerateDevices = !!navigator.mediaDevices?.enumerateDevices;
|
||||
|
||||
if (isHttp || !hasMediaDevices || !hasGetUserMedia) {
|
||||
// Set placeholder devices when HTTPS is required for microphone
|
||||
setAudioInputDevices([
|
||||
{ deviceId: 'https-required', label: 'HTTPS Required for Microphone Access', kind: 'audioinput' }
|
||||
]);
|
||||
|
||||
// Try to enumerate speakers if possible, otherwise provide defaults
|
||||
if (hasMediaDevices && hasEnumerateDevices) {
|
||||
try {
|
||||
const devices = await navigator.mediaDevices.enumerateDevices();
|
||||
const outputDevices: AudioDevice[] = [
|
||||
{ deviceId: 'default', label: 'Default Speaker', kind: 'audiooutput' }
|
||||
];
|
||||
|
||||
devices.forEach(device => {
|
||||
if (device.kind === 'audiooutput' && device.deviceId !== 'default') {
|
||||
outputDevices.push({
|
||||
deviceId: device.deviceId,
|
||||
label: device.label || `Speaker ${device.deviceId.slice(0, 8)}`,
|
||||
kind: 'audiooutput'
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
setAudioOutputDevices(outputDevices);
|
||||
} catch {
|
||||
// Fallback to default speakers if enumeration fails
|
||||
setAudioOutputDevices([
|
||||
{ deviceId: 'default', label: 'Default Speaker', kind: 'audiooutput' },
|
||||
{ deviceId: 'system-default', label: 'System Default Audio Output', kind: 'audiooutput' }
|
||||
]);
|
||||
}
|
||||
} else {
|
||||
// No enumeration available, use defaults
|
||||
setAudioOutputDevices([
|
||||
{ deviceId: 'default', label: 'Default Speaker', kind: 'audiooutput' },
|
||||
{ deviceId: 'system-default', label: 'System Default Audio Output', kind: 'audiooutput' }
|
||||
]);
|
||||
}
|
||||
|
||||
setSelectedInputDevice('https-required');
|
||||
setSelectedOutputDevice('default');
|
||||
return; // Exit gracefully without throwing error on HTTP
|
||||
}
|
||||
|
||||
// Request permissions first to get device labels
|
||||
await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
|
||||
const devices = await navigator.mediaDevices.enumerateDevices();
|
||||
|
||||
const inputDevices: AudioDevice[] = [
|
||||
{ deviceId: 'default', label: 'Default Microphone', kind: 'audioinput' }
|
||||
];
|
||||
|
||||
const outputDevices: AudioDevice[] = [
|
||||
{ deviceId: 'default', label: 'Default Speaker', kind: 'audiooutput' }
|
||||
];
|
||||
|
||||
devices.forEach(device => {
|
||||
if (device.kind === 'audioinput' && device.deviceId !== 'default') {
|
||||
inputDevices.push({
|
||||
deviceId: device.deviceId,
|
||||
label: device.label || `Microphone ${device.deviceId.slice(0, 8)}`,
|
||||
kind: 'audioinput'
|
||||
});
|
||||
} else if (device.kind === 'audiooutput' && device.deviceId !== 'default') {
|
||||
outputDevices.push({
|
||||
deviceId: device.deviceId,
|
||||
label: device.label || `Speaker ${device.deviceId.slice(0, 8)}`,
|
||||
kind: 'audiooutput'
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
setAudioInputDevices(inputDevices);
|
||||
setAudioOutputDevices(outputDevices);
|
||||
|
||||
// Audio devices enumerated
|
||||
|
||||
} catch (err) {
|
||||
// Only log errors on HTTPS where we expect full device access
|
||||
const isHttp = window.location.protocol === 'http:';
|
||||
if (!isHttp) {
|
||||
devError('Failed to enumerate audio devices:', err);
|
||||
}
|
||||
|
||||
let errorMessage = 'Failed to access audio devices';
|
||||
|
||||
if (err instanceof Error) {
|
||||
if (err.message.includes('HTTPS')) {
|
||||
errorMessage = err.message;
|
||||
} else if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') {
|
||||
errorMessage = 'Microphone permission denied. Please allow microphone access.';
|
||||
} else if (err.name === 'NotFoundError' || err.name === 'DevicesNotFoundError') {
|
||||
errorMessage = 'No microphone devices found.';
|
||||
} else if (err.name === 'NotSupportedError') {
|
||||
errorMessage = 'Audio devices are not supported on this connection. Please use HTTPS.';
|
||||
} else {
|
||||
errorMessage = err.message || errorMessage;
|
||||
}
|
||||
}
|
||||
|
||||
// Only set error state on HTTPS where we expect device access to work
|
||||
if (!isHttp) {
|
||||
setError(errorMessage);
|
||||
}
|
||||
} finally {
|
||||
setIsLoading(false);
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Listen for device changes
|
||||
useEffect(() => {
|
||||
const handleDeviceChange = () => {
|
||||
// Audio devices changed, refreshing
|
||||
refreshDevices();
|
||||
};
|
||||
|
||||
// Check if navigator.mediaDevices exists and supports addEventListener
|
||||
if (navigator.mediaDevices && typeof navigator.mediaDevices.addEventListener === 'function') {
|
||||
navigator.mediaDevices.addEventListener('devicechange', handleDeviceChange);
|
||||
}
|
||||
|
||||
// Initial load
|
||||
refreshDevices();
|
||||
|
||||
return () => {
|
||||
// Check if navigator.mediaDevices exists and supports removeEventListener
|
||||
if (navigator.mediaDevices && typeof navigator.mediaDevices.removeEventListener === 'function') {
|
||||
navigator.mediaDevices.removeEventListener('devicechange', handleDeviceChange);
|
||||
}
|
||||
};
|
||||
}, [refreshDevices]);
|
||||
|
||||
return {
|
||||
audioInputDevices,
|
||||
audioOutputDevices,
|
||||
selectedInputDevice,
|
||||
selectedOutputDevice,
|
||||
isLoading,
|
||||
error,
|
||||
refreshDevices,
|
||||
setSelectedInputDevice,
|
||||
setSelectedOutputDevice,
|
||||
};
|
||||
}
|
||||
|
|
@ -1,308 +0,0 @@
|
|||
import { useCallback, useEffect, useRef, useState } from 'react';
|
||||
import useWebSocket, { ReadyState } from 'react-use-websocket';
|
||||
|
||||
import { devError, devWarn } from '../utils/debug';
|
||||
import { NETWORK_CONFIG } from '../config/constants';
|
||||
|
||||
import { JsonRpcResponse, useJsonRpc } from './useJsonRpc';
|
||||
import { useRTCStore } from './stores';
|
||||
|
||||
// Audio event types matching the backend
|
||||
export type AudioEventType =
|
||||
| 'audio-mute-changed'
|
||||
| 'microphone-state-changed'
|
||||
| 'audio-device-changed';
|
||||
|
||||
// Audio event data interfaces
|
||||
export interface AudioMuteData {
|
||||
muted: boolean;
|
||||
}
|
||||
|
||||
export interface MicrophoneStateData {
|
||||
running: boolean;
|
||||
session_active: boolean;
|
||||
}
|
||||
|
||||
export interface AudioDeviceChangedData {
|
||||
enabled: boolean;
|
||||
reason: string;
|
||||
}
|
||||
|
||||
// Audio event structure
|
||||
export interface AudioEvent {
|
||||
type: AudioEventType;
|
||||
data: AudioMuteData | MicrophoneStateData | AudioDeviceChangedData;
|
||||
}
|
||||
|
||||
// Hook return type
|
||||
export interface UseAudioEventsReturn {
|
||||
// Connection state
|
||||
connectionState: ReadyState;
|
||||
isConnected: boolean;
|
||||
|
||||
// Audio state
|
||||
audioMuted: boolean | null;
|
||||
|
||||
// Microphone state
|
||||
microphoneState: MicrophoneStateData | null;
|
||||
|
||||
// Device change events
|
||||
onAudioDeviceChanged?: (data: AudioDeviceChangedData) => void;
|
||||
|
||||
// Manual subscription control
|
||||
subscribe: () => void;
|
||||
unsubscribe: () => void;
|
||||
}
|
||||
|
||||
// Global subscription management to prevent multiple subscriptions per WebSocket connection
|
||||
const globalSubscriptionState = {
|
||||
isSubscribed: false,
|
||||
subscriberCount: 0,
|
||||
connectionId: null as string | null
|
||||
};
|
||||
|
||||
export function useAudioEvents(onAudioDeviceChanged?: (data: AudioDeviceChangedData) => void): UseAudioEventsReturn {
|
||||
// State for audio data
|
||||
const [audioMuted, setAudioMuted] = useState<boolean | null>(null);
|
||||
const [microphoneState, setMicrophoneState] = useState<MicrophoneStateData | null>(null);
|
||||
|
||||
// Get RTC store and JSON RPC functionality
|
||||
const { rpcDataChannel } = useRTCStore();
|
||||
const { send } = useJsonRpc();
|
||||
|
||||
// Fetch initial audio status using RPC for cloud compatibility
|
||||
const fetchInitialAudioStatus = useCallback(async () => {
|
||||
// Early return if RPC data channel is not open
|
||||
if (rpcDataChannel?.readyState !== "open") {
|
||||
devWarn('RPC connection not available for initial audio status, skipping');
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
await new Promise<void>((resolve) => {
|
||||
send("audioStatus", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
devError('RPC audioStatus failed:', resp.error);
|
||||
} else if ("result" in resp) {
|
||||
const data = resp.result as { muted: boolean };
|
||||
setAudioMuted(data.muted);
|
||||
}
|
||||
resolve(); // Continue regardless of result
|
||||
});
|
||||
});
|
||||
} catch (error) {
|
||||
devError('Failed to fetch initial audio status via RPC:', error);
|
||||
}
|
||||
}, [rpcDataChannel?.readyState, send]);
|
||||
|
||||
// Local subscription state
|
||||
const [isLocallySubscribed, setIsLocallySubscribed] = useState(false);
|
||||
const subscriptionTimeoutRef = useRef<number | null>(null);
|
||||
|
||||
// Get WebSocket URL
|
||||
const getWebSocketUrl = () => {
|
||||
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
|
||||
const host = window.location.host;
|
||||
return `${protocol}//${host}/webrtc/signaling/client`;
|
||||
};
|
||||
|
||||
// Shared WebSocket connection using the `share` option for better resource management
|
||||
const {
|
||||
sendMessage,
|
||||
lastMessage,
|
||||
readyState,
|
||||
} = useWebSocket(getWebSocketUrl(), {
|
||||
shouldReconnect: () => true,
|
||||
reconnectAttempts: 10,
|
||||
reconnectInterval: NETWORK_CONFIG.WEBSOCKET_RECONNECT_INTERVAL,
|
||||
share: true, // Share the WebSocket connection across multiple hooks
|
||||
onOpen: () => {
|
||||
// WebSocket connected
|
||||
// Reset global state on new connection
|
||||
globalSubscriptionState.isSubscribed = false;
|
||||
globalSubscriptionState.connectionId = Math.random().toString(36);
|
||||
},
|
||||
onClose: () => {
|
||||
// WebSocket disconnected
|
||||
// Reset global state on disconnect
|
||||
globalSubscriptionState.isSubscribed = false;
|
||||
globalSubscriptionState.subscriberCount = 0;
|
||||
globalSubscriptionState.connectionId = null;
|
||||
},
|
||||
onError: (event) => {
|
||||
devError('[AudioEvents] WebSocket error:', event);
|
||||
},
|
||||
});
|
||||
|
||||
// Subscribe to audio events
|
||||
const subscribe = useCallback(() => {
|
||||
if (readyState === ReadyState.OPEN && !globalSubscriptionState.isSubscribed) {
|
||||
// Clear any pending subscription timeout
|
||||
if (subscriptionTimeoutRef.current) {
|
||||
clearTimeout(subscriptionTimeoutRef.current);
|
||||
subscriptionTimeoutRef.current = null;
|
||||
}
|
||||
|
||||
// Add a small delay to prevent rapid subscription attempts
|
||||
subscriptionTimeoutRef.current = setTimeout(() => {
|
||||
if (readyState === ReadyState.OPEN && !globalSubscriptionState.isSubscribed) {
|
||||
const subscribeMessage = {
|
||||
type: 'subscribe-audio-events',
|
||||
data: {}
|
||||
};
|
||||
|
||||
sendMessage(JSON.stringify(subscribeMessage));
|
||||
globalSubscriptionState.isSubscribed = true;
|
||||
// Subscribed to audio events
|
||||
}
|
||||
}, 100); // 100ms delay to debounce subscription attempts
|
||||
}
|
||||
|
||||
// Track local subscription regardless of global state
|
||||
if (!isLocallySubscribed) {
|
||||
globalSubscriptionState.subscriberCount++;
|
||||
setIsLocallySubscribed(true);
|
||||
}
|
||||
}, [readyState, sendMessage, isLocallySubscribed]);
|
||||
|
||||
// Unsubscribe from audio events
|
||||
const unsubscribe = useCallback(() => {
|
||||
// Clear any pending subscription timeout
|
||||
if (subscriptionTimeoutRef.current) {
|
||||
clearTimeout(subscriptionTimeoutRef.current);
|
||||
subscriptionTimeoutRef.current = null;
|
||||
}
|
||||
|
||||
if (isLocallySubscribed) {
|
||||
globalSubscriptionState.subscriberCount--;
|
||||
setIsLocallySubscribed(false);
|
||||
|
||||
// Only send unsubscribe message if this is the last subscriber and connection is still open
|
||||
if (globalSubscriptionState.subscriberCount <= 0 &&
|
||||
readyState === ReadyState.OPEN &&
|
||||
globalSubscriptionState.isSubscribed) {
|
||||
|
||||
const unsubscribeMessage = {
|
||||
type: 'unsubscribe-audio-events',
|
||||
data: {}
|
||||
};
|
||||
|
||||
sendMessage(JSON.stringify(unsubscribeMessage));
|
||||
globalSubscriptionState.isSubscribed = false;
|
||||
globalSubscriptionState.subscriberCount = 0;
|
||||
// Sent unsubscribe message to backend
|
||||
}
|
||||
}
|
||||
|
||||
// Component unsubscribed from audio events
|
||||
}, [readyState, isLocallySubscribed, sendMessage]);
|
||||
|
||||
// Handle incoming messages
|
||||
useEffect(() => {
|
||||
if (lastMessage !== null) {
|
||||
try {
|
||||
const message = JSON.parse(lastMessage.data);
|
||||
|
||||
// Handle audio events
|
||||
if (message.type && message.data) {
|
||||
const audioEvent = message as AudioEvent;
|
||||
|
||||
switch (audioEvent.type) {
|
||||
case 'audio-mute-changed': {
|
||||
const muteData = audioEvent.data as AudioMuteData;
|
||||
setAudioMuted(muteData.muted);
|
||||
// Audio mute changed
|
||||
break;
|
||||
}
|
||||
|
||||
case 'microphone-state-changed': {
|
||||
const micStateData = audioEvent.data as MicrophoneStateData;
|
||||
setMicrophoneState(micStateData);
|
||||
// Microphone state changed
|
||||
break;
|
||||
}
|
||||
|
||||
case 'audio-device-changed': {
|
||||
const deviceChangedData = audioEvent.data as AudioDeviceChangedData;
|
||||
// Audio device changed
|
||||
if (onAudioDeviceChanged) {
|
||||
onAudioDeviceChanged(deviceChangedData);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
// Ignore other message types (WebRTC signaling, etc.)
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Ignore parsing errors for non-JSON messages (like "pong")
|
||||
if (lastMessage.data !== 'pong') {
|
||||
devWarn('[AudioEvents] Failed to parse WebSocket message:', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
}, [lastMessage, onAudioDeviceChanged]);
|
||||
|
||||
// Auto-subscribe when connected
|
||||
useEffect(() => {
|
||||
if (readyState === ReadyState.OPEN) {
|
||||
subscribe();
|
||||
}
|
||||
|
||||
// Cleanup subscription on component unmount or connection change
|
||||
return () => {
|
||||
if (subscriptionTimeoutRef.current) {
|
||||
clearTimeout(subscriptionTimeoutRef.current);
|
||||
subscriptionTimeoutRef.current = null;
|
||||
}
|
||||
unsubscribe();
|
||||
};
|
||||
}, [readyState, subscribe, unsubscribe]);
|
||||
|
||||
// Reset local subscription state on disconnect
|
||||
useEffect(() => {
|
||||
if (readyState === ReadyState.CLOSED || readyState === ReadyState.CLOSING) {
|
||||
setIsLocallySubscribed(false);
|
||||
if (subscriptionTimeoutRef.current) {
|
||||
clearTimeout(subscriptionTimeoutRef.current);
|
||||
subscriptionTimeoutRef.current = null;
|
||||
}
|
||||
}
|
||||
}, [readyState]);
|
||||
|
||||
// Fetch initial audio status on component mount - but only when RPC is ready
|
||||
useEffect(() => {
|
||||
// Only fetch when RPC data channel is open and ready
|
||||
if (rpcDataChannel?.readyState === "open") {
|
||||
fetchInitialAudioStatus();
|
||||
}
|
||||
}, [fetchInitialAudioStatus, rpcDataChannel?.readyState]);
|
||||
|
||||
// Cleanup on component unmount
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
unsubscribe();
|
||||
};
|
||||
}, [unsubscribe]);
|
||||
|
||||
return {
|
||||
// Connection state
|
||||
connectionState: readyState,
|
||||
isConnected: readyState === ReadyState.OPEN && globalSubscriptionState.isSubscribed,
|
||||
|
||||
// Audio state
|
||||
audioMuted,
|
||||
|
||||
// Microphone state
|
||||
microphoneState,
|
||||
|
||||
// Device change events
|
||||
onAudioDeviceChanged,
|
||||
|
||||
// Manual subscription control
|
||||
subscribe,
|
||||
unsubscribe,
|
||||
};
|
||||
}
|
||||
|
|
@ -17,8 +17,6 @@ import {
|
|||
unmarshalHidRpcMessage,
|
||||
} from "./hidRpc";
|
||||
|
||||
|
||||
|
||||
const KEEPALIVE_MESSAGE = new KeypressKeepAliveMessage();
|
||||
|
||||
interface sendMessageParams {
|
||||
|
|
|
|||
|
|
@ -1,700 +0,0 @@
|
|||
import { useCallback, useEffect, useRef, useState } from "react";
|
||||
|
||||
import { useRTCStore, useSettingsStore } from "@/hooks/stores";
|
||||
import { JsonRpcResponse, useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import { useUsbDeviceConfig } from "@/hooks/useUsbDeviceConfig";
|
||||
import { useAudioEvents, AudioDeviceChangedData } from "@/hooks/useAudioEvents";
|
||||
import { devLog, devInfo, devWarn, devError, devOnly } from "@/utils/debug";
|
||||
import { AUDIO_CONFIG } from "@/config/constants";
|
||||
|
||||
export interface MicrophoneError {
|
||||
type: 'permission' | 'device' | 'network' | 'unknown';
|
||||
message: string;
|
||||
}
|
||||
|
||||
// Helper function to check if HTTPS is required for microphone access
|
||||
export function isHttpsRequired(): boolean {
|
||||
// Check if we're on HTTP (not HTTPS)
|
||||
const isHttp = window.location.protocol === 'http:';
|
||||
|
||||
// Check if media devices are available
|
||||
const hasMediaDevices = !!navigator.mediaDevices;
|
||||
const hasGetUserMedia = !!navigator.mediaDevices?.getUserMedia;
|
||||
|
||||
// HTTPS is required if we're on HTTP OR if media devices aren't available
|
||||
return isHttp || !hasMediaDevices || !hasGetUserMedia;
|
||||
}
|
||||
|
||||
export function useMicrophone() {
|
||||
const {
|
||||
peerConnection,
|
||||
microphoneStream,
|
||||
setMicrophoneStream,
|
||||
microphoneSender,
|
||||
setMicrophoneSender,
|
||||
isMicrophoneActive,
|
||||
setMicrophoneActive,
|
||||
isMicrophoneMuted,
|
||||
setMicrophoneMuted,
|
||||
rpcDataChannel,
|
||||
} = useRTCStore();
|
||||
|
||||
const { microphoneWasEnabled, setMicrophoneWasEnabled } = useSettingsStore();
|
||||
const { send } = useJsonRpc();
|
||||
|
||||
// Check USB audio status and handle microphone restoration when USB audio is re-enabled
|
||||
const { usbDeviceConfig } = useUsbDeviceConfig();
|
||||
const isUsbAudioEnabled = usbDeviceConfig?.audio ?? true;
|
||||
|
||||
// RPC helper functions to replace HTTP API calls
|
||||
const rpcMicrophoneStart = useCallback((): Promise<void> => {
|
||||
return new Promise((resolve, reject) => {
|
||||
if (rpcDataChannel?.readyState !== "open") {
|
||||
reject(new Error("Device connection not available"));
|
||||
return;
|
||||
}
|
||||
|
||||
send("microphoneStart", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
reject(new Error(resp.error.message));
|
||||
} else {
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
}, [rpcDataChannel?.readyState, send]);
|
||||
|
||||
const microphoneStreamRef = useRef<MediaStream | null>(null);
|
||||
|
||||
// Loading states
|
||||
const [isStarting, setIsStarting] = useState(false);
|
||||
const [isStopping, setIsStopping] = useState(false);
|
||||
const [isToggling, setIsToggling] = useState(false);
|
||||
|
||||
// Add debouncing refs to prevent rapid operations
|
||||
const lastOperationRef = useRef<number>(0);
|
||||
const operationTimeoutRef = useRef<number | null>(null);
|
||||
|
||||
// Debounced operation wrapper
|
||||
const debouncedOperation = useCallback((operation: () => Promise<void>, operationType: string) => {
|
||||
const now = Date.now();
|
||||
const timeSinceLastOp = now - lastOperationRef.current;
|
||||
|
||||
if (timeSinceLastOp < AUDIO_CONFIG.OPERATION_DEBOUNCE_MS) {
|
||||
devLog(`Debouncing ${operationType} operation - too soon (${timeSinceLastOp}ms since last)`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Clear any pending operation
|
||||
if (operationTimeoutRef.current) {
|
||||
clearTimeout(operationTimeoutRef.current);
|
||||
operationTimeoutRef.current = null;
|
||||
}
|
||||
|
||||
lastOperationRef.current = now;
|
||||
operation().catch(error => {
|
||||
devError(`Debounced ${operationType} operation failed:`, error);
|
||||
});
|
||||
}, []);
|
||||
|
||||
// Cleanup function to stop microphone stream
|
||||
const stopMicrophoneStream = useCallback(async () => {
|
||||
if (microphoneStreamRef.current) {
|
||||
microphoneStreamRef.current.getTracks().forEach((track: MediaStreamTrack) => {
|
||||
track.stop();
|
||||
});
|
||||
microphoneStreamRef.current = null;
|
||||
setMicrophoneStream(null);
|
||||
}
|
||||
|
||||
if (microphoneSender && peerConnection) {
|
||||
// Instead of removing the track, replace it with null to keep the transceiver
|
||||
try {
|
||||
await microphoneSender.replaceTrack(null);
|
||||
} catch (error) {
|
||||
devWarn("Failed to replace track with null:", error);
|
||||
// Fallback to removing the track
|
||||
peerConnection.removeTrack(microphoneSender);
|
||||
}
|
||||
setMicrophoneSender(null);
|
||||
}
|
||||
|
||||
setMicrophoneActive(false);
|
||||
setMicrophoneMuted(false);
|
||||
}, [microphoneSender, peerConnection, setMicrophoneStream, setMicrophoneSender, setMicrophoneActive, setMicrophoneMuted]);
|
||||
|
||||
|
||||
|
||||
const lastSyncRef = useRef<number>(0);
|
||||
const isStartingRef = useRef<boolean>(false); // Track if we're in the middle of starting
|
||||
|
||||
const syncMicrophoneState = useCallback(async () => {
|
||||
// Debounce sync calls to prevent race conditions
|
||||
const now = Date.now();
|
||||
if (now - lastSyncRef.current < AUDIO_CONFIG.SYNC_DEBOUNCE_MS) {
|
||||
return;
|
||||
}
|
||||
lastSyncRef.current = now;
|
||||
|
||||
// Don't sync if we're in the middle of starting the microphone
|
||||
if (isStartingRef.current) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Early return if RPC data channel is not ready
|
||||
if (rpcDataChannel?.readyState !== "open") {
|
||||
devWarn("RPC connection not available for microphone sync, skipping");
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
send("microphoneStatus", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
devError("RPC microphone status failed:", resp.error);
|
||||
reject(new Error(resp.error.message));
|
||||
} else if ("result" in resp) {
|
||||
const data = resp.result as { running: boolean };
|
||||
const backendRunning = data.running;
|
||||
|
||||
// Only sync if there's a significant state difference and we're not in a transition
|
||||
if (backendRunning !== isMicrophoneActive) {
|
||||
devInfo(`Syncing microphone state: backend=${backendRunning}, frontend=${isMicrophoneActive}`);
|
||||
|
||||
// If backend is running but frontend thinks it's not, just update frontend state
|
||||
if (backendRunning && !isMicrophoneActive) {
|
||||
devLog("Backend running, updating frontend state to active");
|
||||
setMicrophoneActive(true);
|
||||
}
|
||||
// If backend is not running but frontend thinks it is, clean up and update state
|
||||
else if (!backendRunning && isMicrophoneActive) {
|
||||
devLog("Backend not running, cleaning up frontend state");
|
||||
setMicrophoneActive(false);
|
||||
// Only clean up stream if we actually have one
|
||||
if (microphoneStreamRef.current) {
|
||||
stopMicrophoneStream();
|
||||
}
|
||||
setMicrophoneMuted(false);
|
||||
}
|
||||
}
|
||||
resolve();
|
||||
} else {
|
||||
reject(new Error("Invalid response"));
|
||||
}
|
||||
});
|
||||
});
|
||||
} catch (error) {
|
||||
devError("Error syncing microphone state:", error);
|
||||
}
|
||||
}, [isMicrophoneActive, setMicrophoneActive, setMicrophoneMuted, stopMicrophoneStream, rpcDataChannel?.readyState, send]);
|
||||
|
||||
// Start microphone stream
|
||||
const startMicrophone = useCallback(async (deviceId?: string): Promise<{ success: boolean; error?: MicrophoneError }> => {
|
||||
// Prevent multiple simultaneous start operations
|
||||
if (isStarting || isStopping || isToggling) {
|
||||
devLog("Microphone operation already in progress, skipping start");
|
||||
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
|
||||
}
|
||||
|
||||
setIsStarting(true);
|
||||
try {
|
||||
// Set flag to prevent sync during startup
|
||||
isStartingRef.current = true;
|
||||
|
||||
// Check if getUserMedia is available (requires HTTPS in most browsers)
|
||||
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
|
||||
setIsStarting(false);
|
||||
isStartingRef.current = false;
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
type: 'permission',
|
||||
message: 'Microphone access requires HTTPS connection. Please use HTTPS to use audio input.'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Request microphone permission and get stream
|
||||
const audioConstraints: MediaTrackConstraints = {
|
||||
echoCancellation: true,
|
||||
noiseSuppression: true,
|
||||
autoGainControl: true,
|
||||
sampleRate: AUDIO_CONFIG.SAMPLE_RATE,
|
||||
channelCount: AUDIO_CONFIG.CHANNEL_COUNT,
|
||||
};
|
||||
|
||||
// Add device ID if specified
|
||||
if (deviceId && deviceId !== 'default') {
|
||||
audioConstraints.deviceId = { exact: deviceId };
|
||||
}
|
||||
|
||||
const stream = await navigator.mediaDevices.getUserMedia({
|
||||
audio: audioConstraints
|
||||
});
|
||||
|
||||
// Store the stream in both ref and store
|
||||
microphoneStreamRef.current = stream;
|
||||
setMicrophoneStream(stream);
|
||||
|
||||
// Add audio track to peer connection if available
|
||||
if (peerConnection && stream.getAudioTracks().length > 0) {
|
||||
const audioTrack = stream.getAudioTracks()[0];
|
||||
|
||||
// Find the audio transceiver (should already exist with sendrecv direction)
|
||||
const transceivers = peerConnection.getTransceivers();
|
||||
|
||||
// Look for an audio transceiver that can send (has sendrecv or sendonly direction)
|
||||
const audioTransceiver = transceivers.find((transceiver: RTCRtpTransceiver) => {
|
||||
// Check if this transceiver is for audio and can send
|
||||
const canSend = transceiver.direction === 'sendrecv' || transceiver.direction === 'sendonly';
|
||||
|
||||
// For newly created transceivers, we need to check if they're for audio
|
||||
// We can do this by checking if the sender doesn't have a track yet and direction allows sending
|
||||
if (canSend && !transceiver.sender.track) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// For existing transceivers, check if they already have an audio track
|
||||
if (transceiver.sender.track?.kind === 'audio' || transceiver.receiver.track?.kind === 'audio') {
|
||||
return canSend;
|
||||
}
|
||||
|
||||
return false;
|
||||
});
|
||||
|
||||
let sender: RTCRtpSender;
|
||||
if (audioTransceiver && audioTransceiver.sender) {
|
||||
// Use the existing audio transceiver's sender
|
||||
await audioTransceiver.sender.replaceTrack(audioTrack);
|
||||
sender = audioTransceiver.sender;
|
||||
} else {
|
||||
// Fallback: add new track if no transceiver found
|
||||
sender = peerConnection.addTrack(audioTrack, stream);
|
||||
}
|
||||
|
||||
setMicrophoneSender(sender);
|
||||
|
||||
// Check sender stats to verify audio is being transmitted
|
||||
devOnly(() => {
|
||||
setTimeout(async () => {
|
||||
try {
|
||||
const stats = await sender.getStats();
|
||||
stats.forEach((report) => {
|
||||
if (report.type === 'outbound-rtp' && report.kind === 'audio') {
|
||||
devLog("Audio RTP stats:", {
|
||||
packetsSent: report.packetsSent,
|
||||
bytesSent: report.bytesSent
|
||||
});
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
devError("Failed to get sender stats:", error);
|
||||
}
|
||||
}, 2000);
|
||||
});
|
||||
}
|
||||
|
||||
// Notify backend that microphone is started - only if USB audio is enabled
|
||||
if (!isUsbAudioEnabled) {
|
||||
devInfo("USB audio is disabled, skipping backend microphone start");
|
||||
// Still set frontend state as active since the stream was successfully created
|
||||
setMicrophoneActive(true);
|
||||
setMicrophoneMuted(false);
|
||||
setMicrophoneWasEnabled(true);
|
||||
isStartingRef.current = false;
|
||||
setIsStarting(false);
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
// Retry logic for backend failures
|
||||
let backendSuccess = false;
|
||||
let lastError: Error | string | null = null;
|
||||
|
||||
for (let attempt = 1; attempt <= 3; attempt++) {
|
||||
// If this is a retry, first try to reset the backend microphone state
|
||||
if (attempt > 1) {
|
||||
try {
|
||||
// Use RPC for reset (cloud-compatible)
|
||||
if (rpcDataChannel?.readyState === "open") {
|
||||
await new Promise<void>((resolve) => {
|
||||
send("microphoneReset", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
devWarn("RPC microphone reset failed:", resp.error);
|
||||
// Try stop as fallback
|
||||
send("microphoneStop", {}, (stopResp: JsonRpcResponse) => {
|
||||
if ("error" in stopResp) {
|
||||
devWarn("RPC microphone stop also failed:", stopResp.error);
|
||||
}
|
||||
resolve(); // Continue even if both fail
|
||||
});
|
||||
} else {
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
// Wait a bit for the backend to reset
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
} else {
|
||||
devWarn("RPC connection not available for reset");
|
||||
}
|
||||
} catch (resetError) {
|
||||
devWarn("Failed to reset backend state:", resetError);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
await rpcMicrophoneStart();
|
||||
backendSuccess = true;
|
||||
break; // Exit the retry loop on success
|
||||
} catch (rpcError) {
|
||||
lastError = `Backend RPC error: ${rpcError instanceof Error ? rpcError.message : 'Unknown error'}`;
|
||||
devError(`Backend microphone start failed with RPC error: ${lastError} (attempt ${attempt})`);
|
||||
|
||||
// For RPC errors, try again after a short delay
|
||||
if (attempt < 3) {
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If all backend attempts failed, cleanup and return error
|
||||
if (!backendSuccess) {
|
||||
devError("All backend start attempts failed, cleaning up stream");
|
||||
await stopMicrophoneStream();
|
||||
isStartingRef.current = false;
|
||||
setIsStarting(false);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
type: 'network',
|
||||
message: `Failed to start microphone on backend after 3 attempts. Last error: ${lastError}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Only set active state after backend confirms success
|
||||
setMicrophoneActive(true);
|
||||
setMicrophoneMuted(false);
|
||||
|
||||
// Save microphone enabled state for auto-restore on page reload
|
||||
setMicrophoneWasEnabled(true);
|
||||
|
||||
// Clear the starting flag
|
||||
isStartingRef.current = false;
|
||||
setIsStarting(false);
|
||||
return { success: true };
|
||||
} catch (error) {
|
||||
let micError: MicrophoneError;
|
||||
if (error instanceof Error) {
|
||||
if (error.name === 'NotAllowedError' || error.name === 'PermissionDeniedError') {
|
||||
micError = {
|
||||
type: 'permission',
|
||||
message: 'Microphone permission denied. Please allow microphone access and try again.'
|
||||
};
|
||||
} else if (error.name === 'NotFoundError' || error.name === 'DevicesNotFoundError') {
|
||||
micError = {
|
||||
type: 'device',
|
||||
message: 'No microphone device found. Please check your microphone connection.'
|
||||
};
|
||||
} else {
|
||||
micError = {
|
||||
type: 'unknown',
|
||||
message: error.message || 'Failed to access microphone'
|
||||
};
|
||||
}
|
||||
} else {
|
||||
micError = {
|
||||
type: 'unknown',
|
||||
message: 'Unknown error occurred while accessing microphone'
|
||||
};
|
||||
}
|
||||
|
||||
// Clear the starting flag on error
|
||||
isStartingRef.current = false;
|
||||
setIsStarting(false);
|
||||
return { success: false, error: micError };
|
||||
}
|
||||
}, [peerConnection, setMicrophoneStream, setMicrophoneSender, setMicrophoneActive, setMicrophoneMuted, setMicrophoneWasEnabled, stopMicrophoneStream, isStarting, isStopping, isToggling, rpcMicrophoneStart, rpcDataChannel?.readyState, send, isUsbAudioEnabled]);
|
||||
|
||||
|
||||
|
||||
// Stop microphone
|
||||
const stopMicrophone = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
|
||||
// Prevent multiple simultaneous stop operations
|
||||
if (isStarting || isStopping || isToggling) {
|
||||
devLog("Microphone operation already in progress, skipping stop");
|
||||
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
|
||||
}
|
||||
|
||||
setIsStopping(true);
|
||||
try {
|
||||
// First stop the stream
|
||||
await stopMicrophoneStream();
|
||||
|
||||
// Then notify backend that microphone is stopped using RPC
|
||||
try {
|
||||
if (rpcDataChannel?.readyState === "open") {
|
||||
await new Promise<void>((resolve) => {
|
||||
send("microphoneStop", {}, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
devWarn("RPC microphone stop failed:", resp.error);
|
||||
}
|
||||
resolve(); // Continue regardless of result
|
||||
});
|
||||
});
|
||||
} else {
|
||||
devWarn("RPC connection not available for microphone stop");
|
||||
}
|
||||
} catch (error) {
|
||||
devWarn("Failed to notify backend about microphone stop:", error);
|
||||
}
|
||||
|
||||
// Update frontend state immediately
|
||||
setMicrophoneActive(false);
|
||||
setMicrophoneMuted(false);
|
||||
|
||||
// Save microphone disabled state for persistence
|
||||
setMicrophoneWasEnabled(false);
|
||||
|
||||
// Sync state after stopping to ensure consistency (with longer delay)
|
||||
setTimeout(() => syncMicrophoneState(), 500);
|
||||
|
||||
setIsStopping(false);
|
||||
return { success: true };
|
||||
} catch (error) {
|
||||
devError("Failed to stop microphone:", error);
|
||||
setIsStopping(false);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
type: 'unknown',
|
||||
message: error instanceof Error ? error.message : 'Failed to stop microphone'
|
||||
}
|
||||
};
|
||||
}
|
||||
}, [stopMicrophoneStream, syncMicrophoneState, setMicrophoneActive, setMicrophoneMuted, setMicrophoneWasEnabled, isStarting, isStopping, isToggling, rpcDataChannel?.readyState, send]);
|
||||
|
||||
// Toggle microphone mute
|
||||
const toggleMicrophoneMute = useCallback(async (): Promise<{ success: boolean; error?: MicrophoneError }> => {
|
||||
// Prevent multiple simultaneous toggle operations
|
||||
if (isStarting || isStopping || isToggling) {
|
||||
devLog("Microphone operation already in progress, skipping toggle");
|
||||
return { success: false, error: { type: 'unknown', message: 'Operation already in progress' } };
|
||||
}
|
||||
|
||||
setIsToggling(true);
|
||||
try {
|
||||
// Use the ref instead of store value to avoid race conditions
|
||||
const currentStream = microphoneStreamRef.current || microphoneStream;
|
||||
|
||||
if (!currentStream || !isMicrophoneActive) {
|
||||
const errorDetails = {
|
||||
hasStream: !!currentStream,
|
||||
isActive: isMicrophoneActive,
|
||||
streamId: currentStream?.id,
|
||||
audioTracks: currentStream?.getAudioTracks().length || 0
|
||||
};
|
||||
devWarn("Microphone mute failed: stream or active state missing", errorDetails);
|
||||
|
||||
// Provide more specific error message
|
||||
let errorMessage = 'Microphone is not active';
|
||||
if (!currentStream) {
|
||||
errorMessage = 'No microphone stream found. Please restart the microphone.';
|
||||
} else if (!isMicrophoneActive) {
|
||||
errorMessage = 'Microphone is not marked as active. Please restart the microphone.';
|
||||
}
|
||||
|
||||
setIsToggling(false);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
type: 'device',
|
||||
message: errorMessage
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const audioTracks = currentStream.getAudioTracks();
|
||||
if (audioTracks.length === 0) {
|
||||
setIsToggling(false);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
type: 'device',
|
||||
message: 'No audio tracks found in microphone stream'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const newMutedState = !isMicrophoneMuted;
|
||||
|
||||
// Mute/unmute the audio track
|
||||
audioTracks.forEach((track: MediaStreamTrack) => {
|
||||
track.enabled = !newMutedState;
|
||||
});
|
||||
|
||||
setMicrophoneMuted(newMutedState);
|
||||
|
||||
// Notify backend about mute state using RPC
|
||||
try {
|
||||
if (rpcDataChannel?.readyState === "open") {
|
||||
await new Promise<void>((resolve) => {
|
||||
send("microphoneMute", { muted: newMutedState }, (resp: JsonRpcResponse) => {
|
||||
if ("error" in resp) {
|
||||
devWarn("RPC microphone mute failed:", resp.error);
|
||||
}
|
||||
resolve(); // Continue regardless of result
|
||||
});
|
||||
});
|
||||
} else {
|
||||
devWarn("RPC connection not available for microphone mute");
|
||||
}
|
||||
} catch (error) {
|
||||
devWarn("Failed to notify backend about microphone mute:", error);
|
||||
}
|
||||
|
||||
setIsToggling(false);
|
||||
return { success: true };
|
||||
} catch (error) {
|
||||
devError("Failed to toggle microphone mute:", error);
|
||||
setIsToggling(false);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
type: 'unknown',
|
||||
message: error instanceof Error ? error.message : 'Failed to toggle microphone mute'
|
||||
}
|
||||
};
|
||||
}
|
||||
}, [microphoneStream, isMicrophoneActive, isMicrophoneMuted, setMicrophoneMuted, isStarting, isStopping, isToggling, rpcDataChannel?.readyState, send]);
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
const startMicrophoneDebounced = useCallback((deviceId?: string) => {
|
||||
debouncedOperation(async () => {
|
||||
await startMicrophone(deviceId).catch(devError);
|
||||
}, "start");
|
||||
}, [startMicrophone, debouncedOperation]);
|
||||
|
||||
const stopMicrophoneDebounced = useCallback(() => {
|
||||
debouncedOperation(async () => {
|
||||
await stopMicrophone().catch(devError);
|
||||
}, "stop");
|
||||
}, [stopMicrophone, debouncedOperation]);
|
||||
|
||||
|
||||
|
||||
// Sync state on mount and auto-restore microphone if it was enabled before page reload
|
||||
useEffect(() => {
|
||||
const autoRestoreMicrophone = async () => {
|
||||
// Wait for RPC connection to be ready before attempting any operations
|
||||
if (rpcDataChannel?.readyState !== "open") {
|
||||
return;
|
||||
}
|
||||
|
||||
// First sync the current state
|
||||
await syncMicrophoneState();
|
||||
|
||||
// If microphone was enabled before page reload and is not currently active, restore it
|
||||
if (microphoneWasEnabled && !isMicrophoneActive && peerConnection) {
|
||||
try {
|
||||
const result = await startMicrophone();
|
||||
if (result.success) {
|
||||
devInfo("Microphone auto-restored successfully after page reload");
|
||||
} else {
|
||||
devWarn("Failed to auto-restore microphone:", result.error);
|
||||
}
|
||||
} catch (error) {
|
||||
devWarn("Error during microphone auto-restoration:", error);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Add a delay to ensure RTC connection is fully established
|
||||
const timer = setTimeout(autoRestoreMicrophone, 1000);
|
||||
return () => clearTimeout(timer);
|
||||
}, [syncMicrophoneState, microphoneWasEnabled, isMicrophoneActive, peerConnection, startMicrophone, rpcDataChannel?.readyState]);
|
||||
|
||||
// Handle audio device changes (USB audio enable/disable) via WebSocket events
|
||||
const handleAudioDeviceChanged = useCallback((data: AudioDeviceChangedData) => {
|
||||
devInfo("Audio device changed:", data);
|
||||
devInfo("Current microphone state:", { isMicrophoneActive, microphoneWasEnabled });
|
||||
|
||||
// USB audio was just disabled
|
||||
if (!data.enabled && data.reason === "usb_reconfiguration") {
|
||||
devInfo(`USB audio disabled via device change event - microphone was ${isMicrophoneActive ? 'active' : 'inactive'}`);
|
||||
|
||||
// The microphoneWasEnabled flag is already being managed by the microphone start/stop functions
|
||||
// We don't need to do anything special here - it will be preserved for restoration
|
||||
devInfo(`Current microphoneWasEnabled flag: ${microphoneWasEnabled}`);
|
||||
}
|
||||
|
||||
// USB audio was just re-enabled
|
||||
else if (data.enabled && data.reason === "usb_reconfiguration") {
|
||||
devInfo("USB audio re-enabled via device change event - checking if microphone should be restored");
|
||||
devInfo(`microphoneWasEnabled: ${microphoneWasEnabled}`);
|
||||
devInfo(`Current microphone active: ${isMicrophoneActive}`);
|
||||
devInfo(`RPC ready: ${rpcDataChannel?.readyState === "open"}`);
|
||||
|
||||
// If microphone was enabled before (using the same logic as page reload restore), restore it
|
||||
if (microphoneWasEnabled && !isMicrophoneActive && rpcDataChannel?.readyState === "open") {
|
||||
devInfo("Restoring microphone after USB audio re-enabled (using microphoneWasEnabled flag)");
|
||||
setTimeout(async () => {
|
||||
try {
|
||||
const result = await startMicrophone();
|
||||
if (result.success) {
|
||||
devInfo("Microphone successfully restored after USB audio re-enable");
|
||||
} else {
|
||||
devWarn("Failed to restore microphone after USB audio re-enable:", result.error);
|
||||
}
|
||||
} catch (error) {
|
||||
devWarn("Error restoring microphone after USB audio re-enable:", error);
|
||||
}
|
||||
}, 500); // Small delay to ensure USB device reconfiguration is complete
|
||||
} else {
|
||||
devInfo("Not restoring microphone - conditions not met or microphone was not previously enabled");
|
||||
}
|
||||
}
|
||||
}, [isMicrophoneActive, microphoneWasEnabled, startMicrophone, rpcDataChannel?.readyState]);
|
||||
|
||||
// Subscribe to audio device change events
|
||||
useAudioEvents(handleAudioDeviceChanged);
|
||||
|
||||
// Cleanup on unmount - use ref to avoid dependency on stopMicrophoneStream
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
// Clean up stream directly without depending on the callback
|
||||
const stream = microphoneStreamRef.current;
|
||||
if (stream) {
|
||||
stream.getAudioTracks().forEach((track: MediaStreamTrack) => {
|
||||
track.stop();
|
||||
});
|
||||
microphoneStreamRef.current = null;
|
||||
}
|
||||
};
|
||||
}, []); // No dependencies to prevent re-running
|
||||
|
||||
return {
|
||||
isMicrophoneActive,
|
||||
isMicrophoneMuted,
|
||||
microphoneStream,
|
||||
startMicrophone,
|
||||
stopMicrophone,
|
||||
toggleMicrophoneMute,
|
||||
|
||||
// Expose debounced variants for UI handlers
|
||||
startMicrophoneDebounced,
|
||||
stopMicrophoneDebounced,
|
||||
// Expose sync and loading flags for consumers that expect them
|
||||
syncMicrophoneState,
|
||||
isStarting,
|
||||
isStopping,
|
||||
isToggling,
|
||||
|
||||
// HTTP/HTTPS detection
|
||||
isHttpsRequired: isHttpsRequired(),
|
||||
};
|
||||
}
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
import { useCallback, useEffect, useState } from "react";
|
||||
|
||||
import { devError } from '../utils/debug';
|
||||
|
||||
import { JsonRpcResponse, useJsonRpc } from "./useJsonRpc";
|
||||
import { useAudioEvents } from "./useAudioEvents";
|
||||
|
||||
export interface UsbDeviceConfig {
|
||||
keyboard: boolean;
|
||||
absolute_mouse: boolean;
|
||||
relative_mouse: boolean;
|
||||
mass_storage: boolean;
|
||||
audio: boolean;
|
||||
}
|
||||
|
||||
export function useUsbDeviceConfig() {
|
||||
const { send } = useJsonRpc();
|
||||
const [usbDeviceConfig, setUsbDeviceConfig] = useState<UsbDeviceConfig | null>(null);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
const fetchUsbDeviceConfig = useCallback(() => {
|
||||
setLoading(true);
|
||||
setError(null);
|
||||
|
||||
send("getUsbDevices", {}, (resp: JsonRpcResponse) => {
|
||||
setLoading(false);
|
||||
|
||||
if ("error" in resp) {
|
||||
devError("Failed to load USB devices:", resp.error);
|
||||
setError(resp.error.data || "Unknown error");
|
||||
setUsbDeviceConfig(null);
|
||||
} else {
|
||||
const config = resp.result as UsbDeviceConfig;
|
||||
setUsbDeviceConfig(config);
|
||||
setError(null);
|
||||
}
|
||||
});
|
||||
}, [send]);
|
||||
|
||||
// Listen for audio device changes to update USB config in real-time
|
||||
const handleAudioDeviceChanged = useCallback(() => {
|
||||
// Audio device changed, refetching USB config
|
||||
fetchUsbDeviceConfig();
|
||||
}, [fetchUsbDeviceConfig]);
|
||||
|
||||
// Subscribe to audio events for real-time updates
|
||||
useAudioEvents(handleAudioDeviceChanged);
|
||||
|
||||
useEffect(() => {
|
||||
fetchUsbDeviceConfig();
|
||||
}, [fetchUsbDeviceConfig]);
|
||||
|
||||
return {
|
||||
usbDeviceConfig,
|
||||
loading,
|
||||
error,
|
||||
refetch: fetchUsbDeviceConfig,
|
||||
};
|
||||
}
|
||||
|
|
@ -153,13 +153,13 @@ body {
|
|||
|
||||
@property --grid-color-start {
|
||||
syntax: "<color>";
|
||||
initial-value: var(--color-blue-50/10);
|
||||
initial-value: oklch(97% 0.014 254.604 / 10); /* var(--color-blue-50/10) */
|
||||
inherits: false;
|
||||
}
|
||||
|
||||
@property --grid-color-end {
|
||||
syntax: "<color>";
|
||||
initial-value: var(--color-blue-50/100);
|
||||
initial-value: oklch(97% 0.014 254.604 / 100); /* var(--color-blue-50/100) */
|
||||
inherits: false;
|
||||
}
|
||||
|
||||
|
|
@ -175,8 +175,8 @@ body {
|
|||
}
|
||||
|
||||
.group:hover .grid-card {
|
||||
--grid-color-start: var(--color-blue-100/50);
|
||||
--grid-color-end: var(--color-blue-50/50);
|
||||
--grid-color-start: oklch(from var(--color-blue-100) l c h / 50);
|
||||
--grid-color-end: oklch(from var(--color-blue-50) l c h / 50);
|
||||
}
|
||||
|
||||
video::-webkit-media-controls {
|
||||
|
|
|
|||
|
|
@ -10,6 +10,9 @@ import {
|
|||
} from "react-router";
|
||||
import { ExclamationTriangleIcon } from "@heroicons/react/16/solid";
|
||||
|
||||
import { CLOUD_API, DEVICE_API } from "@/ui.config";
|
||||
import api from "@/api";
|
||||
import Root from "@/root";
|
||||
import Card from "@components/Card";
|
||||
import EmptyCard from "@components/EmptyCard";
|
||||
import NotFoundPage from "@components/NotFoundPage";
|
||||
|
|
@ -25,9 +28,6 @@ import DeviceIdRename from "@routes/devices.$id.rename";
|
|||
import DevicesRoute from "@routes/devices";
|
||||
import SettingsIndexRoute from "@routes/devices.$id.settings._index";
|
||||
import SettingsAccessIndexRoute from "@routes/devices.$id.settings.access._index";
|
||||
import Root from "@/root";
|
||||
import api from "@/api";
|
||||
import { CLOUD_API, DEVICE_API } from "@/ui.config";
|
||||
import Notifications from "@/notifications";
|
||||
const SignupRoute = lazy(() => import("@routes/signup"));
|
||||
const LoginRoute = lazy(() => import("@routes/login"));
|
||||
|
|
|
|||
|
|
@ -6,9 +6,9 @@ import { Button, LinkButton } from "@components/Button";
|
|||
import Card from "@components/Card";
|
||||
import { CardHeader } from "@components/CardHeader";
|
||||
import DashboardNavbar from "@components/Header";
|
||||
import Fieldset from "@components/Fieldset";
|
||||
import { User } from "@/hooks/stores";
|
||||
import { checkAuth } from "@/main";
|
||||
import Fieldset from "@components/Fieldset";
|
||||
import { CLOUD_API } from "@/ui.config";
|
||||
|
||||
interface LoaderData {
|
||||
|
|
|
|||
|
|
@ -9,12 +9,12 @@ import { PlusCircleIcon, ExclamationTriangleIcon } from "@heroicons/react/20/sol
|
|||
import { TrashIcon } from "@heroicons/react/16/solid";
|
||||
import { useNavigate } from "react-router";
|
||||
|
||||
import { Button } from "@components/Button";
|
||||
import AutoHeight from "@components/AutoHeight";
|
||||
import Card, { GridCard } from "@/components/Card";
|
||||
import { Button } from "@components/Button";
|
||||
import LogoBlueIcon from "@/assets/logo-blue.svg";
|
||||
import LogoWhiteIcon from "@/assets/logo-white.svg";
|
||||
import { formatters } from "@/utils";
|
||||
import AutoHeight from "@components/AutoHeight";
|
||||
import { InputFieldWithLabel } from "@/components/InputField";
|
||||
import DebianIcon from "@/assets/debian-icon.png";
|
||||
import UbuntuIcon from "@/assets/ubuntu-icon.png";
|
||||
|
|
@ -25,17 +25,16 @@ import NetBootIcon from "@/assets/netboot-icon.svg";
|
|||
import Fieldset from "@/components/Fieldset";
|
||||
import { DEVICE_API } from "@/ui.config";
|
||||
|
||||
import { JsonRpcResponse, useJsonRpc } from "../hooks/useJsonRpc";
|
||||
import notifications from "../notifications";
|
||||
import { isOnDevice } from "../main";
|
||||
import { cx } from "../cva.config";
|
||||
import {
|
||||
MountMediaState,
|
||||
RemoteVirtualMediaState,
|
||||
useMountMediaStore,
|
||||
useRTCStore,
|
||||
} from "../hooks/stores";
|
||||
import { cx } from "../cva.config";
|
||||
import { isOnDevice } from "../main";
|
||||
import notifications from "../notifications";
|
||||
import { JsonRpcResponse, useJsonRpc } from "../hooks/useJsonRpc";
|
||||
|
||||
|
||||
export default function MountRoute() {
|
||||
const navigate = useNavigate();
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import { useNavigate, useOutletContext } from "react-router";
|
||||
|
||||
import { Button } from "@components/Button";
|
||||
import { GridCard } from "@/components/Card";
|
||||
import { Button } from "@components/Button";
|
||||
import LogoBlue from "@/assets/logo-blue.svg";
|
||||
import LogoWhite from "@/assets/logo-white.svg";
|
||||
|
||||
|
|
|
|||
|
|
@ -7,14 +7,13 @@ import Card from "@components/Card";
|
|||
import { CardHeader } from "@components/CardHeader";
|
||||
import { InputFieldWithLabel } from "@components/InputField";
|
||||
import DashboardNavbar from "@components/Header";
|
||||
import Fieldset from "@components/Fieldset";
|
||||
import { User } from "@/hooks/stores";
|
||||
import { checkAuth } from "@/main";
|
||||
import Fieldset from "@components/Fieldset";
|
||||
import { CLOUD_API } from "@/ui.config";
|
||||
|
||||
import api from "../api";
|
||||
|
||||
|
||||
interface LoaderData {
|
||||
device: { id: string; name: string; user: { googleId: string } };
|
||||
user: User;
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import { useNavigate } from "react-router";
|
||||
import { useCallback } from "react";
|
||||
|
||||
import { Button } from "@components/Button";
|
||||
import { useJsonRpc } from "@/hooks/useJsonRpc";
|
||||
import { Button } from "@components/Button";
|
||||
|
||||
export default function SettingsGeneralRebootRoute() {
|
||||
const navigate = useNavigate();
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue