Change output device for running AudioContext #232

Merged: 9 commits, Nov 6, 2022
7 changes: 6 additions & 1 deletion examples/sink_id.rs
@@ -25,5 +25,10 @@ fn main() {
osc.connect(&context.destination());
osc.start();

std::thread::sleep(std::time::Duration::from_secs(4));
loop {
println!("Choose output device, enter the 'device_id' and press <Enter>:");
let sink_id = std::io::stdin().lines().next().unwrap().unwrap();
context.set_sink_id_sync(sink_id).unwrap();
println!("Playing beep for sink {:?}", context.sink_id());
}
}
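
A self-contained sketch (not part of the diff) of how the updated example is meant to be used. The oscillator setup above the hunk and the device listing are assumptions based on the crate's public API as it appears elsewhere in this PR, not a copy of the actual example file:

```rust
use web_audio_api::context::{AudioContext, BaseAudioContext};
use web_audio_api::node::{AudioNode, AudioScheduledSourceNode};

fn main() {
    // default output device, default sample rate and latency
    let context = AudioContext::default();

    // keep a tone playing so that switching the sink is audible
    let mut osc = context.create_oscillator();
    osc.connect(&context.destination());
    osc.start();

    // show the available sinks so the user knows what to type
    for device in web_audio_api::enumerate_devices() {
        println!("- {:?}", device.device_id());
    }

    loop {
        println!("Choose output device, enter the 'device_id' and press <Enter>:");
        let sink_id = std::io::stdin().lines().next().unwrap().unwrap();
        // switch the running context; the audio graph keeps playing on the new sink
        context.set_sink_id_sync(sink_id).unwrap();
        println!("Playing beep for sink {:?}", context.sink_id());
    }
}
```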
37 changes: 24 additions & 13 deletions src/context/concrete_base.rs
@@ -12,9 +12,9 @@ use crate::spatial::AudioListenerParams;

use crate::AudioListener;

use crossbeam_channel::Sender;
use crossbeam_channel::{SendError, Sender};
use std::sync::atomic::{AtomicU64, AtomicU8, Ordering};
use std::sync::{Arc, Mutex};
use std::sync::{Arc, Mutex, RwLock, RwLockWriteGuard};

/// The struct that corresponds to the Javascript `BaseAudioContext` object.
///
@@ -51,7 +51,7 @@ struct ConcreteBaseAudioContextInner {
/// destination node's current channel count
destination_channel_config: ChannelConfig,
/// message channel from control to render thread
render_channel: Sender<ControlMessage>,
render_channel: RwLock<Sender<ControlMessage>>,
/// control messages that cannot be sent immediately
queued_messages: Mutex<Vec<ControlMessage>>,
/// number of frames played
@@ -104,7 +104,7 @@ impl BaseAudioContext for ConcreteBaseAudioContext {
self.inner.queued_audio_listener_msgs.lock().unwrap();
queued_audio_listener_msgs.push(message);
} else {
self.inner.render_channel.send(message).unwrap();
self.send_control_msg(message).unwrap();
self.resolve_queued_control_msgs(id);
}

@@ -124,7 +124,7 @@ impl ConcreteBaseAudioContext {
let base_inner = ConcreteBaseAudioContextInner {
sample_rate,
max_channel_count,
render_channel,
render_channel: RwLock::new(render_channel),
queued_messages: Mutex::new(Vec::new()),
node_id_inc: AtomicU64::new(0),
destination_channel_config: ChannelConfigOptions::default().into(),
@@ -188,6 +188,17 @@ impl ConcreteBaseAudioContext {
base
}

pub(crate) fn send_control_msg(
&self,
msg: ControlMessage,
) -> Result<(), SendError<ControlMessage>> {
self.inner.render_channel.read().unwrap().send(msg)
}

pub(crate) fn lock_control_msg_sender(&self) -> RwLockWriteGuard<Sender<ControlMessage>> {
self.inner.render_channel.write().unwrap()
}

/// Inform render thread that the control thread `AudioNode` no longer has any handles
pub(super) fn mark_node_dropped(&self, id: u64) {
// do not drop magic nodes
@@ -199,7 +210,7 @@ impl ConcreteBaseAudioContext {

// Sending the message will fail when the render thread has already shut down.
// This is fine
let _r = self.inner.render_channel.send(message);
let _r = self.send_control_msg(message);
}
}

@@ -211,7 +222,7 @@ impl ConcreteBaseAudioContext {

// Sending the message will fail when the render thread has already shut down.
// This is fine
let _r = self.inner.render_channel.send(message);
let _r = self.send_control_msg(message);
}

/// `ChannelConfig` of the `AudioDestinationNode`
@@ -283,7 +294,7 @@ impl ConcreteBaseAudioContext {
while i < queued.len() {
if matches!(&queued[i], ControlMessage::ConnectNode {to, ..} if *to == id) {
let m = queued.remove(i);
self.inner.render_channel.send(m).unwrap();
self.send_control_msg(m).unwrap();
} else {
i += 1;
}
@@ -304,7 +315,7 @@
output,
input,
};
self.inner.render_channel.send(message).unwrap();
self.send_control_msg(message).unwrap();
}

/// Schedule a connection of an `AudioParam` to the `AudioNode` it belongs to
@@ -326,13 +337,13 @@
from: from.0,
to: to.0,
};
self.inner.render_channel.send(message).unwrap();
self.send_control_msg(message).unwrap();
}

/// Disconnects all outgoing connections from the audio node.
pub(crate) fn disconnect(&self, from: &AudioNodeId) {
let message = ControlMessage::DisconnectAll { from: from.0 };
self.inner.render_channel.send(message).unwrap();
self.send_control_msg(message).unwrap();
}

/// Pass an `AudioParam::AudioParamEvent` to the render thread
@@ -348,7 +359,7 @@
to: to.clone(),
event,
};
self.inner.render_channel.send(message).unwrap();
self.send_control_msg(message).unwrap();
}

/// Attach the 9 `AudioListener` coordinates to a `PannerNode`
Expand All @@ -370,7 +381,7 @@ impl ConcreteBaseAudioContext {
let mut released = false;
while let Some(message) = queued_audio_listener_msgs.pop() {
// add the AudioListenerRenderer to the graph
self.inner.render_channel.send(message).unwrap();
self.send_control_msg(message).unwrap();
released = true;
}

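The core change in this file is wrapping the control-to-render `Sender` in an `RwLock`: ordinary control messages go through a read guard, while the device switch takes the write guard for exclusivity. A minimal, self-contained sketch of that pattern, not part of the diff (`ControlChannel` and `Msg` are invented names):

```rust
use crossbeam_channel::{unbounded, SendError, Sender};
use std::sync::{RwLock, RwLockWriteGuard};

// Stand-in for the crate's ControlMessage enum.
enum Msg {
    Work(u32),
}

struct ControlChannel {
    sender: RwLock<Sender<Msg>>,
}

impl ControlChannel {
    // Ordinary messages only need shared access, so many callers on the
    // control thread can send concurrently through the read guard.
    fn send(&self, msg: Msg) -> Result<(), SendError<Msg>> {
        self.sender.read().unwrap().send(msg)
    }

    // During a device switch, the caller takes the write guard: while it is held,
    // no other thread can send, so no message can race the shutdown/startup handshake.
    fn lock_exclusive(&self) -> RwLockWriteGuard<'_, Sender<Msg>> {
        self.sender.write().unwrap()
    }
}

fn main() {
    let (tx, rx) = unbounded();
    let channel = ControlChannel { sender: RwLock::new(tx) };

    channel.send(Msg::Work(1)).unwrap();

    {
        // exclusive section, as in set_sink_id_sync
        let guard = channel.lock_exclusive();
        guard.send(Msg::Work(2)).unwrap(); // the guard still derefs to the Sender
    } // guard dropped, ordinary sends may resume

    assert_eq!(rx.iter().take(2).count(), 2);
}
```
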
5 changes: 5 additions & 0 deletions src/context/offline.rs
@@ -59,12 +59,17 @@ impl OfflineAudioContext {
/// * `length` - length of the rendering audio buffer
/// * `sample_rate` - output sample rate
#[must_use]
#[allow(clippy::missing_panics_doc)]
pub fn new(number_of_channels: usize, length: usize, sample_rate: f32) -> Self {
assert_valid_sample_rate(sample_rate);

// communication channel to the render thread
let (sender, receiver) = crossbeam_channel::unbounded();

let graph = crate::render::graph::Graph::new();
let message = crate::message::ControlMessage::Startup { graph };
sender.send(message).unwrap();

// track number of frames - synced from render thread to control thread
let frames_played = Arc::new(AtomicU64::new(0));
let frames_played_clone = frames_played.clone();
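Both context types now build the graph on the control thread and ship it to the render thread in a `Startup` message; the running `AudioContext` can later recover it with a `Shutdown` message (see online.rs below). A sketch of that handshake with stand-in types, not part of the diff (`Graph` is a placeholder here, and the real `ControlMessage` enum has many more variants):

```rust
use crossbeam_channel::Sender;

// `Graph` is a stand-in for the crate's render graph type.
struct Graph;

// Sketch of the two lifecycle variants this PR relies on; field types simplified.
enum ControlMessage {
    // boot a render thread with an audio graph: a fresh one at construction,
    // or the recovered one after a device switch
    Startup { graph: Graph },
    // tear the render thread down and hand its graph back to the control thread
    Shutdown { sender: Sender<Graph> },
}

fn main() {
    // construction path, as in OfflineAudioContext::new / AudioContext::new
    let (ctrl_msg_send, ctrl_msg_recv) = crossbeam_channel::unbounded();
    ctrl_msg_send
        .send(ControlMessage::Startup { graph: Graph })
        .unwrap();

    // device-switch path, as in set_sink_id_sync: ask for the graph back ...
    let (graph_send, graph_recv) = crossbeam_channel::bounded(1);
    ctrl_msg_send
        .send(ControlMessage::Shutdown { sender: graph_send })
        .unwrap();

    // (a real render thread would receive Shutdown and reply with its graph)
    while let Ok(msg) = ctrl_msg_recv.try_recv() {
        if let ControlMessage::Shutdown { sender } = msg {
            sender.send(Graph).unwrap();
        }
    }

    // ... and ship it to the freshly booted render thread
    let graph = graph_recv.recv().unwrap();
    ctrl_msg_send
        .send(ControlMessage::Startup { graph })
        .unwrap();
}
```
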
123 changes: 103 additions & 20 deletions src/context/online.rs
@@ -1,12 +1,13 @@
//! The `AudioContext` type and constructor options
use crate::context::{AudioContextState, BaseAudioContext, ConcreteBaseAudioContext};
use crate::io::{self, AudioBackend};
use crate::io::{self, AudioBackend, ControlThreadInit, RenderThreadInit};
use crate::media::{MediaElement, MediaStream};
use crate::message::ControlMessage;
use crate::node::{self, ChannelConfigOptions};
use crate::AudioRenderCapacity;

use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use std::error::Error;
use std::sync::Mutex;

/// Identify the type of playback, which affects tradeoffs
/// between audio output latency and power consumption
@@ -57,9 +58,11 @@ pub struct AudioContext {
/// represents the underlying `BaseAudioContext`
base: ConcreteBaseAudioContext,
/// audio backend (play/pause functionality)
backend: Box<dyn AudioBackend>,
backend: Mutex<Box<dyn AudioBackend>>,
/// Provider for rendering performance metrics
render_capacity: AudioRenderCapacity,
/// Initializer for the render thread (when restart is required)
render_thread_init: RenderThreadInit,
}

impl BaseAudioContext for AudioContext {
@@ -80,7 +83,7 @@ impl AudioContext {
/// This will play live audio on the default output device.
///
/// ```no_run
/// use web_audio_api::context::{AudioContext, AudioContextLatencyCategory, AudioContextOptions};
/// use web_audio_api::context::{AudioContext, AudioContextOptions};
///
/// // Request a sample rate of 44.1 kHz and default latency (buffer size 128, if available)
/// let opts = AudioContextOptions {
@@ -94,33 +97,55 @@
/// // Alternatively, use the default constructor to get the best settings for your hardware
/// // let context = AudioContext::default();
/// ```
///
/// # Panics
///
/// The `AudioContext` constructor will panic when an invalid `sinkId` is provided in the
/// `AudioContextOptions`. In a future version, a `try_new` constructor will be introduced that
/// will never panic.
#[allow(clippy::needless_pass_by_value)]
#[must_use]
pub fn new(options: AudioContextOptions) -> Self {
// track number of frames - synced from render thread to control thread
let frames_played = Arc::new(AtomicU64::new(0));
let frames_played_clone = frames_played.clone();
if let Some(sink_id) = &options.sink_id {
if !crate::enumerate_devices()
.into_iter()
.any(|d| d.device_id() == sink_id)
{
panic!("NotFoundError: invalid sinkId");
}
}

let (control_thread_init, render_thread_init) = io::thread_init();
let backend = io::build_output(options, render_thread_init.clone());

let ControlThreadInit {
frames_played,
ctrl_msg_send,
load_value_recv,
} = control_thread_init;

// select backend based on cargo features
let (backend, sender, cap_recv) = io::build_output(options, frames_played_clone);
let graph = crate::render::graph::Graph::new();
let message = crate::message::ControlMessage::Startup { graph };
ctrl_msg_send.send(message).unwrap();

let base = ConcreteBaseAudioContext::new(
backend.sample_rate(),
backend.number_of_channels(),
frames_played,
sender,
ctrl_msg_send,
false,
);
base.set_state(AudioContextState::Running);

// setup AudioRenderCapacity for this context
let base_clone = base.clone();
let render_capacity = AudioRenderCapacity::new(base_clone, cap_recv);
let render_capacity = AudioRenderCapacity::new(base_clone, load_value_recv);

Self {
base,
backend,
backend: Mutex::new(backend),
render_capacity,
render_thread_init,
}
}
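
Given the panic documented above, a caller that wants a specific output device would typically pick a valid id from `enumerate_devices()` first. A hedged usage sketch, not part of the diff (the `Default` implementation for `AudioContextOptions` is assumed):

```rust
use web_audio_api::context::{AudioContext, AudioContextOptions};

fn main() {
    // pick the first reported output device, if any (device_id() as used in this PR)
    let sink_id = web_audio_api::enumerate_devices()
        .into_iter()
        .map(|d| d.device_id().to_string())
        .next();

    let options = AudioContextOptions {
        sink_id, // `None` falls back to the default output device
        ..AudioContextOptions::default()
    };

    // panics with "NotFoundError: invalid sinkId" if the id does not exist
    let _context = AudioContext::new(options);
}
```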

@@ -131,7 +156,7 @@ impl AudioContext {
// it to the audio subsystem, so this value is zero. (see Gecko)
#[allow(clippy::unused_self)]
#[must_use]
pub const fn base_latency(&self) -> f64 {
pub fn base_latency(&self) -> f64 {
0.
}

@@ -140,15 +165,73 @@
/// the time at which the first sample in the buffer is actually processed
/// by the audio output device.
#[must_use]
#[allow(clippy::missing_panics_doc)]
pub fn output_latency(&self) -> f64 {
self.backend.output_latency()
self.backend.lock().unwrap().output_latency()
Collaborator: Not sure of the thread layout here, but can't this lock() be a problem regarding the audio thread?

Collaborator: That's something we might want to call periodically during rendering, I guess.

Owner Author: I realize now the backend naming is confusing. The backend object lives in the control thread, so locking is no issue. A better name would be backend_manager, because it passes instructions from the control thread (start, pause, close) and collects info from the actual backend in the render thread (latency, sink_id). I will update the PR by renaming this struct.
}
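
As the author notes in the review thread above, the `Mutex<Box<dyn AudioBackend>>` is only ever locked from the control thread, so it cannot stall the audio callback. A compact, purely illustrative sketch of that layout, not part of the diff (`BackendHandle`, `Command` and `ContextSketch` are invented names):

```rust
use crossbeam_channel::{unbounded, Sender};
use std::sync::Mutex;

// Commands forwarded to the real audio backend living on the render thread.
enum Command {
    Suspend,
    Resume,
    Close,
}

// Control-thread handle ("backend manager"): forwards commands and answers
// queries such as output_latency() or sink_id() without touching the render thread.
struct BackendHandle {
    commands: Sender<Command>,
    output_latency: f64,
    sink_id: Option<String>,
}

struct ContextSketch {
    // Locked only from user-facing calls on the control thread, so it cannot
    // block the audio callback.
    backend: Mutex<BackendHandle>,
}

impl ContextSketch {
    fn output_latency(&self) -> f64 {
        self.backend.lock().unwrap().output_latency
    }

    fn suspend_sync(&self) {
        self.backend.lock().unwrap().commands.send(Command::Suspend).unwrap();
    }
}

fn main() {
    let (tx, rx) = unbounded();
    let ctx = ContextSketch {
        backend: Mutex::new(BackendHandle { commands: tx, output_latency: 0.02, sink_id: None }),
    };
    ctx.suspend_sync();
    assert!(matches!(rx.recv().unwrap(), Command::Suspend));
    assert!(ctx.output_latency() > 0.0);
    assert!(ctx.backend.lock().unwrap().sink_id.is_none());
}
```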

/// Identifier or the information of the current audio output device.
///
/// The initial value is `None`, which means the default audio output device.
pub fn sink_id(&self) -> Option<&str> {
self.backend.sink_id()
#[allow(clippy::missing_panics_doc)]
pub fn sink_id(&self) -> Option<String> {
self.backend.lock().unwrap().sink_id().map(str::to_string)
}

/// Update the current audio output device.
///
/// This function operates synchronously and might block the current thread. An async version
/// is currently not implemented.
#[allow(clippy::needless_collect, clippy::missing_panics_doc)]
pub fn set_sink_id_sync(&self, sink_id: String) -> Result<(), Box<dyn Error>> {
Owner Author: This is the meat of the PR:

1. check if sink is already active
2. validate sink
3. acquire necessary locks (avoid issues with concurrent calls)
4. shut down current render thread and recover the audio graph
5. boot up new thread, and ship the audio graph
6. release locks

if self.sink_id().as_deref() == Some(&sink_id) {
return Ok(()); // sink is already active
}

if !crate::enumerate_devices()
.into_iter()
.any(|d| d.device_id() == sink_id)
{
Err("NotFoundError: invalid sinkId")?;
}

let mut backend_guard = self.backend.lock().unwrap();

// acquire exclusive lock on ctrl msg sender
let ctrl_msg_send = self.base.lock_control_msg_sender();

// flush out the ctrl msg receiver, cache
let pending_msgs: Vec<_> = self.render_thread_init.ctrl_msg_recv.try_iter().collect();

// acquire the audio graph from the current render thread, shutting it down
let (graph_send, graph_recv) = crossbeam_channel::bounded(1);
let message = ControlMessage::Shutdown { sender: graph_send };
ctrl_msg_send.send(message).unwrap();
let graph = graph_recv.recv().unwrap();

// hotswap the backend
let options = AudioContextOptions {
sample_rate: Some(self.sample_rate()),
Owner Author: If this sample rate is not available on the new sink, bad things will happen. That is why #183 exists.

latency_hint: AudioContextLatencyCategory::default(), // todo reuse existing setting
sink_id: Some(sink_id),
};
*backend_guard = io::build_output(options, self.render_thread_init.clone());

// send the audio graph to the new render thread
let message = ControlMessage::Startup { graph };
ctrl_msg_send.send(message).unwrap();

self.base().set_state(AudioContextState::Running);
Collaborator (@b-ma, Nov 1, 2022): If the context was suspended, maybe we want to keep it suspended?

Actually, I didn't read the thing carefully, but the spec seems to say:

    If wasRunning is true:
    Set the [[rendering thread state]] on the AudioContext to "suspended".

https://webaudio.github.io/web-audio-api/#ref-for-dom-audiocontext-setsinkid%E2%91%A0

But as we already differ on the initial state, I don't really know... Just keeping the state as it was could be a good tradeoff?

Owner Author: Yes, very good point. I will update the PR.

// flush the cached msgs
pending_msgs
.into_iter()
.for_each(|m| self.base().send_control_msg(m).unwrap());

// explicitly release the lock to prevent concurrent render threads
drop(backend_guard);

Ok(())
}
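
A self-contained sketch, not part of the diff, of the behaviour agreed on in the review thread above: remember whether the context was running before the switch and only flip back to `Running` in that case (the enum mirrors the crate's `AudioContextState`; the switching function itself is illustrative):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum AudioContextState {
    Suspended,
    Running,
    Closed,
}

// Illustrative device switch: remember the state before tearing the render
// thread down, and only resume if the context was running beforehand.
fn switch_sink(state: &mut AudioContextState) {
    let was_running = *state == AudioContextState::Running;

    // ... shut down the old render thread, recover the graph,
    //     boot the new backend, ship the graph (as in set_sink_id_sync) ...

    if was_running {
        *state = AudioContextState::Running;
    }
    // otherwise keep the previous state (e.g. Suspended) instead of forcing Running
}

fn main() {
    let mut suspended = AudioContextState::Suspended;
    switch_sink(&mut suspended);
    assert_eq!(suspended, AudioContextState::Suspended);

    let mut running = AudioContextState::Running;
    switch_sink(&mut running);
    assert_eq!(running, AudioContextState::Running);
}
```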

/// Suspends the progression of time in the audio context.
@@ -167,7 +250,7 @@
/// * For a `BackendSpecificError`
#[allow(clippy::missing_const_for_fn, clippy::unused_self)]
pub fn suspend_sync(&self) {
if self.backend.suspend() {
if self.backend.lock().unwrap().suspend() {
self.base().set_state(AudioContextState::Suspended);
}
}
@@ -186,7 +269,7 @@
/// * For a `BackendSpecificError`
#[allow(clippy::missing_const_for_fn, clippy::unused_self)]
pub fn resume_sync(&self) {
if self.backend.resume() {
if self.backend.lock().unwrap().resume() {
self.base().set_state(AudioContextState::Running);
}
}
@@ -204,7 +287,7 @@
/// Will panic when this function is called multiple times
#[allow(clippy::missing_const_for_fn, clippy::unused_self)]
pub fn close_sync(&self) {
self.backend.close();
self.backend.lock().unwrap().close();

self.base().set_state(AudioContextState::Closed);
}