Audio Device Optimization
allow listen-only mode in AudioUnit, adjust when category changes (#2)

release mic when category changes (#5)

Change defaults to iOS defaults (#7)

Sync audio session config (#8)

feat: support bypass voice processing for iOS. (#15)

Remove MacBookPro audio pan right code (#22)

fix: Fix can't open mic alone when built-in AEC is enabled. (#29)

feat: add audio device changes detect for windows. (#41)

fix Linux compile (#47)

AudioUnit: Don't rely on category switch for mic indicator to turn off (#52)

Stop recording on mute (turn off mic indicator) (#55)

Cherry pick audio selection from m97 release (#35)

[Mac] Allow audio device selection (#21)

RTCAudioDeviceModule.outputDevice / inputDevice getter and setter (#80)

Co-authored-by: Hiroshi Horie <[email protected]>
Co-authored-by: David Zhao <[email protected]>
3 people committed May 20, 2024
1 parent b6c65fc commit 5652b78
Showing 39 changed files with 1,209 additions and 202 deletions.
5 changes: 5 additions & 0 deletions audio/audio_send_stream.cc
@@ -415,6 +415,11 @@ void AudioSendStream::SetMuted(bool muted) {
channel_send_->SetInputMute(muted);
}

bool AudioSendStream::GetMuted() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
return channel_send_->InputMute();
}

webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
return GetStats(true);
}
1 change: 1 addition & 0 deletions audio/audio_send_stream.h
@@ -94,6 +94,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
int payload_frequency,
int event,
int duration_ms) override;
bool GetMuted() override;
void SetMuted(bool muted) override;
webrtc::AudioSendStream::Stats GetStats() const override;
webrtc::AudioSendStream::Stats GetStats(
64 changes: 55 additions & 9 deletions audio/audio_state.cc
@@ -98,14 +98,26 @@ void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
UpdateAudioTransportWithSendingStreams();

// Make sure recording is initialized; start recording if enabled.
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {
adm->StartRecording();
if (ShouldRecord()) {
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {

// TODO: Verify whether the following Windows-only logic is still required.
#if defined(WEBRTC_WIN)
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
adm->StartPlayout();
}
#endif
adm->StartRecording();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
}
}
@@ -115,7 +127,8 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
auto count = sending_streams_.erase(stream);
RTC_DCHECK_EQ(1, count);
UpdateAudioTransportWithSendingStreams();
if (sending_streams_.empty()) {

if (!ShouldRecord()) {
config_.audio_device_module->StopRecording();
}
}
@@ -143,7 +156,7 @@ void AudioState::SetRecording(bool enabled) {
if (recording_enabled_ != enabled) {
recording_enabled_ = enabled;
if (enabled) {
if (!sending_streams_.empty()) {
if (ShouldRecord()) {
config_.audio_device_module->StartRecording();
}
} else {
@@ -203,6 +216,39 @@ void AudioState::UpdateNullAudioPollerState() {
null_audio_poller_.Stop();
}
}

void AudioState::OnMuteStreamChanged() {

auto* adm = config_.audio_device_module.get();
bool should_record = ShouldRecord();

if (should_record && !adm->Recording()) {
if (adm->InitRecording() == 0) {
adm->StartRecording();
}
} else if (!should_record && adm->Recording()) {
adm->StopRecording();
}
}

bool AudioState::ShouldRecord() {
// no streams to send
if (sending_streams_.empty()) {
return false;
}

int stream_count = sending_streams_.size();

int muted_count = 0;
for (const auto& kv : sending_streams_) {
if (kv.first->GetMuted()) {
muted_count++;
}
}

return muted_count != stream_count;
}

} // namespace internal

rtc::scoped_refptr<AudioState> AudioState::Create(
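
Taken together, the AudioState changes above stop the ADM from recording once every sending stream is muted (so the OS microphone indicator can turn off) and restart it when a stream becomes unmuted. The sketch below is illustrative only and not part of this commit; the real wiring is expected to sit in WebRtcVoiceMediaChannel, which is why audio_state() is made accessible in webrtc_voice_engine.h further down.

#include "call/audio_send_stream.h"
#include "call/audio_state.h"

// Hypothetical helper (not part of this commit): mute one send stream and let
// the shared AudioState decide whether the microphone should stay open.
void SetLocalAudioMuted(webrtc::AudioSendStream* stream,
                        webrtc::AudioState* audio_state,
                        bool muted) {
  stream->SetMuted(muted);             // forwards to ChannelSend::SetInputMute()
  audio_state->OnMuteStreamChanged();  // re-runs ShouldRecord(); stops recording
                                       // when all streams are muted, restarts it
                                       // when one becomes unmuted again
}
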
5 changes: 5 additions & 0 deletions audio/audio_state.h
@@ -47,6 +47,8 @@ class AudioState : public webrtc::AudioState {

void SetStereoChannelSwapping(bool enable) override;

void OnMuteStreamChanged() override;

AudioDeviceModule* audio_device_module() {
RTC_DCHECK(config_.audio_device_module);
return config_.audio_device_module.get();
@@ -64,6 +66,9 @@
void UpdateAudioTransportWithSendingStreams();
void UpdateNullAudioPollerState() RTC_RUN_ON(&thread_checker_);

// Returns true when at least one sending stream exists and not all of them
// are muted.
bool ShouldRecord();

SequenceChecker thread_checker_;
SequenceChecker process_thread_checker_{SequenceChecker::kDetached};
const webrtc::AudioState::Config config_;
4 changes: 2 additions & 2 deletions audio/channel_send.cc
@@ -100,6 +100,8 @@ class ChannelSend : public ChannelSendInterface,
// Muting, Volume and Level.
void SetInputMute(bool enable) override;

bool InputMute() const override;

// Stats.
ANAStats GetANAStatistics() const override;

@@ -163,8 +165,6 @@ class ChannelSend : public ChannelSendInterface,
size_t payloadSize,
int64_t absolute_capture_timestamp_ms) override;

bool InputMute() const;

int32_t SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp_without_offset,
2 changes: 2 additions & 0 deletions audio/channel_send.h
@@ -83,6 +83,8 @@ class ChannelSendInterface {
virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
virtual int GetTargetBitrate() const = 0;

virtual bool InputMute() const = 0;
virtual void SetInputMute(bool muted) = 0;

virtual void ProcessAndEncodeAudio(
1 change: 1 addition & 0 deletions call/audio_send_stream.h
@@ -190,6 +190,7 @@ class AudioSendStream : public AudioSender {
int event,
int duration_ms) = 0;

virtual bool GetMuted() = 0;
virtual void SetMuted(bool muted) = 0;

virtual Stats GetStats() const = 0;
3 changes: 3 additions & 0 deletions call/audio_state.h
@@ -59,6 +59,9 @@ class AudioState : public rtc::RefCountInterface {

virtual void SetStereoChannelSwapping(bool enable) = 0;

// Notifies the AudioState that a stream updated its mute state.
virtual void OnMuteStreamChanged() = 0;

static rtc::scoped_refptr<AudioState> Create(
const AudioState::Config& config);

3 changes: 2 additions & 1 deletion media/engine/webrtc_voice_engine.h
@@ -132,6 +132,8 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
// Moved to public so WebRtcVoiceMediaChannel can access it.
webrtc::AudioState* audio_state();

private:
// Every option that is "set" will be applied. Every option not "set" will be
@@ -145,7 +147,6 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

webrtc::AudioDeviceModule* adm();
webrtc::AudioProcessing* apm() const;
webrtc::AudioState* audio_state();

std::vector<AudioCodec> CollectCodecs(
const std::vector<webrtc::AudioCodecSpec>& specs) const;
4 changes: 4 additions & 0 deletions modules/audio_device/audio_device_data_observer.cc
@@ -307,6 +307,10 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport {
}
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override {
return impl_->SetAudioDeviceSink(sink);
}

protected:
rtc::scoped_refptr<AudioDeviceModule> impl_;
AudioDeviceDataObserver* legacy_observer_ = nullptr;
4 changes: 4 additions & 0 deletions modules/audio_device/audio_device_generic.h
@@ -135,6 +135,10 @@ class AudioDeviceGeneric {
virtual int GetRecordAudioParameters(AudioParameters* params) const;
#endif // WEBRTC_IOS

virtual int32_t SetAudioDeviceSink(AudioDeviceSink* sink) { return -1; }
virtual int32_t GetPlayoutDevice() const { return -1; }
virtual int32_t GetRecordingDevice() const { return -1; }

virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;

virtual ~AudioDeviceGeneric() {}
42 changes: 35 additions & 7 deletions modules/audio_device/audio_device_impl.cc
@@ -63,15 +63,17 @@ namespace webrtc {

rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory);
return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory, bypass_voice_processing);
}

// static
rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing) {
RTC_DLOG(LS_INFO) << __FUNCTION__;

// The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own
@@ -92,7 +94,7 @@ rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(

// Create the generic reference counted (platform independent) implementation.
auto audio_device = rtc::make_ref_counted<AudioDeviceModuleImpl>(
audio_layer, task_queue_factory);
audio_layer, task_queue_factory, bypass_voice_processing);

// Ensure that the current platform is supported.
if (audio_device->CheckPlatform() == -1) {
@@ -115,8 +117,13 @@ rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(

AudioDeviceModuleImpl::AudioDeviceModuleImpl(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory)
: audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing)
: audio_layer_(audio_layer),
#if defined(WEBRTC_IOS)
bypass_voice_processing_(bypass_voice_processing),
#endif
audio_device_buffer_(task_queue_factory) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
}

@@ -240,7 +247,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
#if defined(WEBRTC_IOS)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/bypass_voice_processing_));
RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
}
// END #if defined(WEBRTC_IOS)
@@ -895,6 +902,27 @@ int AudioDeviceModuleImpl::GetRecordAudioParameters(
}
#endif // WEBRTC_IOS

int32_t AudioDeviceModuleImpl::SetAudioDeviceSink(AudioDeviceSink* sink) const {
RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << sink << ")";
int32_t ok = audio_device_->SetAudioDeviceSink(sink);
RTC_LOG(LS_INFO) << "output: " << ok;
return ok;
}

int32_t AudioDeviceModuleImpl::GetPlayoutDevice() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
int32_t r = audio_device_->GetPlayoutDevice();
RTC_LOG(LS_INFO) << "output: " << r;
return r;
}

int32_t AudioDeviceModuleImpl::GetRecordingDevice() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
int32_t r = audio_device_->GetRecordingDevice();
RTC_LOG(LS_INFO) << "output: " << r;
return r;
}

AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
return platform_type_;
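
The bypass_voice_processing flag shown above is threaded from AudioDeviceModule::Create() down to AudioDeviceIOS; other platforms ignore it in this patch. A minimal creation sketch under that assumption (the CreateBypassedAdm wrapper is illustrative and not part of this commit):

#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"

// Sketch: build the platform-default ADM with Apple voice processing bypassed.
rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateBypassedAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  return webrtc::AudioDeviceModule::Create(
      webrtc::AudioDeviceModule::kPlatformDefaultAudio,
      task_queue_factory,
      /*bypass_voice_processing=*/true);
}
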
10 changes: 9 additions & 1 deletion modules/audio_device/audio_device_impl.h
@@ -47,7 +47,8 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int32_t AttachAudioBuffer();

AudioDeviceModuleImpl(AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);
// If `create_detached` is true, created ADM can be used on another thread
// compared to the one on which it was created. It's useful for testing.
AudioDeviceModuleImpl(AudioLayer audio_layer,
@@ -155,6 +156,10 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int GetRecordAudioParameters(AudioParameters* params) const override;
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override;
int32_t GetPlayoutDevice() const override;
int32_t GetRecordingDevice() const override;

AudioDeviceBuffer* GetAudioDeviceBuffer() { return &audio_device_buffer_; }

int RestartPlayoutInternally() override { return -1; }
@@ -169,6 +174,9 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
AudioLayer audio_layer_;
PlatformType platform_type_ = kPlatformNotSupported;
bool initialized_ = false;
#if defined(WEBRTC_IOS)
bool bypass_voice_processing_;
#endif
AudioDeviceBuffer audio_device_buffer_;
std::unique_ptr<AudioDeviceGeneric> audio_device_;
};
23 changes: 19 additions & 4 deletions modules/audio_device/include/audio_device.h
@@ -12,16 +12,25 @@
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_

#include "absl/types/optional.h"
#include "api/ref_count.h"
#include "api/scoped_refptr.h"
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/ref_count.h"

namespace webrtc {

class AudioDeviceModuleForTest;

class AudioDeviceModule : public webrtc::RefCountInterface {
// Sink for callbacks related to an audio device.
class AudioDeviceSink {
public:
virtual ~AudioDeviceSink() = default;

// Called when input/output devices are updated or the default device changes.
virtual void OnDevicesUpdated() = 0;
};

class AudioDeviceModule : public rtc::RefCountInterface {
public:
enum AudioLayer {
kPlatformDefaultAudio = 0,
@@ -56,12 +65,14 @@ class AudioDeviceModule : public webrtc::RefCountInterface {
// Creates a default ADM for usage in production code.
static rtc::scoped_refptr<AudioDeviceModule> Create(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);
// Creates an ADM with support for extra test methods. Don't use this factory
// in production code.
static rtc::scoped_refptr<AudioDeviceModuleForTest> CreateForTest(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);

// Retrieve the currently utilized audio layer
virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const = 0;
@@ -171,6 +182,10 @@ class AudioDeviceModule : public webrtc::RefCountInterface {
virtual int GetRecordAudioParameters(AudioParameters* params) const = 0;
#endif // WEBRTC_IOS

virtual int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const { return -1; }
virtual int32_t GetPlayoutDevice() const { return -1; }
virtual int32_t GetRecordingDevice() const { return -1; }

protected:
~AudioDeviceModule() override {}
};
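
The AudioDeviceSink hook added here lets embedders observe device changes (cf. "add audio device changes detect for windows" in the commit message). A minimal consumer sketch, assuming an already-created ADM; the LoggingDeviceSink class and its re-enumeration strategy are illustrative only:

#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/logging.h"

// Illustrative sink: log the device counts whenever the OS reports a change.
class LoggingDeviceSink : public webrtc::AudioDeviceSink {
 public:
  explicit LoggingDeviceSink(webrtc::AudioDeviceModule* adm) : adm_(adm) {}

  void OnDevicesUpdated() override {
    RTC_LOG(LS_INFO) << "Audio devices changed, playout devices: "
                     << adm_->PlayoutDevices()
                     << ", recording devices: " << adm_->RecordingDevices();
  }

 private:
  webrtc::AudioDeviceModule* const adm_;
};

// Usage (sketch): the sink must outlive the registration.
//   LoggingDeviceSink sink(adm.get());
//   adm->SetAudioDeviceSink(&sink);  // returns -1 where the platform ADM
//                                    // does not implement the hook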