Audio Device Optimization
allow listen-only mode in AudioUnit, adjust when category changes (#2)

release mic when category changes (#5)

Change defaults to iOS defaults (#7)

Sync audio session config (#8)

feat: support bypass voice processing for iOS. (#15)

Remove MacBookPro audio pan right code (#22)

fix: Fix can't open mic alone when built-in AEC is enabled. (#29)

feat: add audio device changes detect for windows. (#41)

fix Linux compile (#47)

AudioUnit: Don't rely on category switch for mic indicator to turn off (#52)

Stop recording on mute (turn off mic indicator) (#55)

Cherry pick audio selection from m97 release (#35)

[Mac] Allow audio device selection (#21)

Co-authored-by: Hiroshi Horie <[email protected]>
Co-authored-by: David Zhao <[email protected]>
3 people committed Jun 12, 2023
1 parent aaf97b7 commit 22a58c8
Showing 40 changed files with 1,131 additions and 203 deletions.
5 changes: 5 additions & 0 deletions audio/audio_send_stream.cc
@@ -424,6 +424,11 @@ void AudioSendStream::SetMuted(bool muted) {
channel_send_->SetInputMute(muted);
}

bool AudioSendStream::GetMuted() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
return channel_send_->InputMute();
}

webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
return GetStats(true);
}
1 change: 1 addition & 0 deletions audio/audio_send_stream.h
@@ -96,6 +96,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
int payload_frequency,
int event,
int duration_ms) override;
bool GetMuted() override;
void SetMuted(bool muted) override;
webrtc::AudioSendStream::Stats GetStats() const override;
webrtc::AudioSendStream::Stats GetStats(
64 changes: 55 additions & 9 deletions audio/audio_state.cc
@@ -98,14 +98,26 @@ void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
UpdateAudioTransportWithSendingStreams();

// Make sure recording is initialized; start recording if enabled.
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {
adm->StartRecording();
if (ShouldRecord()) {
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {

// TODO: Verify whether the following Windows-only logic is still required.
#if defined(WEBRTC_WIN)
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
adm->StartPlayout();
}
#endif
adm->StartRecording();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
}
}
@@ -115,7 +127,8 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
auto count = sending_streams_.erase(stream);
RTC_DCHECK_EQ(1, count);
UpdateAudioTransportWithSendingStreams();
if (sending_streams_.empty()) {

if (!ShouldRecord()) {
config_.audio_device_module->StopRecording();
}
}
@@ -143,7 +156,7 @@ void AudioState::SetRecording(bool enabled) {
if (recording_enabled_ != enabled) {
recording_enabled_ = enabled;
if (enabled) {
if (!sending_streams_.empty()) {
if (ShouldRecord()) {
config_.audio_device_module->StartRecording();
}
} else {
@@ -203,6 +216,39 @@ void AudioState::UpdateNullAudioPollerState() {
null_audio_poller_.Stop();
}
}

void AudioState::OnMuteStreamChanged() {

auto* adm = config_.audio_device_module.get();
bool should_record = ShouldRecord();

if (should_record && !adm->Recording()) {
if (adm->InitRecording() == 0) {
adm->StartRecording();
}
} else if (!should_record && adm->Recording()) {
adm->StopRecording();
}
}

bool AudioState::ShouldRecord() {
// no streams to send
if (sending_streams_.empty()) {
return false;
}

int stream_count = sending_streams_.size();

int muted_count = 0;
for (const auto& kv : sending_streams_) {
if (kv.first->GetMuted()) {
muted_count++;
}
}

return muted_count != stream_count;
}

} // namespace internal

rtc::scoped_refptr<AudioState> AudioState::Create(
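For reference, a minimal standalone sketch of the recording gate introduced above: capture runs only while at least one sending stream is unmuted, and stops once every stream is muted or no streams remain, which is what lets the OS mic indicator turn off. The names below (FakeSendStream, should_record) are illustrative only and are not part of WebRTC.

#include <vector>

struct FakeSendStream {
  bool muted = false;
};

// Mirrors the intent of AudioState::ShouldRecord(): record only if at least
// one sending stream exists and at least one of them is unmuted.
bool should_record(const std::vector<FakeSendStream>& streams) {
  if (streams.empty()) return false;  // no streams to send
  for (const auto& s : streams) {
    if (!s.muted) return true;        // any unmuted stream keeps capture running
  }
  return false;                       // all muted: stop recording
}

int main() {
  std::vector<FakeSendStream> streams(2);
  streams[0].muted = true;                 // one muted, one unmuted stream
  return should_record(streams) ? 0 : 1;   // exits 0: capture keeps running
}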
5 changes: 5 additions & 0 deletions audio/audio_state.h
@@ -47,6 +47,8 @@ class AudioState : public webrtc::AudioState {

void SetStereoChannelSwapping(bool enable) override;

void OnMuteStreamChanged() override;

AudioDeviceModule* audio_device_module() {
RTC_DCHECK(config_.audio_device_module);
return config_.audio_device_module.get();
@@ -64,6 +66,9 @@
void UpdateAudioTransportWithSendingStreams();
void UpdateNullAudioPollerState() RTC_RUN_ON(&thread_checker_);

// Returns true when at least one sending stream exists and not all of them are muted.
bool ShouldRecord();

SequenceChecker thread_checker_;
SequenceChecker process_thread_checker_{SequenceChecker::kDetached};
const webrtc::AudioState::Config config_;
4 changes: 2 additions & 2 deletions audio/channel_send.cc
@@ -98,6 +98,8 @@ class ChannelSend : public ChannelSendInterface,
// Muting, Volume and Level.
void SetInputMute(bool enable) override;

bool InputMute() const override;

// Stats.
ANAStats GetANAStatistics() const override;

@@ -161,8 +163,6 @@ class ChannelSend : public ChannelSendInterface,
size_t payloadSize,
int64_t absolute_capture_timestamp_ms) override;

bool InputMute() const;

int32_t SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp,
2 changes: 2 additions & 0 deletions audio/channel_send.h
@@ -95,6 +95,8 @@ class ChannelSendInterface {
virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
virtual int GetTargetBitrate() const = 0;

virtual bool InputMute() const = 0;
virtual void SetInputMute(bool muted) = 0;

virtual void ProcessAndEncodeAudio(
1 change: 1 addition & 0 deletions call/audio_send_stream.h
@@ -190,6 +190,7 @@ class AudioSendStream : public AudioSender {
int event,
int duration_ms) = 0;

virtual bool GetMuted() = 0;
virtual void SetMuted(bool muted) = 0;

virtual Stats GetStats() const = 0;
3 changes: 3 additions & 0 deletions call/audio_state.h
@@ -59,6 +59,9 @@ class AudioState : public rtc::RefCountInterface {

virtual void SetStereoChannelSwapping(bool enable) = 0;

// Notify the AudioState that a stream updated its mute state.
virtual void OnMuteStreamChanged() = 0;

static rtc::scoped_refptr<AudioState> Create(
const AudioState::Config& config);

3 changes: 3 additions & 0 deletions media/engine/webrtc_voice_engine.cc
@@ -2285,6 +2285,9 @@ bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) {
ap->set_output_will_be_muted(all_muted);
}

// Notify the AudioState that the mute state has been updated.
engine_->audio_state()->OnMuteStreamChanged();

return true;
}

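From the application's point of view, the practical effect of this notification is that muting local audio now releases the capture device instead of only feeding silence. A hedged usage sketch, assuming a standard PeerConnection setup where the local webrtc::AudioTrackInterface is attached to an audio sender; SetMicrophoneEnabled is a hypothetical helper, and the plumbing it relies on (MuteStream -> OnMuteStreamChanged -> StopRecording) is the code shown in this commit.

#include "api/media_stream_interface.h"
#include "api/scoped_refptr.h"

// Hypothetical application helper: toggling the track's enabled state is the
// switch that, after this commit, also starts/stops ADM capture.
void SetMicrophoneEnabled(
    rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track,
    bool enabled) {
  // set_enabled(false) mutes the send stream; once every sending stream is
  // muted, AudioState stops recording and the OS mic indicator turns off.
  audio_track->set_enabled(enabled);
}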
3 changes: 2 additions & 1 deletion media/engine/webrtc_voice_engine.h
@@ -93,6 +93,8 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
// Moved to public so WebRtcVoiceMediaChannel can access it.
webrtc::AudioState* audio_state();

private:
// Every option that is "set" will be applied. Every option not "set" will be
@@ -105,7 +107,6 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

webrtc::AudioDeviceModule* adm();
webrtc::AudioProcessing* apm() const;
webrtc::AudioState* audio_state();

std::vector<AudioCodec> CollectCodecs(
const std::vector<webrtc::AudioCodecSpec>& specs) const;
4 changes: 4 additions & 0 deletions modules/audio_device/audio_device_data_observer.cc
@@ -307,6 +307,10 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport {
}
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override {
return impl_->SetAudioDeviceSink(sink);
}

protected:
rtc::scoped_refptr<AudioDeviceModule> impl_;
AudioDeviceDataObserver* legacy_observer_ = nullptr;
2 changes: 2 additions & 0 deletions modules/audio_device/audio_device_generic.h
@@ -135,6 +135,8 @@ class AudioDeviceGeneric {
virtual int GetRecordAudioParameters(AudioParameters* params) const;
#endif // WEBRTC_IOS

virtual int32_t SetAudioDeviceSink(AudioDeviceSink* sink) { return -1; }

virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;

virtual ~AudioDeviceGeneric() {}
28 changes: 21 additions & 7 deletions modules/audio_device/audio_device_impl.cc
@@ -72,15 +72,17 @@ namespace webrtc {

rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory);
return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory, bypass_voice_processing);
}

// static
rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing) {
RTC_DLOG(LS_INFO) << __FUNCTION__;

// The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own
@@ -93,7 +95,7 @@ rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(

// Create the generic reference counted (platform independent) implementation.
auto audio_device = rtc::make_ref_counted<AudioDeviceModuleImpl>(
audio_layer, task_queue_factory);
audio_layer, task_queue_factory, bypass_voice_processing);

// Ensure that the current platform is supported.
if (audio_device->CheckPlatform() == -1) {
@@ -116,8 +118,13 @@ rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(

AudioDeviceModuleImpl::AudioDeviceModuleImpl(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory)
: audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing)
: audio_layer_(audio_layer),
#if defined(WEBRTC_IOS)
bypass_voice_processing_(bypass_voice_processing),
#endif
audio_device_buffer_(task_queue_factory) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
}

@@ -280,7 +287,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
#if defined(WEBRTC_IOS)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/bypass_voice_processing_));
RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
}
// END #if defined(WEBRTC_IOS)
@@ -937,6 +944,13 @@ int AudioDeviceModuleImpl::GetRecordAudioParameters(
}
#endif // WEBRTC_IOS

int32_t AudioDeviceModuleImpl::SetAudioDeviceSink(AudioDeviceSink* sink) const {
RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << sink << ")";
int32_t ok = audio_device_->SetAudioDeviceSink(sink);
RTC_LOG(LS_INFO) << "output: " << ok;
return ok;
}

AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
return platform_type_;
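As a quick orientation for the new flag threaded through this file: bypass_voice_processing travels from the public AudioDeviceModule factories down to ios_adm::AudioDeviceIOS and is only consulted on iOS. Below is a minimal construction sketch, assuming the stock default task-queue factory; CreateBypassVoiceProcessingAdm is an illustrative helper, not part of this commit.

#include "modules/audio_device/include/audio_device.h"

rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateBypassVoiceProcessingAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  // On iOS, bypass_voice_processing = true makes AudioDeviceIOS use the raw
  // audio unit instead of the voice-processing (VPIO) unit; on other
  // platforms the flag is ignored.
  return webrtc::AudioDeviceModule::Create(
      webrtc::AudioDeviceModule::kPlatformDefaultAudio,
      task_queue_factory,
      /*bypass_voice_processing=*/true);
}

// Usage (factory from "api/task_queue/default_task_queue_factory.h"); the
// caller keeps the factory alive for the lifetime of the ADM:
//   auto factory = webrtc::CreateDefaultTaskQueueFactory();
//   auto adm = CreateBypassVoiceProcessingAdm(factory.get());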
9 changes: 7 additions & 2 deletions modules/audio_device/audio_device_impl.h
@@ -43,7 +43,8 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int32_t AttachAudioBuffer();

AudioDeviceModuleImpl(AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);
~AudioDeviceModuleImpl() override;

// Retrieve the currently utilized audio layer
@@ -145,6 +146,8 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int GetRecordAudioParameters(AudioParameters* params) const override;
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override;

#if defined(WEBRTC_ANDROID)
// Only use this accessor for test purposes on Android.
AudioManager* GetAndroidAudioManagerForTest() {
@@ -165,7 +168,9 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
AudioLayer audio_layer_;
PlatformType platform_type_ = kPlatformNotSupported;
bool initialized_ = false;
#if defined(WEBRTC_ANDROID)
#if defined(WEBRTC_IOS)
bool bypass_voice_processing_;
#elif defined(WEBRTC_ANDROID)
// Should be declared first to ensure that it outlives other resources.
std::unique_ptr<AudioManager> audio_manager_android_;
#endif
17 changes: 15 additions & 2 deletions modules/audio_device/include/audio_device.h
@@ -21,6 +21,15 @@ namespace webrtc {

class AudioDeviceModuleForTest;

// Sink for callbacks related to an audio device.
class AudioDeviceSink {
public:
virtual ~AudioDeviceSink() = default;

// Called when input/output devices are updated or the default device changes.
virtual void OnDevicesUpdated() = 0;
};

class AudioDeviceModule : public rtc::RefCountInterface {
public:
enum AudioLayer {
@@ -56,12 +65,14 @@ class AudioDeviceModule : public rtc::RefCountInterface {
// Creates a default ADM for usage in production code.
static rtc::scoped_refptr<AudioDeviceModule> Create(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);
// Creates an ADM with support for extra test methods. Don't use this factory
// in production code.
static rtc::scoped_refptr<AudioDeviceModuleForTest> CreateForTest(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);

// Retrieve the currently utilized audio layer
virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const = 0;
@@ -171,6 +182,8 @@ class AudioDeviceModule : public rtc::RefCountInterface {
virtual int GetRecordAudioParameters(AudioParameters* params) const = 0;
#endif // WEBRTC_IOS

virtual int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const { return -1; }

protected:
~AudioDeviceModule() override {}
};
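A hedged sketch of consuming the AudioDeviceSink hook added above: the sink is registered on the ADM and OnDevicesUpdated() fires when input/output devices change or the default device switches. LoggingDeviceSink is an illustrative name and not part of this commit; only AudioDeviceSink, OnDevicesUpdated(), and SetAudioDeviceSink() come from the interface shown here.

#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/logging.h"

class LoggingDeviceSink : public webrtc::AudioDeviceSink {
 public:
  void OnDevicesUpdated() override {
    // Re-enumerate devices here (e.g. adm->PlayoutDevices() /
    // adm->RecordingDevices()) and surface the change to the application.
    RTC_LOG(LS_INFO) << "Audio devices changed";
  }
};

// Usage (adm is an rtc::scoped_refptr<webrtc::AudioDeviceModule>); the sink
// must outlive its registration with the ADM:
//   LoggingDeviceSink sink;
//   adm->SetAudioDeviceSink(&sink);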