Title: [216156] trunk/Source/WebCore
Revision
216156
Author
[email protected]
Date
2017-05-03 16:50:18 -0700 (Wed, 03 May 2017)

Log Message

Make the VPIO audio unit a singleton, shared between multiple CaptureSources
https://bugs.webkit.org/show_bug.cgi?id=171622

Reviewed by Eric Carlson.

Move the implementation of CoreAudioCaptureSource into a shared singleton class, CoreAudioSharedUnit,
which will send audio to each of its client CoreAudioCaptureSources. The first registered client will
define the settings used by the shared unit.

Drive-by fixes: Fix up setUseAVFoundationAudioCapture() to always accept the first value set.

* platform/mediastream/mac/CoreAudioCaptureSource.cpp:
(WebCore::CoreAudioSharedUnit::isProducingData):
(WebCore::CoreAudioSharedUnit::microphoneFormat):
(WebCore::CoreAudioSharedUnit::singleton):
(WebCore::CoreAudioSharedUnit::addClient):
(WebCore::CoreAudioSharedUnit::removeClient):
(WebCore::CoreAudioSharedUnit::addEchoCancellationSource):
(WebCore::CoreAudioSharedUnit::removeEchoCancellationSource):
(WebCore::CoreAudioSharedUnit::preferredIOBufferSize):
(WebCore::CoreAudioSharedUnit::setupAudioUnits):
(WebCore::CoreAudioSharedUnit::configureMicrophoneProc):
(WebCore::CoreAudioSharedUnit::configureSpeakerProc):
(WebCore::CoreAudioSharedUnit::checkTimestamps):
(WebCore::CoreAudioSharedUnit::provideSpeakerData):
(WebCore::CoreAudioSharedUnit::speakerCallback):
(WebCore::CoreAudioSharedUnit::processMicrophoneSamples):
(WebCore::CoreAudioSharedUnit::microphoneCallback):
(WebCore::CoreAudioSharedUnit::cleanupAudioUnits):
(WebCore::CoreAudioSharedUnit::startProducingData):
(WebCore::CoreAudioSharedUnit::stopProducingData):
(WebCore::CoreAudioSharedUnit::suspend):
(WebCore::CoreAudioSharedUnit::defaultInputDevice):
(WebCore::CoreAudioCaptureSource::create):
(WebCore::CoreAudioCaptureSource::factory):
(WebCore::CoreAudioCaptureSource::CoreAudioCaptureSource):
(WebCore::CoreAudioCaptureSource::~CoreAudioCaptureSource):
(WebCore::CoreAudioCaptureSource::addEchoCancellationSource):
(WebCore::CoreAudioCaptureSource::removeEchoCancellationSource):
(WebCore::CoreAudioCaptureSource::startProducingData):
(WebCore::CoreAudioCaptureSource::stopProducingData):
(WebCore::CoreAudioCaptureSource::audioSourceProvider):
(WebCore::CoreAudioCaptureSource::preferredSampleRate): Deleted.
(WebCore::CoreAudioCaptureSource::preferredIOBufferSize): Deleted.
(WebCore::CoreAudioCaptureSource::configureMicrophoneProc): Deleted.
(WebCore::CoreAudioCaptureSource::configureSpeakerProc): Deleted.
(WebCore::CoreAudioCaptureSource::checkTimestamps): Deleted.
(WebCore::CoreAudioCaptureSource::provideSpeakerData): Deleted.
(WebCore::CoreAudioCaptureSource::speakerCallback): Deleted.
(WebCore::CoreAudioCaptureSource::processMicrophoneSamples): Deleted.
(WebCore::CoreAudioCaptureSource::microphoneCallback): Deleted.
(WebCore::CoreAudioCaptureSource::cleanupAudioUnits): Deleted.
(WebCore::CoreAudioCaptureSource::defaultInputDevice): Deleted.
(WebCore::CoreAudioCaptureSource::setupAudioUnits): Deleted.
(WebCore::CoreAudioCaptureSource::suspend): Deleted.
(WebCore::CoreAudioCaptureSource::resume): Deleted.
* platform/mediastream/mac/CoreAudioCaptureSource.h:
* platform/mediastream/mac/RealtimeMediaSourceCenterMac.cpp:
(WebCore::RealtimeMediaSourceCenterMac::setUseAVFoundationAudioCapture):

Modified Paths

Diff

Modified: trunk/Source/WebCore/ChangeLog (216155 => 216156)


--- trunk/Source/WebCore/ChangeLog	2017-05-03 23:48:35 UTC (rev 216155)
+++ trunk/Source/WebCore/ChangeLog	2017-05-03 23:50:18 UTC (rev 216156)
@@ -1,5 +1,67 @@
 2017-05-03  Jer Noble  <[email protected]>
 
+        Make the VPIO audio unit a singleton, shared between multiple CaptureSources
+        https://bugs.webkit.org/show_bug.cgi?id=171622
+
+        Reviewed by Eric Carlson.
+
+        Move the implementation of CoreAudioCaptureSource into a shared singleton class, CoreAudioSharedUnit,
+        which will send audio to each of its client CoreAudioCaptureSources. The first registered client will
+        define the settings used by the shared unit.
+
+        Drive-by fixes: Fix up setUseAVFoundationAudioCapture() to always accept the first value set.
+
+        * platform/mediastream/mac/CoreAudioCaptureSource.cpp:
+        (WebCore::CoreAudioSharedUnit::isProducingData):
+        (WebCore::CoreAudioSharedUnit::microphoneFormat):
+        (WebCore::CoreAudioSharedUnit::singleton):
+        (WebCore::CoreAudioSharedUnit::addClient):
+        (WebCore::CoreAudioSharedUnit::removeClient):
+        (WebCore::CoreAudioSharedUnit::addEchoCancellationSource):
+        (WebCore::CoreAudioSharedUnit::removeEchoCancellationSource):
+        (WebCore::CoreAudioSharedUnit::preferredIOBufferSize):
+        (WebCore::CoreAudioSharedUnit::setupAudioUnits):
+        (WebCore::CoreAudioSharedUnit::configureMicrophoneProc):
+        (WebCore::CoreAudioSharedUnit::configureSpeakerProc):
+        (WebCore::CoreAudioSharedUnit::checkTimestamps):
+        (WebCore::CoreAudioSharedUnit::provideSpeakerData):
+        (WebCore::CoreAudioSharedUnit::speakerCallback):
+        (WebCore::CoreAudioSharedUnit::processMicrophoneSamples):
+        (WebCore::CoreAudioSharedUnit::microphoneCallback):
+        (WebCore::CoreAudioSharedUnit::cleanupAudioUnits):
+        (WebCore::CoreAudioSharedUnit::startProducingData):
+        (WebCore::CoreAudioSharedUnit::stopProducingData):
+        (WebCore::CoreAudioSharedUnit::suspend):
+        (WebCore::CoreAudioSharedUnit::defaultInputDevice):
+        (WebCore::CoreAudioCaptureSource::create):
+        (WebCore::CoreAudioCaptureSource::factory):
+        (WebCore::CoreAudioCaptureSource::CoreAudioCaptureSource):
+        (WebCore::CoreAudioCaptureSource::~CoreAudioCaptureSource):
+        (WebCore::CoreAudioCaptureSource::addEchoCancellationSource):
+        (WebCore::CoreAudioCaptureSource::removeEchoCancellationSource):
+        (WebCore::CoreAudioCaptureSource::startProducingData):
+        (WebCore::CoreAudioCaptureSource::stopProducingData):
+        (WebCore::CoreAudioCaptureSource::audioSourceProvider):
+        (WebCore::CoreAudioCaptureSource::preferredSampleRate): Deleted.
+        (WebCore::CoreAudioCaptureSource::preferredIOBufferSize): Deleted.
+        (WebCore::CoreAudioCaptureSource::configureMicrophoneProc): Deleted.
+        (WebCore::CoreAudioCaptureSource::configureSpeakerProc): Deleted.
+        (WebCore::CoreAudioCaptureSource::checkTimestamps): Deleted.
+        (WebCore::CoreAudioCaptureSource::provideSpeakerData): Deleted.
+        (WebCore::CoreAudioCaptureSource::speakerCallback): Deleted.
+        (WebCore::CoreAudioCaptureSource::processMicrophoneSamples): Deleted.
+        (WebCore::CoreAudioCaptureSource::microphoneCallback): Deleted.
+        (WebCore::CoreAudioCaptureSource::cleanupAudioUnits): Deleted.
+        (WebCore::CoreAudioCaptureSource::defaultInputDevice): Deleted.
+        (WebCore::CoreAudioCaptureSource::setupAudioUnits): Deleted.
+        (WebCore::CoreAudioCaptureSource::suspend): Deleted.
+        (WebCore::CoreAudioCaptureSource::resume): Deleted.
+        * platform/mediastream/mac/CoreAudioCaptureSource.h:
+        * platform/mediastream/mac/RealtimeMediaSourceCenterMac.cpp:
+        (WebCore::RealtimeMediaSourceCenterMac::setUseAVFoundationAudioCapture):
+
+2017-05-03  Jer Noble  <[email protected]>
+
         getUserMedia() fails because devices list is empty / inactive
         https://bugs.webkit.org/show_bug.cgi?id=171626
 

Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp (216155 => 216156)


--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp	2017-05-03 23:48:35 UTC (rev 216155)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp	2017-05-03 23:50:18 UTC (rev 216156)
@@ -37,6 +37,7 @@
 #include "CoreAudioCaptureDeviceManager.h"
 #include "Logging.h"
 #include "MediaTimeAVFoundation.h"
+#include "WebAudioSourceProviderAVFObjC.h"
 #include <AudioToolbox/AudioConverter.h>
 #include <AudioUnit/AudioUnit.h>
 #include <CoreMedia/CMSync.h>
@@ -58,85 +59,209 @@
 const UInt32 outputBus = 0;
 const UInt32 inputBus = 1;
 
-CaptureSourceOrError CoreAudioCaptureSource::create(const String& deviceID, const MediaConstraints* constraints)
-{
-    String label;
-    uint32_t persistentID = 0;
-#if PLATFORM(MAC)
-    auto device = CoreAudioCaptureDeviceManager::singleton().coreAudioDeviceWithUID(deviceID);
-    if (!device)
-        return { };
+class CoreAudioSharedUnit {
+public:
+    static CoreAudioSharedUnit& singleton();
 
-    label = device->label();
-    persistentID = device->deviceID();
-#elif PLATFORM(IOS)
-    auto device = AVAudioSessionCaptureDeviceManager::singleton().audioSessionDeviceWithUID(deviceID);
-    if (!device)
-        return { };
+    void addClient(CoreAudioCaptureSource&);
+    void removeClient(CoreAudioCaptureSource&);
 
-    label = device->label();
+    void startProducingData();
+    void stopProducingData();
+    bool isProducingData() { return m_ioUnitStarted; }
+
+    OSStatus suspend();
+
+    OSStatus setupAudioUnits();
+    void cleanupAudioUnits();
+
+    void addEchoCancellationSource(AudioSampleDataSource&);
+    void removeEchoCancellationSource(AudioSampleDataSource&);
+
+    static size_t preferredIOBufferSize();
+
+    const CAAudioStreamDescription& microphoneFormat() const { return m_microphoneProcFormat; }
+
+private:
+    OSStatus configureSpeakerProc();
+    OSStatus configureMicrophoneProc();
+    OSStatus defaultOutputDevice(uint32_t*);
+    OSStatus defaultInputDevice(uint32_t*);
+
+    static OSStatus microphoneCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
+    OSStatus processMicrophoneSamples(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList*);
+
+    static OSStatus speakerCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
+    OSStatus provideSpeakerData(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList*);
+
+    Vector<CoreAudioCaptureSource*> m_clients;
+
+    AudioUnit m_ioUnit { nullptr };
+
+    // Only read/modified from the IO thread.
+    Vector<Ref<AudioSampleDataSource>> m_activeSources;
+
+    enum QueueAction { Add, Remove };
+    Vector<std::pair<QueueAction, Ref<AudioSampleDataSource>>> m_pendingSources;
+
+    uint32_t m_captureDeviceID { 0 };
+
+    CAAudioStreamDescription m_microphoneProcFormat;
+    RefPtr<AudioSampleBufferList> m_microphoneSampleBuffer;
+    uint64_t m_latestMicTimeStamp { 0 };
+
+    CAAudioStreamDescription m_speakerProcFormat;
+    RefPtr<AudioSampleBufferList> m_speakerSampleBuffer;
+
+    double m_DTSConversionRatio { 0 };
+
+    bool m_ioUnitInitialized { false };
+    bool m_ioUnitStarted { false };
+
+    Lock m_pendingSourceQueueLock;
+    Lock m_internalStateLock;
+
+    int32_t m_suspendCount { 0 };
+    int32_t m_producingCount { 0 };
+
+    mutable std::unique_ptr<RealtimeMediaSourceCapabilities> m_capabilities;
+    mutable RealtimeMediaSourceSupportedConstraints m_supportedConstraints;
+    mutable std::optional<RealtimeMediaSourceSettings> m_currentSettings;
+
+#if !LOG_DISABLED
+    void checkTimestamps(const AudioTimeStamp&, uint64_t, double);
+
+    String m_ioUnitName;
+    uint64_t m_speakerProcsCalled { 0 };
+    uint64_t m_microphoneProcsCalled { 0 };
 #endif
-    auto source = adoptRef(*new CoreAudioCaptureSource(deviceID, label, persistentID));
+};
 
-    if (constraints) {
-        auto result = source->applyConstraints(*constraints);
-        if (result)
-            return WTFMove(result.value().first);
+CoreAudioSharedUnit& CoreAudioSharedUnit::singleton()
+{
+    static NeverDestroyed<CoreAudioSharedUnit> singleton;
+    return singleton;
+}
+
+void CoreAudioSharedUnit::addClient(CoreAudioCaptureSource& client)
+{
+    m_clients.append(&client);
+}
+
+void CoreAudioSharedUnit::removeClient(CoreAudioCaptureSource& client)
+{
+    m_clients.removeAll(&client);
+}
+
+void CoreAudioSharedUnit::addEchoCancellationSource(AudioSampleDataSource& source)
+{
+    if (!source.setOutputFormat(m_speakerProcFormat)) {
+        LOG(Media, "CoreAudioSharedUnit::addEchoCancellationSource: source %p configureOutput failed", &source);
+        return;
     }
-    return CaptureSourceOrError(WTFMove(source));
+
+    std::lock_guard<Lock> lock(m_pendingSourceQueueLock);
+    m_pendingSources.append({ QueueAction::Add, source });
 }
 
-RealtimeMediaSource::AudioCaptureFactory& CoreAudioCaptureSource::factory()
+void CoreAudioSharedUnit::removeEchoCancellationSource(AudioSampleDataSource& source)
 {
-    static NeverDestroyed<CoreAudioCaptureSourceFactory> factory;
-    return factory.get();
+    std::lock_guard<Lock> lock(m_pendingSourceQueueLock);
+    m_pendingSources.append({ QueueAction::Remove, source });
 }
 
-CoreAudioCaptureSource::CoreAudioCaptureSource(const String& deviceID, const String& label, uint32_t persistentID)
-    : RealtimeMediaSource(deviceID, RealtimeMediaSource::Type::Audio, label)
-    , m_captureDeviceID(persistentID)
+size_t CoreAudioSharedUnit::preferredIOBufferSize()
 {
-    m_muted = true;
+    return AudioSession::sharedSession().bufferSize();
+}
 
-    setVolume(1.0);
-    setSampleRate(preferredSampleRate());
-    setEchoCancellation(true);
+OSStatus CoreAudioSharedUnit::setupAudioUnits()
+{
+    if (m_ioUnit)
+        return 0;
 
+    ASSERT(!m_clients.isEmpty());
+
     mach_timebase_info_data_t timebaseInfo;
     mach_timebase_info(&timebaseInfo);
     m_DTSConversionRatio = 1e-9 * static_cast<double>(timebaseInfo.numer) / static_cast<double>(timebaseInfo.denom);
-}
 
-CoreAudioCaptureSource::~CoreAudioCaptureSource()
-{
-    suspend();
-    cleanupAudioUnits();
+    AudioComponentDescription ioUnitDescription = { kAudioUnitType_Output, kAudioUnitSubType_VoiceProcessingIO, kAudioUnitManufacturer_Apple, 0, 0 };
+    AudioComponent ioComponent = AudioComponentFindNext(nullptr, &ioUnitDescription);
+    ASSERT(ioComponent);
+    if (!ioComponent) {
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to find vpio unit component", this);
+        return -1;
+    }
 
-    m_activeSources.clear();
-    m_pendingSources.clear();
-
 #if !LOG_DISABLED
-    m_speakerProcsCalled = 0;
-    m_microphoneProcsCalled  = 0;
+    CFStringRef name = nullptr;
+    AudioComponentCopyName(ioComponent, &name);
+    if (name) {
+        m_ioUnitName = name;
+        CFRelease(name);
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) created \"%s\" component", this, m_ioUnitName.utf8().data());
+    }
 #endif
-}
 
-double CoreAudioCaptureSource::preferredSampleRate()
-{
-    return AudioSession::sharedSession().sampleRate();
-}
+    auto err = AudioComponentInstanceNew(ioComponent, &m_ioUnit);
+    if (err) {
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to open vpio unit, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
 
-size_t CoreAudioCaptureSource::preferredIOBufferSize()
-{
-    return AudioSession::sharedSession().bufferSize();
+    uint32_t param = m_clients.first()->echoCancellation();
+    err = AudioUnitSetProperty(m_ioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC, kAudioUnitScope_Global, inputBus, &param, sizeof(param));
+    if (err) {
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to set vpio unit echo cancellation, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+
+#if PLATFORM(IOS)
+    param = 1;
+    err = AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, inputBus, &param, sizeof(param));
+    if (err) {
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to enable vpio unit input, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+#else
+    if (!m_captureDeviceID) {
+        err = defaultInputDevice(&m_captureDeviceID);
+        if (err)
+            return err;
+    }
+
+    err = AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, &m_captureDeviceID, sizeof(m_captureDeviceID));
+    if (err) {
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to set vpio unit capture device ID, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+#endif
+
+    err = configureMicrophoneProc();
+    if (err)
+        return err;
+
+    err = AudioUnitInitialize(m_ioUnit);
+    if (err) {
+        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) AudioUnitInitialize() failed, error %d (%.4s)", this, (int)err, (char*)&err);
+        return err;
+    }
+    m_ioUnitInitialized = true;
+
+    err = configureSpeakerProc();
+    if (err)
+        return err;
+
+    return err;
 }
 
-OSStatus CoreAudioCaptureSource::configureMicrophoneProc()
+OSStatus CoreAudioSharedUnit::configureMicrophoneProc()
 {
     AURenderCallbackStruct callback = { microphoneCallback, this };
     auto err = AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, inputBus, &callback, sizeof(callback));
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::configureMicrophoneProc(%p) unable to set vpio unit mic proc, error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to set vpio unit mic proc, error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
@@ -145,14 +270,14 @@
     UInt32 size = sizeof(microphoneProcFormat);
     err = AudioUnitGetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, inputBus, &microphoneProcFormat, &size);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::configureMicrophoneProc(%p) unable to get output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to get output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
-    microphoneProcFormat.mSampleRate = sampleRate();
+    microphoneProcFormat.mSampleRate = m_clients.first()->sampleRate();
     err = AudioUnitSetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, inputBus, &microphoneProcFormat, size);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::configureMicrophoneProc(%p) unable to set output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::configureMicrophoneProc(%p) unable to set output stream format, error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
@@ -162,12 +287,12 @@
     return err;
 }
 
-OSStatus CoreAudioCaptureSource::configureSpeakerProc()
+OSStatus CoreAudioSharedUnit::configureSpeakerProc()
 {
     AURenderCallbackStruct callback = { speakerCallback, this };
     auto err = AudioUnitSetProperty(m_ioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, outputBus, &callback, sizeof(callback));
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::configureSpeakerProc(%p) unable to set vpio unit speaker proc, error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to set vpio unit speaker proc, error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
@@ -176,14 +301,14 @@
     UInt32 size = sizeof(speakerProcFormat);
     err = AudioUnitGetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &speakerProcFormat, &size);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
-    speakerProcFormat.mSampleRate = sampleRate();
+    speakerProcFormat.mSampleRate = m_clients.first()->sampleRate();
     err = AudioUnitSetProperty(m_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, outputBus, &speakerProcFormat, size);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::configureSpeakerProc(%p) unable to get input stream format, error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
@@ -193,32 +318,15 @@
     return err;
 }
 
-void CoreAudioCaptureSource::addEchoCancellationSource(AudioSampleDataSource& source)
-{
-    if (!source.setOutputFormat(m_speakerProcFormat)) {
-        LOG(Media, "CoreAudioCaptureSource::addEchoCancellationSource: source %p configureOutput failed", &source);
-        return;
-    }
-
-    std::lock_guard<Lock> lock(m_pendingSourceQueueLock);
-    m_pendingSources.append({ QueueAction::Add, source });
-}
-
-void CoreAudioCaptureSource::removeEchoCancellationSource(AudioSampleDataSource& source)
-{
-    std::lock_guard<Lock> lock(m_pendingSourceQueueLock);
-    m_pendingSources.append({ QueueAction::Remove, source });
-}
-
 #if !LOG_DISABLED
-void CoreAudioCaptureSource::checkTimestamps(const AudioTimeStamp& timeStamp, uint64_t sampleTime, double hostTime)
+void CoreAudioSharedUnit::checkTimestamps(const AudioTimeStamp& timeStamp, uint64_t sampleTime, double hostTime)
 {
     if (!timeStamp.mSampleTime || sampleTime == m_latestMicTimeStamp || !hostTime)
-        LOG(Media, "CoreAudioCaptureSource::checkTimestamps: unusual timestamps, sample time = %lld, previous sample time = %lld, hostTime %f", sampleTime, m_latestMicTimeStamp, hostTime);
+        LOG(Media, "CoreAudioSharedUnit::checkTimestamps: unusual timestamps, sample time = %lld, previous sample time = %lld, hostTime %f", sampleTime, m_latestMicTimeStamp, hostTime);
 }
 #endif
 
-OSStatus CoreAudioCaptureSource::provideSpeakerData(AudioUnitRenderActionFlags& /*ioActionFlags*/, const AudioTimeStamp& timeStamp, UInt32 /*inBusNumber*/, UInt32 inNumberFrames, AudioBufferList* ioData)
+OSStatus CoreAudioSharedUnit::provideSpeakerData(AudioUnitRenderActionFlags& /*ioActionFlags*/, const AudioTimeStamp& timeStamp, UInt32 /*inBusNumber*/, UInt32 inNumberFrames, AudioBufferList* ioData)
 {
     // Called when the audio unit needs data to play through the speakers.
 #if !LOG_DISABLED
@@ -226,7 +334,7 @@
 #endif
 
     if (m_speakerSampleBuffer->sampleCapacity() < inNumberFrames) {
-        LOG(Media, "CoreAudioCaptureSource::provideSpeakerData: speaker sample buffer size (%d) too small for amount of sample data requested (%d)!", m_speakerSampleBuffer->sampleCapacity(), (int)inNumberFrames);
+        LOG(Media, "CoreAudioSharedUnit::provideSpeakerData: speaker sample buffer size (%d) too small for amount of sample data requested (%d)!", m_speakerSampleBuffer->sampleCapacity(), (int)inNumberFrames);
         return kAudio_ParamError;
     }
 
@@ -272,15 +380,15 @@
     return noErr;
 }
 
-OSStatus CoreAudioCaptureSource::speakerCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
+OSStatus CoreAudioSharedUnit::speakerCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
 {
     ASSERT(ioActionFlags);
     ASSERT(inTimeStamp);
-    auto dataSource = static_cast<CoreAudioCaptureSource*>(inRefCon);
+    auto dataSource = static_cast<CoreAudioSharedUnit*>(inRefCon);
     return dataSource->provideSpeakerData(*ioActionFlags, *inTimeStamp, inBusNumber, inNumberFrames, ioData);
 }
 
-OSStatus CoreAudioCaptureSource::processMicrophoneSamples(AudioUnitRenderActionFlags& ioActionFlags, const AudioTimeStamp& timeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* /*ioData*/)
+OSStatus CoreAudioSharedUnit::processMicrophoneSamples(AudioUnitRenderActionFlags& ioActionFlags, const AudioTimeStamp& timeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* /*ioData*/)
 {
 #if !LOG_DISABLED
     ++m_microphoneProcsCalled;
@@ -291,7 +399,7 @@
     AudioBufferList& bufferList = m_microphoneSampleBuffer->bufferList();
     auto err = AudioUnitRender(m_ioUnit, &ioActionFlags, &timeStamp, inBusNumber, inNumberFrames, &bufferList);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::processMicrophoneSamples(%p) AudioUnitRender failed with error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::processMicrophoneSamples(%p) AudioUnitRender failed with error %d (%.4s)", this, (int)err, (char*)&err);
         return err;
     }
 
@@ -303,27 +411,30 @@
     m_latestMicTimeStamp = sampleTime;
     m_microphoneSampleBuffer->setTimes(adjustedHostTime, sampleTime);
 
+    for (auto* client : m_clients) {
+        if (client->isProducingData())
+            client->audioSamplesAvailable(MediaTime(sampleTime, m_microphoneProcFormat.sampleRate()), m_microphoneSampleBuffer->bufferList(), m_microphoneProcFormat, inNumberFrames);
+    }
 
-    audioSamplesAvailable(MediaTime(sampleTime, m_microphoneProcFormat.sampleRate()), m_microphoneSampleBuffer->bufferList(), m_microphoneProcFormat, inNumberFrames);
-
     return noErr;
 }
 
-OSStatus CoreAudioCaptureSource::microphoneCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
+OSStatus CoreAudioSharedUnit::microphoneCallback(void *inRefCon, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData)
 {
     ASSERT(ioActionFlags);
     ASSERT(inTimeStamp);
-    CoreAudioCaptureSource* dataSource = static_cast<CoreAudioCaptureSource*>(inRefCon);
+    CoreAudioSharedUnit* dataSource = static_cast<CoreAudioSharedUnit*>(inRefCon);
     return dataSource->processMicrophoneSamples(*ioActionFlags, *inTimeStamp, inBusNumber, inNumberFrames, ioData);
 }
 
-void CoreAudioCaptureSource::cleanupAudioUnits()
+void CoreAudioSharedUnit::cleanupAudioUnits()
 {
+    ASSERT(m_clients.isEmpty());
     if (m_ioUnitInitialized) {
         ASSERT(m_ioUnit);
         auto err = AudioUnitUninitialize(m_ioUnit);
         if (err)
-            LOG(Media, "CoreAudioCaptureSource::cleanupAudioUnits(%p) AudioUnitUninitialize failed with error %d (%.4s)", this, (int)err, (char*)&err);
+            LOG(Media, "CoreAudioSharedUnit::cleanupAudioUnits(%p) AudioUnitUninitialize failed with error %d (%.4s)", this, (int)err, (char*)&err);
         m_ioUnitInitialized = false;
     }
 
@@ -339,97 +450,13 @@
 #endif
 }
 
-OSStatus CoreAudioCaptureSource::defaultInputDevice(uint32_t* deviceID)
+void CoreAudioSharedUnit::startProducingData()
 {
-    ASSERT(m_ioUnit);
+    ASSERT(isMainThread());
 
-    UInt32 propertySize = sizeof(*deviceID);
-    auto err = AudioUnitGetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, deviceID, &propertySize);
-    if (err)
-        LOG(Media, "CoreAudioCaptureSource::defaultInputDevice(%p) unable to get default input device ID, error %d (%.4s)", this, (int)err, (char*)&err);
+    if (++m_producingCount != 1)
+        return;
 
-    return err;
-}
-
-OSStatus CoreAudioCaptureSource::setupAudioUnits()
-{
-    if (m_ioUnit)
-        return 0;
-
-    AudioComponentDescription ioUnitDescription = { kAudioUnitType_Output, kAudioUnitSubType_VoiceProcessingIO, kAudioUnitManufacturer_Apple, 0, 0 };
-    AudioComponent ioComponent = AudioComponentFindNext(nullptr, &ioUnitDescription);
-    ASSERT(ioComponent);
-    if (!ioComponent) {
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to find vpio unit component", this);
-        return -1;
-    }
-
-#if !LOG_DISABLED
-    CFStringRef name = nullptr;
-    AudioComponentCopyName(ioComponent, &name);
-    if (name) {
-        m_ioUnitName = name;
-        CFRelease(name);
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) created \"%s\" component", this, m_ioUnitName.utf8().data());
-    }
-#endif
-
-    auto err = AudioComponentInstanceNew(ioComponent, &m_ioUnit);
-    if (err) {
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to open vpio unit, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-    uint32_t param = echoCancellation();
-    err = AudioUnitSetProperty(m_ioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC, kAudioUnitScope_Global, inputBus, &param, sizeof(param));
-    if (err) {
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to set vpio unit echo cancellation, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-
-#if PLATFORM(IOS)
-    param = 1;
-    err = AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, inputBus, &param, sizeof(param));
-    if (err) {
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to enable vpio unit input, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-#else
-    if (!m_captureDeviceID) {
-        err = defaultInputDevice(&m_captureDeviceID);
-        if (err)
-            return err;
-    }
-
-    err = AudioUnitSetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, &m_captureDeviceID, sizeof(m_captureDeviceID));
-    if (err) {
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) unable to set vpio unit capture device ID, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-#endif
-
-    err = configureMicrophoneProc();
-    if (err)
-        return err;
-
-    err = AudioUnitInitialize(m_ioUnit);
-    if (err) {
-        LOG(Media, "CoreAudioCaptureSource::setupAudioUnits(%p) AudioUnitInitialize() failed, error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
-    }
-    m_ioUnitInitialized = true;
-
-    err = configureSpeakerProc();
-    if (err)
-        return err;
-
-    return err;
-}
-
-void CoreAudioCaptureSource::startProducingData()
-{
-    ASSERT(isMainThread());
-
     if (m_ioUnitStarted)
         return;
 
@@ -446,40 +473,44 @@
 
     err = AudioOutputUnitStart(m_ioUnit);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::start(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::start(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
         return;
     }
 
     m_ioUnitStarted = true;
-    m_muted = false;
 }
 
-void CoreAudioCaptureSource::stopProducingData()
+void CoreAudioSharedUnit::stopProducingData()
 {
     ASSERT(isMainThread());
 
+    if (--m_producingCount)
+        return;
+
     if (!m_ioUnit || !m_ioUnitStarted)
         return;
 
     auto err = AudioOutputUnitStop(m_ioUnit);
     if (err) {
-        LOG(Media, "CoreAudioCaptureSource::stop(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
+        LOG(Media, "CoreAudioSharedUnit::stop(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
         return;
     }
 
     m_ioUnitStarted = false;
-    m_muted = true;
 }
 
-OSStatus CoreAudioCaptureSource::suspend()
+OSStatus CoreAudioSharedUnit::suspend()
 {
     ASSERT(isMainThread());
 
+    m_activeSources.clear();
+    m_pendingSources.clear();
+
     if (m_ioUnitStarted) {
         ASSERT(m_ioUnit);
         auto err = AudioOutputUnitStop(m_ioUnit);
         if (err) {
-            LOG(Media, "CoreAudioCaptureSource::resume(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
+            LOG(Media, "CoreAudioSharedUnit::resume(%p) AudioOutputUnitStop failed with error %d (%.4s)", this, (int)err, (char*)&err);
             return err;
         }
         m_ioUnitStarted = false;
@@ -489,7 +520,7 @@
         ASSERT(m_ioUnit);
         auto err = AudioUnitUninitialize(m_ioUnit);
         if (err) {
-            LOG(Media, "CoreAudioCaptureSource::resume(%p) AudioUnitUninitialize failed with error %d (%.4s)", this, (int)err, (char*)&err);
+            LOG(Media, "CoreAudioSharedUnit::resume(%p) AudioUnitUninitialize failed with error %d (%.4s)", this, (int)err, (char*)&err);
             return err;
         }
         m_ioUnitInitialized = false;
@@ -498,22 +529,104 @@
     return 0;
 }
 
-OSStatus CoreAudioCaptureSource::resume()
+OSStatus CoreAudioSharedUnit::defaultInputDevice(uint32_t* deviceID)
 {
-    ASSERT(isMainThread());
     ASSERT(m_ioUnit);
-    ASSERT(!m_ioUnitStarted);
 
-    auto err = AudioOutputUnitStart(m_ioUnit);
-    if (err) {
-        LOG(Media, "CoreAudioCaptureSource::resume(%p) AudioOutputUnitStart failed with error %d (%.4s)", this, (int)err, (char*)&err);
-        return err;
+    UInt32 propertySize = sizeof(*deviceID);
+    auto err = AudioUnitGetProperty(m_ioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, inputBus, deviceID, &propertySize);
+    if (err)
+        LOG(Media, "CoreAudioSharedUnit::defaultInputDevice(%p) unable to get default input device ID, error %d (%.4s)", this, (int)err, (char*)&err);
+    
+    return err;
+}
+
+CaptureSourceOrError CoreAudioCaptureSource::create(const String& deviceID, const MediaConstraints* constraints)
+{
+    String label;
+    uint32_t persistentID = 0;
+#if PLATFORM(MAC)
+    auto device = CoreAudioCaptureDeviceManager::singleton().coreAudioDeviceWithUID(deviceID);
+    if (!device)
+        return { };
+
+    label = device->label();
+    persistentID = device->deviceID();
+#elif PLATFORM(IOS)
+    auto device = AVAudioSessionCaptureDeviceManager::singleton().audioSessionDeviceWithUID(deviceID);
+    if (!device)
+        return { };
+
+    label = device->label();
+#endif
+    auto source = adoptRef(*new CoreAudioCaptureSource(deviceID, label, persistentID));
+
+    if (constraints) {
+        auto result = source->applyConstraints(*constraints);
+        if (result)
+            return WTFMove(result.value().first);
     }
-    m_ioUnitStarted = false;
+    return CaptureSourceOrError(WTFMove(source));
+}
 
-    return err;
+// Returns the process-wide factory used by RealtimeMediaSourceCenter to
+// create Core Audio capture sources. NeverDestroyed avoids exit-time
+// destruction ordering issues.
+RealtimeMediaSource::AudioCaptureFactory& CoreAudioCaptureSource::factory()
+{
+    static NeverDestroyed<CoreAudioCaptureSourceFactory> factory;
+    return factory.get();
 }
 
+// A newly constructed source is muted until startProducingData() succeeds.
+// Registering with CoreAudioSharedUnit makes this source a client of the
+// shared VPIO audio unit; the first registered client's settings configure
+// the unit (see CoreAudioSharedUnit::addClient).
+CoreAudioCaptureSource::CoreAudioCaptureSource(const String& deviceID, const String& label, uint32_t persistentID)
+    : RealtimeMediaSource(deviceID, RealtimeMediaSource::Type::Audio, label)
+    , m_captureDeviceID(persistentID)
+{
+    m_muted = true;
+
+    setVolume(1.0);
+    setSampleRate(AudioSession::sharedSession().sampleRate());
+    setEchoCancellation(true);
+
+    CoreAudioSharedUnit::singleton().addClient(*this);
+}
+
+// Unregister from the shared unit so it stops delivering audio to (and
+// holding a reference to) this source.
+CoreAudioCaptureSource::~CoreAudioCaptureSource()
+{
+    CoreAudioSharedUnit::singleton().removeClient(*this);
+}
+
+// Forwards to the shared unit: echo-cancellation reference sources are now
+// owned per-process by CoreAudioSharedUnit, not per capture source.
+void CoreAudioCaptureSource::addEchoCancellationSource(AudioSampleDataSource& source)
+{
+    CoreAudioSharedUnit::singleton().addEchoCancellationSource(source);
+}
+
+// Counterpart of addEchoCancellationSource(); forwards to the shared unit.
+void CoreAudioCaptureSource::removeEchoCancellationSource(AudioSampleDataSource& source)
+{
+    CoreAudioSharedUnit::singleton().removeEchoCancellationSource(source);
+}
+
+// Asks the shared unit to start and mirrors its resulting state into this
+// source's flags. The shared unit may fail to start (setup/start errors), so
+// re-query isProducingData() rather than assuming success.
+void CoreAudioCaptureSource::startProducingData()
+{
+    CoreAudioSharedUnit::singleton().startProducingData();
+    m_isProducingData = CoreAudioSharedUnit::singleton().isProducingData();
+
+    if (!m_isProducingData)
+        return;
+
+    m_muted = false;
+
+    // The provider must be (re)prepared with the microphone format actually
+    // negotiated by the shared unit, which is only known once it is running.
+    if (m_audioSourceProvider)
+        m_audioSourceProvider->prepare(&CoreAudioSharedUnit::singleton().microphoneFormat().streamDescription());
+}
+
+// Releases this source's claim on the shared VPIO unit; the unit keeps
+// running while other clients are still producing.
+void CoreAudioCaptureSource::stopProducingData()
+{
+    CoreAudioSharedUnit::singleton().stopProducingData();
+    m_isProducingData = false;
+    // A stopped source delivers no audio, so it is muted. (The pre-refactor
+    // code removed by this patch set m_muted = true on stop; `false` here
+    // inverted the flag and left a stopped source reporting unmuted.)
+    m_muted = true;
+
+    if (m_audioSourceProvider)
+        m_audioSourceProvider->unprepare();
+}
+
 const RealtimeMediaSourceCapabilities& CoreAudioCaptureSource::capabilities() const
 {
     if (m_capabilities)
@@ -553,6 +666,17 @@
     RealtimeMediaSource::settingsDidChange();
 }
 
+// Lazily creates the WebAudio provider for this source. If capture is
+// already running, prepare it immediately with the shared unit's microphone
+// format; otherwise startProducingData() will prepare it later.
+AudioSourceProvider* CoreAudioCaptureSource::audioSourceProvider()
+{
+    if (!m_audioSourceProvider) {
+        m_audioSourceProvider = WebAudioSourceProviderAVFObjC::create(*this);
+        if (m_isProducingData)
+            m_audioSourceProvider->prepare(&CoreAudioSharedUnit::singleton().microphoneFormat().streamDescription());
+    }
+
+    return m_audioSourceProvider.get();
+}
+
 } // namespace WebCore
 
 #endif // ENABLE(MEDIA_STREAM)

Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.h (216155 => 216156)


--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.h	2017-05-03 23:48:35 UTC (rev 216155)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.h	2017-05-03 23:50:18 UTC (rev 216156)
@@ -49,6 +49,7 @@
 class AudioSampleBufferList;
 class AudioSampleDataSource;
 class CaptureDeviceInfo;
+class WebAudioSourceProviderAVFObjC;
 
 class CoreAudioCaptureSource : public RealtimeMediaSource {
 public:
@@ -71,14 +72,13 @@
     CoreAudioCaptureSource(const String& deviceID, const String& label, uint32_t persistentID);
     virtual ~CoreAudioCaptureSource();
 
+    friend class CoreAudioSharedUnit;
+
     bool isCaptureSource() const final { return true; }
     void startProducingData() final;
     void stopProducingData() final;
-    bool isProducingData() const final { return m_ioUnitStarted; }
+    bool isProducingData() const final { return m_isProducingData; }
 
-    OSStatus suspend();
-    OSStatus resume();
-
     bool applyVolume(double) override { return true; }
     bool applySampleRate(int) override { return true; }
     bool applyEchoCancellation(bool) override { return true; }
@@ -86,59 +86,18 @@
     const RealtimeMediaSourceCapabilities& capabilities() const final;
     const RealtimeMediaSourceSettings& settings() const final;
     void settingsDidChange() final;
+    AudioSourceProvider* audioSourceProvider() final;
 
-    OSStatus setupAudioUnits();
-    void cleanupAudioUnits();
-    OSStatus configureSpeakerProc();
-    OSStatus configureMicrophoneProc();
-    OSStatus defaultOutputDevice(uint32_t*);
-    OSStatus defaultInputDevice(uint32_t*);
-
-    static OSStatus microphoneCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
-    OSStatus processMicrophoneSamples(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList*);
-
-    static OSStatus speakerCallback(void*, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32, AudioBufferList*);
-    OSStatus provideSpeakerData(AudioUnitRenderActionFlags&, const AudioTimeStamp&, UInt32, UInt32, AudioBufferList*);
-
-    static double preferredSampleRate();
-    static size_t preferredIOBufferSize();
-
-    AudioUnit m_ioUnit { nullptr };
-
-    // Only read/modified from the IO thread.
-    Vector<Ref<AudioSampleDataSource>> m_activeSources;
-
-    enum QueueAction { Add, Remove };
-    Vector<std::pair<QueueAction, Ref<AudioSampleDataSource>>> m_pendingSources;
-
     uint32_t m_captureDeviceID { 0 };
 
-    CAAudioStreamDescription m_microphoneProcFormat;
-    RefPtr<AudioSampleBufferList> m_microphoneSampleBuffer;
-    uint64_t m_latestMicTimeStamp { 0 };
+    bool m_isProducingData { false };
+    bool m_isSuspended { false };
 
-    CAAudioStreamDescription m_speakerProcFormat;
-    RefPtr<AudioSampleBufferList> m_speakerSampleBuffer;
-
-    double m_DTSConversionRatio { 0 };
-
-    bool m_ioUnitInitialized { false };
-    bool m_ioUnitStarted { false };
-
-    Lock m_pendingSourceQueueLock;
-    Lock m_internalStateLock;
-
     mutable std::unique_ptr<RealtimeMediaSourceCapabilities> m_capabilities;
     mutable RealtimeMediaSourceSupportedConstraints m_supportedConstraints;
     mutable std::optional<RealtimeMediaSourceSettings> m_currentSettings;
 
-#if !LOG_DISABLED
-    void checkTimestamps(const AudioTimeStamp&, uint64_t, double);
-
-    String m_ioUnitName;
-    uint64_t m_speakerProcsCalled { 0 };
-    uint64_t m_microphoneProcsCalled { 0 };
-#endif
+    RefPtr<WebAudioSourceProviderAVFObjC> m_audioSourceProvider;
 };
 
 } // namespace WebCore

Modified: trunk/Source/WebCore/platform/mediastream/mac/RealtimeMediaSourceCenterMac.cpp (216155 => 216156)


--- trunk/Source/WebCore/platform/mediastream/mac/RealtimeMediaSourceCenterMac.cpp	2017-05-03 23:48:35 UTC (rev 216155)
+++ trunk/Source/WebCore/platform/mediastream/mac/RealtimeMediaSourceCenterMac.cpp	2017-05-03 23:50:18 UTC (rev 216156)
@@ -47,12 +47,12 @@
 
 void RealtimeMediaSourceCenterMac::setUseAVFoundationAudioCapture(bool enabled)
 {
-    static bool active = false;
-    if (active == enabled)
+    static std::optional<bool> active = std::nullopt;
+    if (active && active.value() == enabled)
         return;
 
     active = enabled;
-    if (active) {
+    if (active.value()) {
         RealtimeMediaSourceCenter::singleton().setAudioFactory(AVAudioCaptureSource::factory());
         RealtimeMediaSourceCenter::singleton().setAudioCaptureDeviceManager(AVCaptureDeviceManager::singleton());
     } else {
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to