Title: [212727] trunk/Source/WebCore
Revision
212727
Author
jer.no...@apple.com
Date
2017-02-21 10:45:10 -0800 (Tue, 21 Feb 2017)

Log Message

Make logging in high-priority audio threads less expensive
https://bugs.webkit.org/show_bug.cgi?id=168639

Reviewed by Jon Lee.

Logging from inside a high-priority audio thread makes a number of calls to malloc, which can block and
therefore cause audio glitches. Make this logging less expensive by dispatching to the main thread
before creating and outputting the log string.

* WebCore.xcodeproj/project.pbxproj:
* platform/audio/mac/AudioSampleDataSource.mm: Renamed from Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp.
(WebCore::AudioSampleDataSource::pushSamplesInternal):
(WebCore::AudioSampleDataSource::pullSamplesInternal):

Modified Paths

Added Paths

Removed Paths

Diff

Modified: trunk/Source/WebCore/ChangeLog (212726 => 212727)


--- trunk/Source/WebCore/ChangeLog	2017-02-21 18:36:00 UTC (rev 212726)
+++ trunk/Source/WebCore/ChangeLog	2017-02-21 18:45:10 UTC (rev 212727)
@@ -1,5 +1,21 @@
 2017-02-21  Jer Noble  <jer.no...@apple.com>
 
+        Make logging in high-priority audio threads less expensive
+        https://bugs.webkit.org/show_bug.cgi?id=168639
+
+        Reviewed by Jon Lee.
+
+        Logging from inside a high-priority audio thread will make a number of calls to malloc, block, and
+        therefore cause audio glitches. Make this logging less expensive by dispatching to the main thread
+        before creating and outputting the log string.
+
+        * WebCore.xcodeproj/project.pbxproj:
+        * platform/audio/mac/AudioSampleDataSource.mm: Renamed from Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp.
+        (WebCore::AudioSampleDataSource::pushSamplesInternal):
+        (WebCore::AudioSampleDataSource::pullSamplesInternal):
+
+2017-02-21  Jer Noble  <jer.no...@apple.com>
+
         Give the Mock audio input a "hum" to make drop-outs more detectable
         https://bugs.webkit.org/show_bug.cgi?id=168641
 

Modified: trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj (212726 => 212727)


--- trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj	2017-02-21 18:36:00 UTC (rev 212726)
+++ trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj	2017-02-21 18:45:10 UTC (rev 212727)
@@ -144,7 +144,7 @@
 		07394ECA1BAB2CD700BE99CD /* MediaDevicesRequest.h in Headers */ = {isa = PBXBuildFile; fileRef = 07394EC91BAB2CD700BE99CD /* MediaDevicesRequest.h */; settings = {ATTRIBUTES = (Private, ); }; };
 		073B87661E4385AC0071C0EC /* AudioSampleBufferList.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 073B87621E43859D0071C0EC /* AudioSampleBufferList.cpp */; };
 		073B87671E4385AC0071C0EC /* AudioSampleBufferList.h in Headers */ = {isa = PBXBuildFile; fileRef = 073B87631E43859D0071C0EC /* AudioSampleBufferList.h */; };
-		073B87681E4385AC0071C0EC /* AudioSampleDataSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 073B87641E43859D0071C0EC /* AudioSampleDataSource.cpp */; };
+		073B87681E4385AC0071C0EC /* AudioSampleDataSource.mm in Sources */ = {isa = PBXBuildFile; fileRef = 073B87641E43859D0071C0EC /* AudioSampleDataSource.mm */; };
 		073B87691E4385AC0071C0EC /* AudioSampleDataSource.h in Headers */ = {isa = PBXBuildFile; fileRef = 073B87651E43859D0071C0EC /* AudioSampleDataSource.h */; };
 		073BE34017D17E01002BD431 /* JSNavigatorUserMedia.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 073BE33E17D17E01002BD431 /* JSNavigatorUserMedia.cpp */; };
 		073BE34117D17E01002BD431 /* JSNavigatorUserMedia.h in Headers */ = {isa = PBXBuildFile; fileRef = 073BE33F17D17E01002BD431 /* JSNavigatorUserMedia.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -7270,7 +7270,7 @@
 		073B87581E40DCFD0071C0EC /* CAAudioStreamDescription.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CAAudioStreamDescription.h; sourceTree = "<group>"; };
 		073B87621E43859D0071C0EC /* AudioSampleBufferList.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioSampleBufferList.cpp; sourceTree = "<group>"; };
 		073B87631E43859D0071C0EC /* AudioSampleBufferList.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioSampleBufferList.h; sourceTree = "<group>"; };
-		073B87641E43859D0071C0EC /* AudioSampleDataSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioSampleDataSource.cpp; sourceTree = "<group>"; };
+		073B87641E43859D0071C0EC /* AudioSampleDataSource.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AudioSampleDataSource.mm; sourceTree = "<group>"; };
 		073B87651E43859D0071C0EC /* AudioSampleDataSource.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioSampleDataSource.h; sourceTree = "<group>"; };
 		073BE33E17D17E01002BD431 /* JSNavigatorUserMedia.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSNavigatorUserMedia.cpp; sourceTree = "<group>"; };
 		073BE33F17D17E01002BD431 /* JSNavigatorUserMedia.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSNavigatorUserMedia.h; sourceTree = "<group>"; };
@@ -25107,7 +25107,7 @@
 				CD2F4A2618D8A3490063746D /* AudioHardwareListenerMac.h */,
 				073B87621E43859D0071C0EC /* AudioSampleBufferList.cpp */,
 				073B87631E43859D0071C0EC /* AudioSampleBufferList.h */,
-				073B87641E43859D0071C0EC /* AudioSampleDataSource.cpp */,
+				073B87641E43859D0071C0EC /* AudioSampleDataSource.mm */,
 				073B87651E43859D0071C0EC /* AudioSampleDataSource.h */,
 				CD54DE4917469C6D005E5B36 /* AudioSessionMac.cpp */,
 				073B87571E40DCFD0071C0EC /* CAAudioStreamDescription.cpp */,
@@ -29327,7 +29327,7 @@
 				FD31608512B026F700C1A359 /* AudioResampler.cpp in Sources */,
 				FD31608712B026F700C1A359 /* AudioResamplerKernel.cpp in Sources */,
 				073B87661E4385AC0071C0EC /* AudioSampleBufferList.cpp in Sources */,
-				073B87681E4385AC0071C0EC /* AudioSampleDataSource.cpp in Sources */,
+				073B87681E4385AC0071C0EC /* AudioSampleDataSource.mm in Sources */,
 				FD8C46EB154608E700A5910C /* AudioScheduledSourceNode.cpp in Sources */,
 				CDA79824170A258300D45C55 /* AudioSession.cpp in Sources */,
 				CDA79827170A279100D45C55 /* AudioSessionIOS.mm in Sources */,

Deleted: trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp (212726 => 212727)


--- trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp	2017-02-21 18:36:00 UTC (rev 212726)
+++ trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp	2017-02-21 18:45:10 UTC (rev 212727)
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "AudioSampleDataSource.h"
-
-#if ENABLE(MEDIA_STREAM)
-
-#include "CAAudioStreamDescription.h"
-#include "CARingBuffer.h"
-#include "Logging.h"
-#include "MediaTimeAVFoundation.h"
-#include <AudioToolbox/AudioConverter.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mutex>
-#include <syslog.h>
-#include <wtf/CurrentTime.h>
-#include <wtf/StringPrintStream.h>
-
-#include "CoreMediaSoftLink.h"
-
-namespace WebCore {
-
-using namespace JSC;
-
-Ref<AudioSampleDataSource> AudioSampleDataSource::create(size_t maximumSampleCount)
-{
-    return adoptRef(*new AudioSampleDataSource(maximumSampleCount));
-}
-
-AudioSampleDataSource::AudioSampleDataSource(size_t maximumSampleCount)
-    : m_inputSampleOffset(MediaTime::invalidTime())
-    , m_maximumSampleCount(maximumSampleCount)
-{
-}
-
-AudioSampleDataSource::~AudioSampleDataSource()
-{
-    m_inputDescription = nullptr;
-    m_outputDescription = nullptr;
-    m_ringBuffer = nullptr;
-    if (m_converter) {
-        AudioConverterDispose(m_converter);
-        m_converter = nullptr;
-    }
-}
-
-void AudioSampleDataSource::setPaused(bool paused)
-{
-    std::lock_guard<Lock> lock(m_lock);
-
-    if (paused == m_paused)
-        return;
-
-    m_transitioningFromPaused = m_paused;
-    m_paused = paused;
-}
-
-OSStatus AudioSampleDataSource::setupConverter()
-{
-    ASSERT(m_inputDescription && m_outputDescription);
-
-    if (m_converter) {
-        AudioConverterDispose(m_converter);
-        m_converter = nullptr;
-    }
-
-    if (*m_inputDescription == *m_outputDescription)
-        return 0;
-
-    OSStatus err = AudioConverterNew(&m_inputDescription->streamDescription(), &m_outputDescription->streamDescription(), &m_converter);
-    if (err)
-        LOG_ERROR("AudioSampleDataSource::setupConverter(%p) - AudioConverterNew returned error %d (%.4s)", this, (int)err, (char*)&err);
-
-    return err;
-
-}
-
-OSStatus AudioSampleDataSource::setInputFormat(const CAAudioStreamDescription& format)
-{
-    ASSERT(format.sampleRate() >= 0);
-
-    m_inputDescription = std::make_unique<CAAudioStreamDescription>(format);
-    if (m_outputDescription)
-        return setupConverter();
-
-    return 0;
-}
-
-OSStatus AudioSampleDataSource::setOutputFormat(const CAAudioStreamDescription& format)
-{
-    ASSERT(m_inputDescription);
-    ASSERT(format.sampleRate() >= 0);
-
-    m_outputDescription = std::make_unique<CAAudioStreamDescription>(format);
-    if (!m_ringBuffer)
-        m_ringBuffer = std::make_unique<CARingBuffer>();
-
-    m_ringBuffer->allocate(format, static_cast<size_t>(m_maximumSampleCount));
-    m_scratchBuffer = AudioSampleBufferList::create(m_outputDescription->streamDescription(), m_maximumSampleCount);
-
-    return setupConverter();
-}
-
-MediaTime AudioSampleDataSource::hostTime() const
-{
-    // Based on listing #2 from Apple Technical Q&A QA1398, modified to be thread-safe.
-    static double frequency;
-    static mach_timebase_info_data_t timebaseInfo;
-    static std::once_flag initializeTimerOnceFlag;
-    std::call_once(initializeTimerOnceFlag, [] {
-        kern_return_t kr = mach_timebase_info(&timebaseInfo);
-        frequency = 1e-9 * static_cast<double>(timebaseInfo.numer) / static_cast<double>(timebaseInfo.denom);
-        ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
-        ASSERT(timebaseInfo.denom);
-    });
-
-    return MediaTime::createWithDouble(mach_absolute_time() * frequency);
-}
-
-void AudioSampleDataSource::pushSamplesInternal(const AudioBufferList& bufferList, const MediaTime& presentationTime, size_t sampleCount)
-{
-    ASSERT(m_lock.isHeld());
-
-    const AudioBufferList* sampleBufferList;
-    if (m_converter) {
-        m_scratchBuffer->reset();
-        OSStatus err = m_scratchBuffer->copyFrom(bufferList, m_converter);
-        if (err)
-            return;
-
-        sampleBufferList = m_scratchBuffer->bufferList().list();
-    } else
-        sampleBufferList = &bufferList;
-
-    MediaTime sampleTime = presentationTime;
-    if (m_inputSampleOffset == MediaTime::invalidTime()) {
-        m_inputSampleOffset = MediaTime(1 - sampleTime.timeValue(), sampleTime.timeScale());
-        if (m_inputSampleOffset.timeScale() != sampleTime.timeScale()) {
-            // FIXME: It should be possible to do this without calling CMTimeConvertScale.
-            m_inputSampleOffset = toMediaTime(CMTimeConvertScale(toCMTime(m_inputSampleOffset), sampleTime.timeScale(), kCMTimeRoundingMethod_Default));
-        }
-        LOG(MediaCaptureSamples, "@@ pushSamples: input sample offset is %lld, m_maximumSampleCount = %zu", m_inputSampleOffset.timeValue(), m_maximumSampleCount);
-    }
-    sampleTime += m_inputSampleOffset;
-
-#if !LOG_DISABLED
-    uint64_t startFrame1 = 0;
-    uint64_t endFrame1 = 0;
-    m_ringBuffer->getCurrentFrameBounds(startFrame1, endFrame1);
-#endif
-
-    m_ringBuffer->store(sampleBufferList, sampleCount, sampleTime.timeValue());
-    m_timeStamp = sampleTime.timeValue();
-
-    LOG(MediaCaptureSamples, "@@ pushSamples: added %ld samples for time = %s (was %s), mach time = %lld", sampleCount, toString(sampleTime).utf8().data(), toString(presentationTime).utf8().data(), mach_absolute_time());
-
-#if !LOG_DISABLED
-    uint64_t startFrame2 = 0;
-    uint64_t endFrame2 = 0;
-    m_ringBuffer->getCurrentFrameBounds(startFrame2, endFrame2);
-    LOG(MediaCaptureSamples, "@@ pushSamples: buffered range was [%lld .. %lld], is [%lld .. %lld]", startFrame1, endFrame1, startFrame2, endFrame2);
-#endif
-}
-
-void AudioSampleDataSource::pushSamples(const AudioStreamBasicDescription& sampleDescription, CMSampleBufferRef sampleBuffer)
-{
-    std::lock_guard<Lock> lock(m_lock);
-
-    ASSERT_UNUSED(sampleDescription, *m_inputDescription == sampleDescription);
-    ASSERT(m_ringBuffer);
-    
-    WebAudioBufferList list(*m_inputDescription, sampleBuffer);
-    pushSamplesInternal(list, toMediaTime(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)), CMSampleBufferGetNumSamples(sampleBuffer));
-}
-
-void AudioSampleDataSource::pushSamples(const MediaTime& sampleTime, const PlatformAudioData& audioData, size_t sampleCount)
-{
-    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
-    ASSERT(is<WebAudioBufferList>(audioData));
-    pushSamplesInternal(*downcast<WebAudioBufferList>(audioData).list(), sampleTime, sampleCount);
-}
-
-bool AudioSampleDataSource::pullSamplesInternal(AudioBufferList& buffer, size_t& sampleCount, uint64_t timeStamp, double /*hostTime*/, PullMode mode)
-{
-    ASSERT(m_lock.isHeld());
-    size_t byteCount = sampleCount * m_outputDescription->bytesPerFrame();
-
-    ASSERT(buffer.mNumberBuffers == m_ringBuffer->channelCount());
-    if (buffer.mNumberBuffers != m_ringBuffer->channelCount()) {
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        sampleCount = 0;
-        return false;
-    }
-
-    if (!m_ringBuffer || m_muted || m_inputSampleOffset == MediaTime::invalidTime()) {
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        sampleCount = 0;
-        return false;
-    }
-
-    uint64_t startFrame = 0;
-    uint64_t endFrame = 0;
-    m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
-
-    if (m_transitioningFromPaused) {
-        uint64_t buffered = endFrame - m_timeStamp;
-        if (buffered < sampleCount * 2) {
-            AudioSampleBufferList::zeroABL(buffer, byteCount);
-            sampleCount = 0;
-            return false;
-        }
-
-        const double twentyMS = .02;
-        const double tenMS = .01;
-        const double fiveMS = .005;
-        double sampleRate = m_outputDescription->sampleRate();
-        m_outputSampleOffset = timeStamp + m_timeStamp;
-        if (buffered > sampleRate * twentyMS)
-            m_outputSampleOffset -= sampleRate * twentyMS;
-        else if (buffered > sampleRate * tenMS)
-            m_outputSampleOffset -= sampleRate * tenMS;
-        else if (buffered > sampleRate * fiveMS)
-            m_outputSampleOffset -= sampleRate * fiveMS;
-
-        m_transitioningFromPaused = false;
-    }
-
-    timeStamp += m_outputSampleOffset;
-
-    LOG(MediaCaptureSamples, "** pullSamples: asking for %ld samples at time = %lld (was %lld)", sampleCount, timeStamp, timeStamp - m_outputSampleOffset);
-
-    uint64_t framesAvailable = sampleCount;
-    if (timeStamp < startFrame || timeStamp + sampleCount > endFrame) {
-        if (timeStamp + sampleCount < startFrame || timeStamp > endFrame)
-            framesAvailable = 0;
-        else if (timeStamp < startFrame)
-            framesAvailable = timeStamp + sampleCount - startFrame;
-        else
-            framesAvailable = timeStamp + sampleCount - endFrame;
-
-        LOG(MediaCaptureSamples, "** pullSamplesInternal: sample %lld is not completely in range [%lld .. %lld], returning %lld frames", timeStamp, startFrame, endFrame, framesAvailable);
-
-        if (framesAvailable < sampleCount) {
-            const double twentyMS = .02;
-            double sampleRate = m_outputDescription->sampleRate();
-            auto delta = static_cast<int64_t>(timeStamp) - endFrame;
-            if (delta > 0 && delta < sampleRate * twentyMS)
-                m_outputSampleOffset -= delta;
-        }
-
-        if (!framesAvailable) {
-            AudioSampleBufferList::zeroABL(buffer, byteCount);
-            return false;
-        }
-    }
-
-    if (m_volume >= .95) {
-        m_ringBuffer->fetch(&buffer, sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix);
-        return true;
-    }
-
-    if (m_scratchBuffer->copyFrom(*m_ringBuffer.get(), sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix)) {
-        AudioSampleBufferList::zeroABL(buffer, sampleCount);
-        return false;
-    }
-
-    m_scratchBuffer->applyGain(m_volume);
-    if (m_scratchBuffer->copyTo(buffer, sampleCount))
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-
-    return true;
-}
-
-bool AudioSampleDataSource::pullAvalaibleSamplesAsChunks(AudioBufferList& buffer, size_t sampleCountPerChunk, uint64_t timeStamp, Function<void()>&& consumeFilledBuffer)
-{
-    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
-    if (!lock.owns_lock() || !m_ringBuffer)
-        return false;
-
-    ASSERT(buffer.mNumberBuffers == m_ringBuffer->channelCount());
-    if (buffer.mNumberBuffers != m_ringBuffer->channelCount())
-        return false;
-
-    uint64_t startFrame = 0;
-    uint64_t endFrame = 0;
-    m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
-    if (timeStamp < startFrame)
-        return false;
-
-    startFrame = timeStamp;
-    while (endFrame - startFrame >= sampleCountPerChunk) {
-        if (m_ringBuffer->fetch(&buffer, sampleCountPerChunk, startFrame, CARingBuffer::Copy))
-            return false;
-        consumeFilledBuffer();
-        startFrame += sampleCountPerChunk;
-    }
-    return true;
-}
-
-bool AudioSampleDataSource::pullSamples(AudioBufferList& buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
-{
-    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
-    if (!lock.owns_lock() || !m_ringBuffer) {
-        size_t byteCount = sampleCount * m_outputDescription->bytesPerFrame();
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        return false;
-    }
-
-    return pullSamplesInternal(buffer, sampleCount, timeStamp, hostTime, mode);
-}
-
-bool AudioSampleDataSource::pullSamples(AudioSampleBufferList& buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
-{
-    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
-    if (!lock.owns_lock() || !m_ringBuffer) {
-        buffer.zero();
-        return false;
-    }
-
-    if (!pullSamplesInternal(buffer.bufferList(), sampleCount, timeStamp, hostTime, mode))
-        return false;
-
-    buffer.setTimes(timeStamp, hostTime);
-    buffer.setSampleCount(sampleCount);
-
-    return true;
-}
-
-} // namespace WebCore
-
-#endif // ENABLE(MEDIA_STREAM)

Copied: trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm (from rev 212726, trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp) (0 => 212727)


--- trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm	                        (rev 0)
+++ trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm	2017-02-21 18:45:10 UTC (rev 212727)
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AudioSampleDataSource.h"
+
+#if ENABLE(MEDIA_STREAM)
+
+#include "CAAudioStreamDescription.h"
+#include "CARingBuffer.h"
+#include "Logging.h"
+#include "MediaTimeAVFoundation.h"
+#include <AudioToolbox/AudioConverter.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mutex>
+#include <syslog.h>
+#include <wtf/CurrentTime.h>
+#include <wtf/StringPrintStream.h>
+
+#include "CoreMediaSoftLink.h"
+
+namespace WebCore {
+
+using namespace JSC;
+
+Ref<AudioSampleDataSource> AudioSampleDataSource::create(size_t maximumSampleCount)
+{
+    return adoptRef(*new AudioSampleDataSource(maximumSampleCount));
+}
+
+AudioSampleDataSource::AudioSampleDataSource(size_t maximumSampleCount)
+    : m_inputSampleOffset(MediaTime::invalidTime())
+    , m_maximumSampleCount(maximumSampleCount)
+{
+}
+
+AudioSampleDataSource::~AudioSampleDataSource()
+{
+    m_inputDescription = nullptr;
+    m_outputDescription = nullptr;
+    m_ringBuffer = nullptr;
+    if (m_converter) {
+        AudioConverterDispose(m_converter);
+        m_converter = nullptr;
+    }
+}
+
+void AudioSampleDataSource::setPaused(bool paused)
+{
+    std::lock_guard<Lock> lock(m_lock);
+
+    if (paused == m_paused)
+        return;
+
+    m_transitioningFromPaused = m_paused;
+    m_paused = paused;
+}
+
+OSStatus AudioSampleDataSource::setupConverter()
+{
+    ASSERT(m_inputDescription && m_outputDescription);
+
+    if (m_converter) {
+        AudioConverterDispose(m_converter);
+        m_converter = nullptr;
+    }
+
+    if (*m_inputDescription == *m_outputDescription)
+        return 0;
+
+    OSStatus err = AudioConverterNew(&m_inputDescription->streamDescription(), &m_outputDescription->streamDescription(), &m_converter);
+    if (err)
+        LOG_ERROR("AudioSampleDataSource::setupConverter(%p) - AudioConverterNew returned error %d (%.4s)", this, (int)err, (char*)&err);
+
+    return err;
+
+}
+
+OSStatus AudioSampleDataSource::setInputFormat(const CAAudioStreamDescription& format)
+{
+    ASSERT(format.sampleRate() >= 0);
+
+    m_inputDescription = std::make_unique<CAAudioStreamDescription>(format);
+    if (m_outputDescription)
+        return setupConverter();
+
+    return 0;
+}
+
+OSStatus AudioSampleDataSource::setOutputFormat(const CAAudioStreamDescription& format)
+{
+    ASSERT(m_inputDescription);
+    ASSERT(format.sampleRate() >= 0);
+
+    m_outputDescription = std::make_unique<CAAudioStreamDescription>(format);
+    if (!m_ringBuffer)
+        m_ringBuffer = std::make_unique<CARingBuffer>();
+
+    m_ringBuffer->allocate(format, static_cast<size_t>(m_maximumSampleCount));
+    m_scratchBuffer = AudioSampleBufferList::create(m_outputDescription->streamDescription(), m_maximumSampleCount);
+
+    return setupConverter();
+}
+
+MediaTime AudioSampleDataSource::hostTime() const
+{
+    // Based on listing #2 from Apple Technical Q&A QA1398, modified to be thread-safe.
+    static double frequency;
+    static mach_timebase_info_data_t timebaseInfo;
+    static std::once_flag initializeTimerOnceFlag;
+    std::call_once(initializeTimerOnceFlag, [] {
+        kern_return_t kr = mach_timebase_info(&timebaseInfo);
+        frequency = 1e-9 * static_cast<double>(timebaseInfo.numer) / static_cast<double>(timebaseInfo.denom);
+        ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
+        ASSERT(timebaseInfo.denom);
+    });
+
+    return MediaTime::createWithDouble(mach_absolute_time() * frequency);
+}
+
+void AudioSampleDataSource::pushSamplesInternal(const AudioBufferList& bufferList, const MediaTime& presentationTime, size_t sampleCount)
+{
+    ASSERT(m_lock.isHeld());
+
+    const AudioBufferList* sampleBufferList;
+    if (m_converter) {
+        m_scratchBuffer->reset();
+        OSStatus err = m_scratchBuffer->copyFrom(bufferList, m_converter);
+        if (err)
+            return;
+
+        sampleBufferList = m_scratchBuffer->bufferList().list();
+    } else
+        sampleBufferList = &bufferList;
+
+    MediaTime sampleTime = presentationTime;
+    if (m_inputSampleOffset == MediaTime::invalidTime()) {
+        m_inputSampleOffset = MediaTime(1 - sampleTime.timeValue(), sampleTime.timeScale());
+        if (m_inputSampleOffset.timeScale() != sampleTime.timeScale()) {
+            // FIXME: It should be possible to do this without calling CMTimeConvertScale.
+            m_inputSampleOffset = toMediaTime(CMTimeConvertScale(toCMTime(m_inputSampleOffset), sampleTime.timeScale(), kCMTimeRoundingMethod_Default));
+        }
+        LOG(MediaCaptureSamples, "@@ pushSamples: input sample offset is %lld, m_maximumSampleCount = %zu", m_inputSampleOffset.timeValue(), m_maximumSampleCount);
+    }
+    sampleTime += m_inputSampleOffset;
+
+#if !LOG_DISABLED
+    uint64_t startFrame1 = 0;
+    uint64_t endFrame1 = 0;
+    m_ringBuffer->getCurrentFrameBounds(startFrame1, endFrame1);
+#endif
+
+    m_ringBuffer->store(sampleBufferList, sampleCount, sampleTime.timeValue());
+    m_timeStamp = sampleTime.timeValue();
+
+
+#if !LOG_DISABLED
+    uint64_t startFrame2 = 0;
+    uint64_t endFrame2 = 0;
+    m_ringBuffer->getCurrentFrameBounds(startFrame2, endFrame2);
+    dispatch_async(dispatch_get_main_queue(), [sampleCount, sampleTime, presentationTime, absoluteTime = mach_absolute_time(), startFrame1, endFrame1, startFrame2, endFrame2] {
+        LOG(MediaCaptureSamples, "@@ pushSamples: added %ld samples for time = %s (was %s), mach time = %lld", sampleCount, toString(sampleTime).utf8().data(), toString(presentationTime).utf8().data(), absoluteTime);
+        LOG(MediaCaptureSamples, "@@ pushSamples: buffered range was [%lld .. %lld], is [%lld .. %lld]", startFrame1, endFrame1, startFrame2, endFrame2);
+    });
+#endif
+}
+
+void AudioSampleDataSource::pushSamples(const AudioStreamBasicDescription& sampleDescription, CMSampleBufferRef sampleBuffer)
+{
+    std::lock_guard<Lock> lock(m_lock);
+
+    ASSERT_UNUSED(sampleDescription, *m_inputDescription == sampleDescription);
+    ASSERT(m_ringBuffer);
+    
+    WebAudioBufferList list(*m_inputDescription, sampleBuffer);
+    pushSamplesInternal(list, toMediaTime(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)), CMSampleBufferGetNumSamples(sampleBuffer));
+}
+
+void AudioSampleDataSource::pushSamples(const MediaTime& sampleTime, const PlatformAudioData& audioData, size_t sampleCount)
+{
+    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
+    ASSERT(is<WebAudioBufferList>(audioData));
+    pushSamplesInternal(*downcast<WebAudioBufferList>(audioData).list(), sampleTime, sampleCount);
+}
+
+bool AudioSampleDataSource::pullSamplesInternal(AudioBufferList& buffer, size_t& sampleCount, uint64_t timeStamp, double /*hostTime*/, PullMode mode)
+{
+    ASSERT(m_lock.isHeld());
+    size_t byteCount = sampleCount * m_outputDescription->bytesPerFrame();
+
+    ASSERT(buffer.mNumberBuffers == m_ringBuffer->channelCount());
+    if (buffer.mNumberBuffers != m_ringBuffer->channelCount()) {
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        sampleCount = 0;
+        return false;
+    }
+
+    if (!m_ringBuffer || m_muted || m_inputSampleOffset == MediaTime::invalidTime()) {
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        sampleCount = 0;
+        return false;
+    }
+
+    uint64_t startFrame = 0;
+    uint64_t endFrame = 0;
+    m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
+
+    if (m_transitioningFromPaused) {
+        uint64_t buffered = endFrame - m_timeStamp;
+        if (buffered < sampleCount * 2) {
+            AudioSampleBufferList::zeroABL(buffer, byteCount);
+            sampleCount = 0;
+            return false;
+        }
+
+        const double twentyMS = .02;
+        const double tenMS = .01;
+        const double fiveMS = .005;
+        double sampleRate = m_outputDescription->sampleRate();
+        m_outputSampleOffset = timeStamp + m_timeStamp;
+        if (buffered > sampleRate * twentyMS)
+            m_outputSampleOffset -= sampleRate * twentyMS;
+        else if (buffered > sampleRate * tenMS)
+            m_outputSampleOffset -= sampleRate * tenMS;
+        else if (buffered > sampleRate * fiveMS)
+            m_outputSampleOffset -= sampleRate * fiveMS;
+
+        m_transitioningFromPaused = false;
+    }
+
+    timeStamp += m_outputSampleOffset;
+
+#if !LOG_DISABLED
+    dispatch_async(dispatch_get_main_queue(), [sampleCount, timeStamp, sampleOffset = m_outputSampleOffset] {
+        LOG(MediaCaptureSamples, "** pullSamples: asking for %ld samples at time = %lld (was %lld)", sampleCount, timeStamp, timeStamp - sampleOffset);
+    });
+#endif
+
+    uint64_t framesAvailable = sampleCount;
+    if (timeStamp < startFrame || timeStamp + sampleCount > endFrame) {
+        if (timeStamp + sampleCount < startFrame || timeStamp > endFrame)
+            framesAvailable = 0;
+        else if (timeStamp < startFrame)
+            framesAvailable = timeStamp + sampleCount - startFrame;
+        else
+            framesAvailable = timeStamp + sampleCount - endFrame;
+
+#if !LOG_DISABLED
+        dispatch_async(dispatch_get_main_queue(), [timeStamp, startFrame, endFrame, framesAvailable] {
+            LOG(MediaCaptureSamples, "** pullSamplesInternal: sample %lld is not completely in range [%lld .. %lld], returning %lld frames", timeStamp, startFrame, endFrame, framesAvailable);
+        });
+#endif
+
+        if (framesAvailable < sampleCount) {
+            const double twentyMS = .02;
+            double sampleRate = m_outputDescription->sampleRate();
+            auto delta = static_cast<int64_t>(timeStamp) - endFrame;
+            if (delta > 0 && delta < sampleRate * twentyMS)
+                m_outputSampleOffset -= delta;
+        }
+
+        if (!framesAvailable) {
+            AudioSampleBufferList::zeroABL(buffer, byteCount);
+            return false;
+        }
+    }
+
+    if (m_volume >= .95) {
+        m_ringBuffer->fetch(&buffer, sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix);
+        return true;
+    }
+
+    if (m_scratchBuffer->copyFrom(*m_ringBuffer.get(), sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix)) {
+        AudioSampleBufferList::zeroABL(buffer, sampleCount);
+        return false;
+    }
+
+    m_scratchBuffer->applyGain(m_volume);
+    if (m_scratchBuffer->copyTo(buffer, sampleCount))
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+
+    return true;
+}
+
+bool AudioSampleDataSource::pullAvalaibleSamplesAsChunks(AudioBufferList& buffer, size_t sampleCountPerChunk, uint64_t timeStamp, Function<void()>&& consumeFilledBuffer)
+{
+    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
+    if (!lock.owns_lock() || !m_ringBuffer)
+        return false;
+
+    ASSERT(buffer.mNumberBuffers == m_ringBuffer->channelCount());
+    if (buffer.mNumberBuffers != m_ringBuffer->channelCount())
+        return false;
+
+    uint64_t startFrame = 0;
+    uint64_t endFrame = 0;
+    m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
+    if (timeStamp < startFrame)
+        return false;
+
+    startFrame = timeStamp;
+    while (endFrame - startFrame >= sampleCountPerChunk) {
+        if (m_ringBuffer->fetch(&buffer, sampleCountPerChunk, startFrame, CARingBuffer::Copy))
+            return false;
+        consumeFilledBuffer();
+        startFrame += sampleCountPerChunk;
+    }
+    return true;
+}
+
+bool AudioSampleDataSource::pullSamples(AudioBufferList& buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
+{
+    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
+    if (!lock.owns_lock() || !m_ringBuffer) {
+        size_t byteCount = sampleCount * m_outputDescription->bytesPerFrame();
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        return false;
+    }
+
+    return pullSamplesInternal(buffer, sampleCount, timeStamp, hostTime, mode);
+}
+
+bool AudioSampleDataSource::pullSamples(AudioSampleBufferList& buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
+{
+    std::unique_lock<Lock> lock(m_lock, std::try_to_lock);
+    if (!lock.owns_lock() || !m_ringBuffer) {
+        buffer.zero();
+        return false;
+    }
+
+    if (!pullSamplesInternal(buffer.bufferList(), sampleCount, timeStamp, hostTime, mode))
+        return false;
+
+    buffer.setTimes(timeStamp, hostTime);
+    buffer.setSampleCount(sampleCount);
+
+    return true;
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_STREAM)
_______________________________________________
webkit-changes mailing list
webkit-changes@lists.webkit.org
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to