[265629] branches/safari-610.1-branch/Source/WebCore

Revision: 265629
Author: [email protected]
Date: 2020-08-13 15:20:04 -0700 (Thu, 13 Aug 2020)

Log Message

Cherry-pick r265280. rdar://problem/66945503

    Update AudioSampleDataSource offset computation
    https://bugs.webkit.org/show_bug.cgi?id=215127
    <rdar://problem/65938265>

    Reviewed by Eric Carlson.

    According to the logs, it sometimes happens that the offset is so large that the timestamp falls below the start of the window.
    In that case, our logic is unable to catch up and reduce the offset.
    To handle this, we special-case a timestamp below the start frame and behave as if we were starting from scratch.
    Otherwise, we keep fine-tuning the offset by slowly increasing it so that we do not hit the end of the window while staying close to it.
    Updated logging to help debug this issue further if needed.

    * platform/audio/mac/AudioSampleDataSource.h:
    * platform/audio/mac/AudioSampleDataSource.mm:
    (WebCore::AudioSampleDataSource::pushSamplesInternal):
    (WebCore::computeOffsetDelay):
    (WebCore::AudioSampleDataSource::pullSamplesInternal):
    (WebCore::AudioSampleDataSource::pullAvalaibleSamplesAsChunks):
    * platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp:
    (WebCore::RealtimeIncomingAudioSourceCocoa::OnData):

    git-svn-id: https://svn.webkit.org/repository/webkit/trunk@265280 268f45cc-cd09-0410-ab3c-d52691b4dbfc
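
As a rough, standalone illustration of the window check this change introduces (the struct and function names below are invented for the example and are not WebKit API), the pull-side logic described above amounts to the following; the actual implementation lives in AudioSampleDataSource::pullSamplesInternal in the diff below.

    // Minimal sketch, assuming a ring buffer window [startFrame, endFrame) and a
    // pull request of sampleCount frames at timeStamp. Not WebKit code; it only
    // mirrors the behavior the change log describes.
    #include <cstdint>

    struct OffsetState {
        int64_t outputSampleOffset { 0 };
        bool shouldComputeOutputSampleOffset { true };
    };

    // Returns true if the adjusted request is fully buffered. Otherwise it either
    // flags the offset for recomputation (timestamp outside the window) or nudges
    // the offset back (request runs past the end of the window).
    static bool checkPullWindow(OffsetState& state, uint64_t timeStamp, uint64_t sampleCount, uint64_t startFrame, uint64_t endFrame)
    {
        timeStamp += static_cast<uint64_t>(state.outputSampleOffset); // wraps correctly for negative offsets

        if (timeStamp >= startFrame && timeStamp + sampleCount <= endFrame)
            return true;

        if (timeStamp < startFrame || timeStamp >= endFrame) {
            // Out of the window entirely: restart the offset computation from scratch.
            state.shouldComputeOutputSampleOffset = true;
        } else {
            // Too close to endFrame: only part of the request is buffered, back the offset up.
            uint64_t framesAvailable = endFrame - timeStamp;
            state.outputSampleOffset -= static_cast<int64_t>(sampleCount - framesAvailable);
        }
        return false; // Caller zeroes the output buffer for this pull.
    }

In the patch itself, pullSamplesInternal zeroes the output buffer and returns false whenever this check fails, so the next pull either recomputes the offset from the current window or uses the adjusted one.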

Modified Paths

branches/safari-610.1-branch/Source/WebCore/ChangeLog
branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h
branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm
branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp

Diff

Modified: branches/safari-610.1-branch/Source/WebCore/ChangeLog (265628 => 265629)


--- branches/safari-610.1-branch/Source/WebCore/ChangeLog	2020-08-13 22:20:02 UTC (rev 265628)
+++ branches/safari-610.1-branch/Source/WebCore/ChangeLog	2020-08-13 22:20:04 UTC (rev 265629)
@@ -1,5 +1,56 @@
 2020-08-13  Russell Epstein  <[email protected]>
 
+        Cherry-pick r265280. rdar://problem/66945503
+
+    Update AudioSampleDataSource offset computation
+    https://bugs.webkit.org/show_bug.cgi?id=215127
+    <rdar://problem/65938265>
+    
+    Reviewed by Eric Carlson.
+    
+    As per logs, it sometimes happens that the offset is so big that the timestamp is below the start of the window.
+    In that case, our logic is not able to catch up and reduce the offset.
+    To handle this, we special case if the timestamp is below the start frame and do as if we were starting from scratch.
+    Otherwise, we continue our logic to fine tune the offset by slowly making it bigger to not hit the end of the window but still be close to it.
+    Updated logging to help further debugging this issue if needed.
+    
+    * platform/audio/mac/AudioSampleDataSource.h:
+    * platform/audio/mac/AudioSampleDataSource.mm:
+    (WebCore::AudioSampleDataSource::pushSamplesInternal):
+    (WebCore::computeOffsetDelay):
+    (WebCore::AudioSampleDataSource::pullSamplesInternal):
+    (WebCore::AudioSampleDataSource::pullAvalaibleSamplesAsChunks):
+    * platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp:
+    (WebCore::RealtimeIncomingAudioSourceCocoa::OnData):
+    
+    
+    git-svn-id: https://svn.webkit.org/repository/webkit/trunk@265280 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+
+    2020-08-05  Youenn Fablet  <[email protected]>
+
+            Update AudioSampleDataSource offset computation
+            https://bugs.webkit.org/show_bug.cgi?id=215127
+            <rdar://problem/65938265>
+
+            Reviewed by Eric Carlson.
+
+            As per logs, it sometimes happens that the offset is so big that the timestamp is below the start of the window.
+            In that case, our logic is not able to catch up and reduce the offset.
+            To handle this, we special case if the timestamp is below the start frame and do as if we were starting from scratch.
+            Otherwise, we continue our logic to fine tune the offset by slowly making it bigger to not hit the end of the window but still be close to it.
+            Updated logging to help further debugging this issue if needed.
+
+            * platform/audio/mac/AudioSampleDataSource.h:
+            * platform/audio/mac/AudioSampleDataSource.mm:
+            (WebCore::AudioSampleDataSource::pushSamplesInternal):
+            (WebCore::computeOffsetDelay):
+            (WebCore::AudioSampleDataSource::pullSamplesInternal):
+            (WebCore::AudioSampleDataSource::pullAvalaibleSamplesAsChunks):
+            * platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp:
+            (WebCore::RealtimeIncomingAudioSourceCocoa::OnData):
+
+2020-08-13  Russell Epstein  <[email protected]>
+
         Cherry-pick r265244. rdar://problem/66945503
 
     Remove AudioSampleDataSource::setPaused

Modified: branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h (265628 => 265629)


--- branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h	2020-08-13 22:20:02 UTC (rev 265628)
+++ branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h	2020-08-13 22:20:04 UTC (rev 265629)
@@ -113,7 +113,7 @@
 
     float m_volume { 1.0 };
     bool m_muted { false };
-    bool m_transitioningFromPaused { true };
+    bool m_shouldComputeOutputSampleOffset { true };
 
 #if !RELEASE_LOG_DISABLED
     Ref<const Logger> m_logger;

Modified: branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm (265628 => 265629)


--- branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm	2020-08-13 22:20:02 UTC (rev 265628)
+++ branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm	2020-08-13 22:20:04 UTC (rev 265629)
@@ -163,8 +163,8 @@
 
     if (m_inputSampleOffset == MediaTime::invalidTime()) {
         m_inputSampleOffset = MediaTime(1 - sampleTime.timeValue(), sampleTime.timeScale());
-        dispatch_async(dispatch_get_main_queue(), [inputSampleOffset = m_inputSampleOffset.timeValue(), maximumSampleCount = m_maximumSampleCount, this, protectedThis = makeRefPtr(*this)] {
-            ERROR_LOG("pushSamples: input sample offset is ", inputSampleOffset, ", maximumSampleCount = ", maximumSampleCount);
+        dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, inputSampleOffset = m_inputSampleOffset.timeValue(), maximumSampleCount = m_maximumSampleCount, this, protectedThis = makeRefPtr(*this)] {
+            ALWAYS_LOG(logIdentifier, "input sample offset is ", inputSampleOffset, ", maximumSampleCount is ", maximumSampleCount);
         });
     }
     sampleTime += m_inputSampleOffset;
@@ -177,16 +177,6 @@
 
     m_ringBuffer->store(sampleBufferList, sampleCount, sampleTime.timeValue());
     m_lastPushedSampleCount = sampleCount;
-
-#if !LOG_DISABLED
-    uint64_t startFrame2 = 0;
-    uint64_t endFrame2 = 0;
-    m_ringBuffer->getCurrentFrameBounds(startFrame2, endFrame2);
-    dispatch_async(dispatch_get_main_queue(), [sampleCount, sampleTime, presentationTime, absoluteTime = mach_absolute_time(), startFrame1, endFrame1, startFrame2, endFrame2] {
-        LOG(MediaCaptureSamples, "@@ pushSamples: added %ld samples for time = %s (was %s), mach time = %lld", sampleCount, toString(sampleTime).utf8().data(), toString(presentationTime).utf8().data(), absoluteTime);
-        LOG(MediaCaptureSamples, "@@ pushSamples: buffered range was [%lld .. %lld], is [%lld .. %lld]", startFrame1, endFrame1, startFrame2, endFrame2);
-    });
-#endif
 }
 
 void AudioSampleDataSource::pushSamples(const AudioStreamBasicDescription& sampleDescription, CMSampleBufferRef sampleBuffer)
@@ -204,6 +194,21 @@
     pushSamplesInternal(*downcast<WebAudioBufferList>(audioData).list(), sampleTime, sampleCount);
 }
 
+static inline int64_t computeOffsetDelay(double sampleRate, uint64_t lastPushedSampleCount)
+{
+    const double twentyMS = .02;
+    const double tenMS = .01;
+    const double fiveMS = .005;
+
+    if (lastPushedSampleCount > sampleRate * twentyMS)
+        return sampleRate * twentyMS;
+    if (lastPushedSampleCount > sampleRate * tenMS)
+        return sampleRate * tenMS;
+    if (lastPushedSampleCount > sampleRate * fiveMS)
+        return sampleRate * fiveMS;
+    return 0;
+}
+
 bool AudioSampleDataSource::pullSamplesInternal(AudioBufferList& buffer, size_t& sampleCount, uint64_t timeStamp, double /*hostTime*/, PullMode mode)
 {
     size_t byteCount = sampleCount * m_outputDescription->bytesPerFrame();
@@ -225,7 +230,7 @@
     uint64_t endFrame = 0;
     m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
 
-    if (m_transitioningFromPaused) {
+    if (m_shouldComputeOutputSampleOffset) {
         uint64_t buffered = endFrame - startFrame;
         if (buffered < sampleCount * 2) {
             AudioSampleBufferList::zeroABL(buffer, byteCount);
@@ -233,51 +238,35 @@
             return false;
         }
 
-        const double twentyMS = .02;
-        const double tenMS = .01;
-        const double fiveMS = .005;
-        double sampleRate = m_outputDescription->sampleRate();
+        m_shouldComputeOutputSampleOffset = false;
+
         m_outputSampleOffset = (endFrame - sampleCount) - timeStamp;
-        if (m_lastPushedSampleCount > sampleRate * twentyMS)
-            m_outputSampleOffset -= sampleRate * twentyMS;
-        else if (m_lastPushedSampleCount > sampleRate * tenMS)
-            m_outputSampleOffset -= sampleRate * tenMS;
-        else if (m_lastPushedSampleCount > sampleRate * fiveMS)
-            m_outputSampleOffset -= sampleRate * fiveMS;
-
-        m_transitioningFromPaused = false;
+        m_outputSampleOffset -= computeOffsetDelay(m_outputDescription->sampleRate(), m_lastPushedSampleCount);
+        dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, outputSampleOffset = m_outputSampleOffset, this, protectedThis = makeRefPtr(*this)] {
+            ALWAYS_LOG(logIdentifier, "setting new offset to ", outputSampleOffset);
+        });
     }
 
     timeStamp += m_outputSampleOffset;
 
-#if !LOG_DISABLED
-    dispatch_async(dispatch_get_main_queue(), [sampleCount, timeStamp, sampleOffset = m_outputSampleOffset] {
-        LOG(MediaCaptureSamples, "** pullSamplesInternal: asking for %ld samples at time = %lld (was %lld)", sampleCount, timeStamp, timeStamp - sampleOffset);
-    });
-#endif
-
-    uint64_t framesAvailable = sampleCount;
     if (timeStamp < startFrame || timeStamp + sampleCount > endFrame) {
-        if (timeStamp + sampleCount < startFrame || timeStamp >= endFrame)
-            framesAvailable = 0;
-        else if (timeStamp < startFrame)
-            framesAvailable = timeStamp + sampleCount - startFrame;
-        else
-            framesAvailable = timeStamp + sampleCount - endFrame;
-
-#if !RELEASE_LOG_DISABLED
-        dispatch_async(dispatch_get_main_queue(), [timeStamp, startFrame, endFrame, framesAvailable, sampleCount, this, protectedThis = makeRefPtr(*this)] {
-            ALWAYS_LOG("sample ", timeStamp, " is not completely in range [", startFrame, " .. ", endFrame, "], returning ", framesAvailable, " frames");
-            if (framesAvailable < sampleCount)
-                ERROR_LOG("not enough data available, returning zeroes");
+        dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, timeStamp, startFrame, endFrame, sampleCount, outputSampleOffset = m_outputSampleOffset, this, protectedThis = makeRefPtr(*this)] {
+            ERROR_LOG(logIdentifier, "not enough data, sample ", timeStamp, " with offset ", outputSampleOffset, ", trying to get ", sampleCount, " samples, but not completely in range [", startFrame, " .. ", endFrame, "]");
         });
-#endif
 
-        if (framesAvailable < sampleCount) {
+        if (timeStamp < startFrame || timeStamp >= endFrame) {
+            // We are out of the window, let's restart the offset computation.
+            m_shouldComputeOutputSampleOffset = true;
+        } else {
+            // We are too close from endFrame, let's back up a little bit.
+            uint64_t framesAvailable = endFrame - timeStamp;
             m_outputSampleOffset -= sampleCount - framesAvailable;
-            AudioSampleBufferList::zeroABL(buffer, byteCount);
-            return false;
+            dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, outputSampleOffset = m_outputSampleOffset, this, protectedThis = makeRefPtr(*this)] {
+                ALWAYS_LOG(logIdentifier, "updating offset to ", outputSampleOffset);
+            });
         }
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        return false;
     }
 
     if (mode == Copy) {
@@ -315,9 +304,9 @@
     uint64_t startFrame = 0;
     uint64_t endFrame = 0;
     m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
-    if (m_transitioningFromPaused) {
+    if (m_shouldComputeOutputSampleOffset) {
         m_outputSampleOffset = timeStamp + (endFrame - sampleCountPerChunk);
-        m_transitioningFromPaused = false;
+        m_shouldComputeOutputSampleOffset = false;
     }
 
     timeStamp += m_outputSampleOffset;
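
For reference, the new computeOffsetDelay helper in the diff above maps the size of the last pushed chunk to a delay of 20, 10, or 5 ms worth of frames (or zero). The helper below is copied from the patch; the small test harness around it is only an illustration of the thresholds at a 48 kHz output rate.

    // Helper copied from the patch above; main() is just an illustrative check.
    #include <cassert>
    #include <cstdint>

    static inline int64_t computeOffsetDelay(double sampleRate, uint64_t lastPushedSampleCount)
    {
        const double twentyMS = .02;
        const double tenMS = .01;
        const double fiveMS = .005;

        if (lastPushedSampleCount > sampleRate * twentyMS)
            return sampleRate * twentyMS;
        if (lastPushedSampleCount > sampleRate * tenMS)
            return sampleRate * tenMS;
        if (lastPushedSampleCount > sampleRate * fiveMS)
            return sampleRate * fiveMS;
        return 0;
    }

    int main()
    {
        assert(computeOffsetDelay(48000, 1024) == 960); // more than 20 ms pushed -> 20 ms delay
        assert(computeOffsetDelay(48000, 481) == 480);  // more than 10 ms -> 10 ms delay
        assert(computeOffsetDelay(48000, 480) == 240);  // exactly 10 ms is not "more than", so 5 ms delay
        assert(computeOffsetDelay(48000, 128) == 0);    // small pushes add no extra delay
        return 0;
    }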

Modified: branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp (265628 => 265629)


--- branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp	2020-08-13 22:20:02 UTC (rev 265628)
+++ branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp	2020-08-13 22:20:04 UTC (rev 265629)
@@ -75,10 +75,6 @@
     }
 #endif
 
-    CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
-    auto mediaTime = PAL::toMediaTime(startTime);
-    m_numberOfFrames += numberOfFrames;
-
     if (!m_audioBufferList || m_sampleRate != sampleRate || m_numberOfChannels != numberOfChannels) {
         callOnMainThread([identifier = LOGIDENTIFIER, this, protectedThis = makeRef(*this), sampleRate, numberOfChannels] {
             ALWAYS_LOG_IF(loggerPtr(), identifier, "new audio buffer list for sampleRate ", sampleRate, " and ", numberOfChannels, " channel(s)");
@@ -88,8 +84,16 @@
         m_numberOfChannels = numberOfChannels;
         m_streamDescription = streamDescription(sampleRate, numberOfChannels);
         m_audioBufferList = makeUnique<WebAudioBufferList>(m_streamDescription);
+        if (m_sampleRate && m_numberOfFrames)
+            m_numberOfFrames = m_numberOfFrames * sampleRate / m_sampleRate;
+        else
+            m_numberOfFrames = 0;
     }
 
+    CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
+    auto mediaTime = PAL::toMediaTime(startTime);
+    m_numberOfFrames += numberOfFrames;
+
     auto& bufferList = *m_audioBufferList->buffer(0);
     bufferList.mDataByteSize = numberOfChannels * numberOfFrames * bitsPerSample / 8;
     bufferList.mNumberChannels = numberOfChannels;
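
The RealtimeIncomingAudioSourceCocoa change above moves the timestamp computation after the format check and rescales m_numberOfFrames when the sample rate changes. A toy model of that bookkeeping (the FrameClock type is invented for this sketch and is not WebKit API) shows how rescaling the counter keeps the derived start times continuous across a rate change.

    // Toy model, not WebKit code: each chunk's CMTime is (frame counter, sample rate),
    // and the counter is rescaled on a rate change so elapsed time stays continuous.
    #include <cassert>
    #include <cstdint>

    struct FrameClock {
        uint64_t numberOfFrames { 0 };
        uint32_t sampleRate { 0 };

        // Returns the frame counter used as the chunk's CMTime value, then advances it.
        uint64_t onChunk(uint32_t rate, uint64_t frames)
        {
            if (rate != sampleRate) {
                // Mirrors the added rescaling: carry elapsed frames over to the new rate.
                numberOfFrames = (sampleRate && numberOfFrames) ? numberOfFrames * rate / sampleRate : 0;
                sampleRate = rate;
            }
            uint64_t startFrame = numberOfFrames;
            numberOfFrames += frames;
            return startFrame;
        }
    };

    int main()
    {
        FrameClock clock;
        assert(clock.onChunk(48000, 480) == 0);   // first chunk: t = 0 / 48000 = 0
        assert(clock.onChunk(48000, 480) == 480); // next chunk: t = 480 / 48000 = 10 ms
        assert(clock.onChunk(44100, 441) == 882); // rate change: 960 frames rescaled to 882, t = 882 / 44100 = 20 ms
        return 0;
    }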