Title: [265627] branches/safari-610.1-branch/Source/WebCore

Diff

Modified: branches/safari-610.1-branch/Source/WebCore/ChangeLog (265626 => 265627)


--- branches/safari-610.1-branch/Source/WebCore/ChangeLog	2020-08-13 22:19:15 UTC (rev 265626)
+++ branches/safari-610.1-branch/Source/WebCore/ChangeLog	2020-08-13 22:19:59 UTC (rev 265627)
@@ -1,55 +1,5 @@
 2020-08-12  Alan Coon  <[email protected]>
 
-        Cherry-pick r265280. rdar://problem/66945503
-
-    Update AudioSampleDataSource offset computation
-    https://bugs.webkit.org/show_bug.cgi?id=215127
-    <rdar://problem/65938265>
-    
-    Reviewed by Eric Carlson.
-    
-    As per logs, it sometimes happens that the offset is so big that the timestamp is below the start of the window.
-    In that case, our logic is not able to catch up and reduce the offset.
-    To handle this, we special-case the situation where the timestamp is below the start frame and behave as if we were starting from scratch.
-    Otherwise, we continue our logic to fine-tune the offset, slowly making it larger so as not to hit the end of the window while still staying close to it.
-    Updated logging to help further debugging this issue if needed.
-    
-    * platform/audio/mac/AudioSampleDataSource.h:
-    * platform/audio/mac/AudioSampleDataSource.mm:
-    (WebCore::AudioSampleDataSource::pushSamplesInternal):
-    (WebCore::computeOffsetDelay):
-    (WebCore::AudioSampleDataSource::pullSamplesInternal):
-    (WebCore::AudioSampleDataSource::pullAvalaibleSamplesAsChunks):
-    * platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp:
-    (WebCore::RealtimeIncomingAudioSourceCocoa::OnData):
-    
-    git-svn-id: https://svn.webkit.org/repository/webkit/trunk@265280 268f45cc-cd09-0410-ab3c-d52691b4dbfc
-
-    2020-08-05  Youenn Fablet  <[email protected]>
-
-            Update AudioSampleDataSource offset computation
-            https://bugs.webkit.org/show_bug.cgi?id=215127
-            <rdar://problem/65938265>
-
-            Reviewed by Eric Carlson.
-
-            As per logs, it sometimes happens that the offset is so big that the timestamp is below the start of the window.
-            In that case, our logic is not able to catch up and reduce the offset.
-            To handle this, we special-case the situation where the timestamp is below the start frame and behave as if we were starting from scratch.
-            Otherwise, we continue our logic to fine-tune the offset, slowly making it larger so as not to hit the end of the window while still staying close to it.
-            Updated logging to help further debugging this issue if needed.
-
-            * platform/audio/mac/AudioSampleDataSource.h:
-            * platform/audio/mac/AudioSampleDataSource.mm:
-            (WebCore::AudioSampleDataSource::pushSamplesInternal):
-            (WebCore::computeOffsetDelay):
-            (WebCore::AudioSampleDataSource::pullSamplesInternal):
-            (WebCore::AudioSampleDataSource::pullAvalaibleSamplesAsChunks):
-            * platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp:
-            (WebCore::RealtimeIncomingAudioSourceCocoa::OnData):
-
-2020-08-12  Alan Coon  <[email protected]>
-
         Cherry-pick r265480. rdar://problem/66943866
 
     AuxiliaryProcess::didReceiveInvalidMessage() for WebPage::PerformDragControllerAction IPC

Modified: branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h (265626 => 265627)


--- branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h	2020-08-13 22:19:15 UTC (rev 265626)
+++ branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.h	2020-08-13 22:19:59 UTC (rev 265627)
@@ -116,7 +116,7 @@
     float m_volume { 1.0 };
     bool m_muted { false };
     bool m_paused { true };
-    bool m_shouldComputeOutputSampleOffset { true };
+    bool m_transitioningFromPaused { true };
 
 #if !RELEASE_LOG_DISABLED
     Ref<const Logger> m_logger;

Modified: branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm (265626 => 265627)


--- branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm	2020-08-13 22:19:15 UTC (rev 265626)
+++ branches/safari-610.1-branch/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm	2020-08-13 22:19:59 UTC (rev 265627)
@@ -172,8 +172,8 @@
 
     if (m_inputSampleOffset == MediaTime::invalidTime()) {
         m_inputSampleOffset = MediaTime(1 - sampleTime.timeValue(), sampleTime.timeScale());
-        dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, inputSampleOffset = m_inputSampleOffset.timeValue(), maximumSampleCount = m_maximumSampleCount, this, protectedThis = makeRefPtr(*this)] {
-            ALWAYS_LOG(logIdentifier, "input sample offset is ", inputSampleOffset, ", maximumSampleCount is ", maximumSampleCount);
+        dispatch_async(dispatch_get_main_queue(), [inputSampleOffset = m_inputSampleOffset.timeValue(), maximumSampleCount = m_maximumSampleCount, this, protectedThis = makeRefPtr(*this)] {
+            ERROR_LOG("pushSamples: input sample offset is ", inputSampleOffset, ", maximumSampleCount = ", maximumSampleCount);
         });
     }
     sampleTime += m_inputSampleOffset;
@@ -186,6 +186,16 @@
 
     m_ringBuffer->store(sampleBufferList, sampleCount, sampleTime.timeValue());
     m_lastPushedSampleCount = sampleCount;
+
+#if !LOG_DISABLED
+    uint64_t startFrame2 = 0;
+    uint64_t endFrame2 = 0;
+    m_ringBuffer->getCurrentFrameBounds(startFrame2, endFrame2);
+    dispatch_async(dispatch_get_main_queue(), [sampleCount, sampleTime, presentationTime, absoluteTime = mach_absolute_time(), startFrame1, endFrame1, startFrame2, endFrame2] {
+        LOG(MediaCaptureSamples, "@@ pushSamples: added %ld samples for time = %s (was %s), mach time = %lld", sampleCount, toString(sampleTime).utf8().data(), toString(presentationTime).utf8().data(), absoluteTime);
+        LOG(MediaCaptureSamples, "@@ pushSamples: buffered range was [%lld .. %lld], is [%lld .. %lld]", startFrame1, endFrame1, startFrame2, endFrame2);
+    });
+#endif
 }
 
 void AudioSampleDataSource::pushSamples(const AudioStreamBasicDescription& sampleDescription, CMSampleBufferRef sampleBuffer)
@@ -203,21 +213,6 @@
     pushSamplesInternal(*downcast<WebAudioBufferList>(audioData).list(), sampleTime, sampleCount);
 }
 
-static inline int64_t computeOffsetDelay(double sampleRate, uint64_t lastPushedSampleCount)
-{
-    const double twentyMS = .02;
-    const double tenMS = .01;
-    const double fiveMS = .005;
-
-    if (lastPushedSampleCount > sampleRate * twentyMS)
-        return sampleRate * twentyMS;
-    if (lastPushedSampleCount > sampleRate * tenMS)
-        return sampleRate * tenMS;
-    if (lastPushedSampleCount > sampleRate * fiveMS)
-        return sampleRate * fiveMS;
-    return 0;
-}
-
 bool AudioSampleDataSource::pullSamplesInternal(AudioBufferList& buffer, size_t& sampleCount, uint64_t timeStamp, double /*hostTime*/, PullMode mode)
 {
     size_t byteCount = sampleCount * m_outputDescription->bytesPerFrame();
@@ -239,7 +234,7 @@
     uint64_t endFrame = 0;
     m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
 
-    if (m_shouldComputeOutputSampleOffset) {
+    if (m_transitioningFromPaused) {
         uint64_t buffered = endFrame - startFrame;
         if (buffered < sampleCount * 2) {
             AudioSampleBufferList::zeroABL(buffer, byteCount);
@@ -247,35 +242,51 @@
             return false;
         }
 
-        m_shouldComputeOutputSampleOffset = false;
+        const double twentyMS = .02;
+        const double tenMS = .01;
+        const double fiveMS = .005;
+        double sampleRate = m_outputDescription->sampleRate();
+        m_outputSampleOffset = (endFrame - sampleCount) - timeStamp;
+        if (m_lastPushedSampleCount > sampleRate * twentyMS)
+            m_outputSampleOffset -= sampleRate * twentyMS;
+        else if (m_lastPushedSampleCount > sampleRate * tenMS)
+            m_outputSampleOffset -= sampleRate * tenMS;
+        else if (m_lastPushedSampleCount > sampleRate * fiveMS)
+            m_outputSampleOffset -= sampleRate * fiveMS;
 
-        m_outputSampleOffset = (endFrame - sampleCount) - timeStamp;
-        m_outputSampleOffset -= computeOffsetDelay(m_outputDescription->sampleRate(), m_lastPushedSampleCount);
-        dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, outputSampleOffset = m_outputSampleOffset, this, protectedThis = makeRefPtr(*this)] {
-            ALWAYS_LOG(logIdentifier, "setting new offset to ", outputSampleOffset);
-        });
+        m_transitioningFromPaused = false;
     }
 
     timeStamp += m_outputSampleOffset;
 
+#if !LOG_DISABLED
+    dispatch_async(dispatch_get_main_queue(), [sampleCount, timeStamp, sampleOffset = m_outputSampleOffset] {
+        LOG(MediaCaptureSamples, "** pullSamplesInternal: asking for %ld samples at time = %lld (was %lld)", sampleCount, timeStamp, timeStamp - sampleOffset);
+    });
+#endif
+
+    uint64_t framesAvailable = sampleCount;
     if (timeStamp < startFrame || timeStamp + sampleCount > endFrame) {
-        dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, timeStamp, startFrame, endFrame, sampleCount, outputSampleOffset = m_outputSampleOffset, this, protectedThis = makeRefPtr(*this)] {
-            ERROR_LOG(logIdentifier, "not enough data, sample ", timeStamp, " with offset ", outputSampleOffset, ", trying to get ", sampleCount, " samples, but not completely in range [", startFrame, " .. ", endFrame, "]");
+        if (timeStamp + sampleCount < startFrame || timeStamp >= endFrame)
+            framesAvailable = 0;
+        else if (timeStamp < startFrame)
+            framesAvailable = timeStamp + sampleCount - startFrame;
+        else
+            framesAvailable = timeStamp + sampleCount - endFrame;
+
+#if !RELEASE_LOG_DISABLED
+        dispatch_async(dispatch_get_main_queue(), [timeStamp, startFrame, endFrame, framesAvailable, sampleCount, this, protectedThis = makeRefPtr(*this)] {
+            ALWAYS_LOG("sample ", timeStamp, " is not completely in range [", startFrame, " .. ", endFrame, "], returning ", framesAvailable, " frames");
+            if (framesAvailable < sampleCount)
+                ERROR_LOG("not enough data available, returning zeroes");
         });
+#endif
 
-        if (timeStamp < startFrame || timeStamp >= endFrame) {
-            // We are out of the window, let's restart the offset computation.
-            m_shouldComputeOutputSampleOffset = true;
-        } else {
-            // We are too close from endFrame, let's back up a little bit.
-            uint64_t framesAvailable = endFrame - timeStamp;
+        if (framesAvailable < sampleCount) {
             m_outputSampleOffset -= sampleCount - framesAvailable;
-            dispatch_async(dispatch_get_main_queue(), [logIdentifier = LOGIDENTIFIER, outputSampleOffset = m_outputSampleOffset, this, protectedThis = makeRefPtr(*this)] {
-                ALWAYS_LOG(logIdentifier, "updating offset to ", outputSampleOffset);
-            });
+            AudioSampleBufferList::zeroABL(buffer, byteCount);
+            return false;
         }
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        return false;
     }
 
     if (mode == Copy) {
@@ -313,9 +324,9 @@
     uint64_t startFrame = 0;
     uint64_t endFrame = 0;
     m_ringBuffer->getCurrentFrameBounds(startFrame, endFrame);
-    if (m_shouldComputeOutputSampleOffset) {
+    if (m_transitioningFromPaused) {
         m_outputSampleOffset = timeStamp + (endFrame - sampleCountPerChunk);
-        m_shouldComputeOutputSampleOffset = false;
+        m_transitioningFromPaused = false;
     }
 
     timeStamp += m_outputSampleOffset;

Modified: branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp (265626 => 265627)


--- branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp	2020-08-13 22:19:15 UTC (rev 265626)
+++ branches/safari-610.1-branch/Source/WebCore/platform/mediastream/mac/RealtimeIncomingAudioSourceCocoa.cpp	2020-08-13 22:19:59 UTC (rev 265627)
@@ -75,6 +75,10 @@
     }
 #endif
 
+    CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
+    auto mediaTime = PAL::toMediaTime(startTime);
+    m_numberOfFrames += numberOfFrames;
+
     if (!m_audioBufferList || m_sampleRate != sampleRate || m_numberOfChannels != numberOfChannels) {
         callOnMainThread([identifier = LOGIDENTIFIER, this, protectedThis = makeRef(*this), sampleRate, numberOfChannels] {
             ALWAYS_LOG_IF(loggerPtr(), identifier, "new audio buffer list for sampleRate ", sampleRate, " and ", numberOfChannels, " channel(s)");
@@ -84,16 +88,8 @@
         m_numberOfChannels = numberOfChannels;
         m_streamDescription = streamDescription(sampleRate, numberOfChannels);
         m_audioBufferList = makeUnique<WebAudioBufferList>(m_streamDescription);
-        if (m_sampleRate && m_numberOfFrames)
-            m_numberOfFrames = m_numberOfFrames * sampleRate / m_sampleRate;
-        else
-            m_numberOfFrames = 0;
     }
 
-    CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
-    auto mediaTime = PAL::toMediaTime(startTime);
-    m_numberOfFrames += numberOfFrames;
-
     auto& bufferList = *m_audioBufferList->buffer(0);
     bufferList.mDataByteSize = numberOfChannels * numberOfFrames * bitsPerSample / 8;
     bufferList.mNumberChannels = numberOfChannels;
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to