Title: [269077] trunk
Revision
269077
Author
[email protected]
Date
2020-10-27 15:21:28 -0700 (Tue, 27 Oct 2020)

Log Message

[Mac] Audio and Video element creation up to 300x slower than other browsers
https://bugs.webkit.org/show_bug.cgi?id=218206
<rdar://problem/62451019>

Reviewed by Eric Carlson.

PerformanceTests:

* Media/AudioElementCreation.html: Added.
* Media/VideoElementCreation.html: Added.

Source/WebCore:

Tests: PerformanceTests/Media/AudioElementCreation.html
       PerformanceTests/Media/VideoElementCreation.html

Currently, a large percent of the element creation code occurs as a result of adding its
session to PlatformMediaSessionManager, which forces iterating over all extant sessions and
then to set various properties of the audio hardware in response. This patch addresses the
bulk of those expensive calls, but more performance optimizations are available to further
reduce media element creation costs.

When an <audio> element is created, we set the preferred audio output buffer size to a large
value for performance reasons. However, there's no need to repeatedly call into CoreAudio if
the buffer size is already set to that same high value. Store the result of setting the
preferred buffer size, and also add a property change listener to detect other callers
modifying that same value, so that all set operations with identical sizes become no-ops,
and all queries just return cached values.

When any media element is created, the entire list of extant sessions is iterated and
properties on each are queried. Rather than do these inside the same run-loop, use a
TaskQueue to enqueue a task to query the list of created elements during the next run-loop.

Between these two optimizations, the runtime cost of creating 1000 audio elements is reduced
(on this engineer's machine) from 2s to 40ms.

* platform/audio/PlatformMediaSessionManager.cpp:
(WebCore::PlatformMediaSessionManager::beginInterruption):
(WebCore::PlatformMediaSessionManager::addSession):
(WebCore::PlatformMediaSessionManager::removeSession):
(WebCore::PlatformMediaSessionManager::sessionStateChanged):
(WebCore::PlatformMediaSessionManager::forEachDocumentSession):
(WebCore::PlatformMediaSessionManager::forEachSession):
(WebCore::PlatformMediaSessionManager::anyOfSessions const):
* platform/audio/PlatformMediaSessionManager.h:
* platform/audio/mac/AudioSessionMac.mm:
(WebCore::AudioSessionPrivate::addSampleRateObserverIfNeeded):
(WebCore::AudioSessionPrivate::handleSampleRateChange):
(WebCore::AudioSessionPrivate::addBufferSizeObserverIfNeeded):
(WebCore::AudioSessionPrivate::handleBufferSizeChange):
(WebCore::AudioSession::sampleRate const):
(WebCore::AudioSession::bufferSize const):
(WebCore::AudioSession::preferredBufferSize const):
(WebCore::AudioSession::setPreferredBufferSize):

Modified Paths

Added Paths

Diff

Modified: trunk/PerformanceTests/ChangeLog (269076 => 269077)


--- trunk/PerformanceTests/ChangeLog	2020-10-27 21:54:25 UTC (rev 269076)
+++ trunk/PerformanceTests/ChangeLog	2020-10-27 22:21:28 UTC (rev 269077)
@@ -1,3 +1,14 @@
+2020-10-27  Jer Noble  <[email protected]>
+
+        [Mac] Audio and Video element creation up to 300x slower than other browsers
+        https://bugs.webkit.org/show_bug.cgi?id=218206
+        <rdar://problem/62451019>
+
+        Reviewed by Eric Carlson.
+
+        * Media/AudioElementCreation.html: Added.
+        * Media/VideoElementCreation.html: Added.
+
 2020-10-27  Caio Lima  <[email protected]>
 
         Make WebAssembly tests on JetStream 2 be feature detactable

Added: trunk/PerformanceTests/Media/AudioElementCreation.html (0 => 269077)


--- trunk/PerformanceTests/Media/AudioElementCreation.html	                        (rev 0)
+++ trunk/PerformanceTests/Media/AudioElementCreation.html	2020-10-27 22:21:28 UTC (rev 269077)
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+<head>
+<script src="../resources/runner.js"></script>
+<script>
+    window.addEventListener('load', event => {
+        const numberOfIterations = 20;
+        const numberOfItems = 1000;
+
+        PerfTestRunner.prepareToMeasureValuesAsync({
+            customIterationCount: numberOfIterations,
+            unit: 'ms',
+            done: function () {
+                PerfTestRunner.gc();
+            }
+        });
+
+        function startIteration()
+        {
+            testGenerator = runIteration();
+            testGenerator.next();
+        }
+
+        function *runIteration()
+        {
+            var startTime = PerfTestRunner.now();
+
+            for (let i = 0; i < numberOfItems; ++i)
+                document.createElement('audio');
+
+            if (!PerfTestRunner.measureValueAsync(PerfTestRunner.now() - startTime))
+                return;
+
+            PerfTestRunner.gc();
+            setTimeout(startIteration, 0);
+        }
+
+        startIteration();
+    })
+</script>
+</head>
+<body></body>
+</html>
\ No newline at end of file

Added: trunk/PerformanceTests/Media/VideoElementCreation.html (0 => 269077)


--- trunk/PerformanceTests/Media/VideoElementCreation.html	                        (rev 0)
+++ trunk/PerformanceTests/Media/VideoElementCreation.html	2020-10-27 22:21:28 UTC (rev 269077)
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+<head>
+<script src="../resources/runner.js"></script>
+<script>
+    window.addEventListener('load', event => {
+        const numberOfIterations = 20;
+        const numberOfItems = 1000;
+
+        PerfTestRunner.prepareToMeasureValuesAsync({
+            customIterationCount: numberOfIterations,
+            unit: 'ms',
+            done: function () {
+                PerfTestRunner.gc();
+            }
+        });
+
+        function startIteration()
+        {
+            testGenerator = runIteration();
+            testGenerator.next();
+        }
+
+        function *runIteration()
+        {
+            var startTime = PerfTestRunner.now();
+
+            for (let i = 0; i < numberOfItems; ++i)
+                document.createElement('video');
+
+            if (!PerfTestRunner.measureValueAsync(PerfTestRunner.now() - startTime))
+                return;
+
+            PerfTestRunner.gc();
+            setTimeout(startIteration, 0);
+        }
+
+        startIteration();
+    })
+</script>
+</head>
+<body></body>
+</html>
\ No newline at end of file

Modified: trunk/Source/WebCore/ChangeLog (269076 => 269077)


--- trunk/Source/WebCore/ChangeLog	2020-10-27 21:54:25 UTC (rev 269076)
+++ trunk/Source/WebCore/ChangeLog	2020-10-27 22:21:28 UTC (rev 269077)
@@ -1,3 +1,53 @@
+2020-10-27  Jer Noble  <[email protected]>
+
+        [Mac] Audio and Video element creation up to 300x slower than other browsers
+        https://bugs.webkit.org/show_bug.cgi?id=218206
+        <rdar://problem/62451019>
+
+        Reviewed by Eric Carlson.
+
+        Tests: PerformanceTests/Media/AudioElementCreation.html
+               PerformanceTests/Media/VideoElementCreation.html
+
+        Currently, a large percent of the element creation code occurs as a result of adding its
+        session to PlatformMediaSessionManager, which forces iterating over all extant sessions and
+        then to set various properties of the audio hardware in response. This patch addresses the
+        bulk of those expensive calls, but more performance optimizations are available to further
+        reduce media element creation costs.
+
+        When an <audio> element is created, we set the preferred audio output buffer size to a large
+        value for performance reasons. However, there's no need to repeatedly call into CoreAudio if
+        the buffer size is already set to that same high value. Store the result of setting the
+        preferred buffer size, and also add a property change listener to detect other callers
+        modifying that same value, so that all set operations with identical sizes become no-ops,
+        and all queries just return cached values.
+
+        When any media element is created, the entire list of extant sessions is iterated and
+        properties on each are queried. Rather than do these inside the same run-loop, use a
+        TaskQueue to enqueue a task to query the list of created elements during the next run-loop.
+
+        Between these two optimizations, the runtime cost of creating 1000 audio elements is reduced
+        (on this engineer's machine) from 2s to 40ms.
+
+        * platform/audio/PlatformMediaSessionManager.cpp:
+        (WebCore::PlatformMediaSessionManager::beginInterruption):
+        (WebCore::PlatformMediaSessionManager::addSession):
+        (WebCore::PlatformMediaSessionManager::removeSession):
+        (WebCore::PlatformMediaSessionManager::sessionStateChanged):
+        (WebCore::PlatformMediaSessionManager::forEachDocumentSession):
+        (WebCore::PlatformMediaSessionManager::forEachSession):
+        (WebCore::PlatformMediaSessionManager::anyOfSessions const):
+        * platform/audio/PlatformMediaSessionManager.h:
+        * platform/audio/mac/AudioSessionMac.mm:
+        (WebCore::AudioSessionPrivate::addSampleRateObserverIfNeeded):
+        (WebCore::AudioSessionPrivate::handleSampleRateChange):
+        (WebCore::AudioSessionPrivate::addBufferSizeObserverIfNeeded):
+        (WebCore::AudioSessionPrivate::handleBufferSizeChange):
+        (WebCore::AudioSession::sampleRate const):
+        (WebCore::AudioSession::bufferSize const):
+        (WebCore::AudioSession::preferredBufferSize const):
+        (WebCore::AudioSession::setPreferredBufferSize):
+
 2020-10-27  Chris Dumez  <[email protected]>
 
         [GPUProcess] Use async IPC for RemoteAudioDestinationManager's StartAudioDestination / StopAudioDestination

Modified: trunk/Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp (269076 => 269077)


--- trunk/Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp	2020-10-27 21:54:25 UTC (rev 269076)
+++ trunk/Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp	2020-10-27 22:21:28 UTC (rev 269077)
@@ -139,7 +139,7 @@
     forEachSession([type] (auto& session) {
         session.beginInterruption(type);
     });
-    updateSessionState();
+    scheduleUpdateSessionState();
 }
 
 void PlatformMediaSessionManager::endInterruption(PlatformMediaSession::EndInterruptionFlags flags)
@@ -163,7 +163,7 @@
     m_logger->addLogger(session.logger());
 #endif
 
-    updateSessionState();
+    scheduleUpdateSessionState();
 }
 
 bool PlatformMediaSessionManager::hasNoSession() const
@@ -190,7 +190,7 @@
     m_logger->removeLogger(session.logger());
 #endif
 
-    updateSessionState();
+    scheduleUpdateSessionState();
 }
 
 void PlatformMediaSessionManager::addRestriction(PlatformMediaSession::MediaType type, SessionRestrictions restriction)
@@ -281,7 +281,7 @@
 
 void PlatformMediaSessionManager::sessionStateChanged(PlatformMediaSession&)
 {
-    updateSessionState();
+    scheduleUpdateSessionState();
 }
 
 void PlatformMediaSessionManager::setCurrentSession(PlatformMediaSession& session)
@@ -561,7 +561,7 @@
 {
     ASSERT(!m_audioCaptureSources.contains(source));
     m_audioCaptureSources.add(source);
-    updateSessionState();
+    scheduleUpdateSessionState();
 }
 
 
@@ -569,9 +569,19 @@
 {
     ASSERT(m_audioCaptureSources.contains(source));
     m_audioCaptureSources.remove(source);
-    updateSessionState();
+    scheduleUpdateSessionState();
 }
 
+void PlatformMediaSessionManager::scheduleUpdateSessionState()
+{
+    if (updateSessionStateQueue.hasPendingTasks())
+        return;
+
+    updateSessionStateQueue.enqueueTask([this] {
+        updateSessionState();
+    });
+}
+
 #if USE(AUDIO_SESSION)
 void PlatformMediaSessionManager::maybeDeactivateAudioSession()
 {

Modified: trunk/Source/WebCore/platform/audio/PlatformMediaSessionManager.h (269076 => 269077)


--- trunk/Source/WebCore/platform/audio/PlatformMediaSessionManager.h	2020-10-27 21:54:25 UTC (rev 269076)
+++ trunk/Source/WebCore/platform/audio/PlatformMediaSessionManager.h	2020-10-27 22:21:28 UTC (rev 269077)
@@ -27,6 +27,7 @@
 #define PlatformMediaSessionManager_h
 
 #include "DocumentIdentifier.h"
+#include "GenericTaskQueue.h"
 #include "MediaSessionIdentifier.h"
 #include "PlatformMediaSession.h"
 #include "Timer.h"
@@ -179,6 +180,7 @@
 private:
     friend class Internals;
 
+    void scheduleUpdateSessionState();
     virtual void updateSessionState() { }
 
     Vector<WeakPtr<PlatformMediaSession>> sessionsMatching(const Function<bool(const PlatformMediaSession&)>&) const;
@@ -197,6 +199,7 @@
 #endif
 
     WeakHashSet<PlatformMediaSession::AudioCaptureSource> m_audioCaptureSources;
+    GenericTaskQueue<Timer> updateSessionStateQueue;
 
 #if !RELEASE_LOG_DISABLED
     Ref<AggregateLogger> m_logger;

Modified: trunk/Source/WebCore/platform/audio/mac/AudioSessionMac.mm (269076 => 269077)


--- trunk/Source/WebCore/platform/audio/mac/AudioSessionMac.mm	2020-10-27 21:54:25 UTC (rev 269076)
+++ trunk/Source/WebCore/platform/audio/mac/AudioSessionMac.mm	2020-10-27 22:21:28 UTC (rev 269077)
@@ -81,6 +81,13 @@
     WTF_MAKE_FAST_ALLOCATED;
 public:
     explicit AudioSessionPrivate() = default;
+
+    void addSampleRateObserverIfNeeded();
+    void addBufferSizeObserverIfNeeded();
+
+    static OSStatus handleSampleRateChange(AudioObjectID, UInt32, const AudioObjectPropertyAddress*, void* inClientData);
+    static OSStatus handleBufferSizeChange(AudioObjectID, UInt32, const AudioObjectPropertyAddress*, void* inClientData);
+
     Optional<bool> lastMutedState;
     AudioSession::CategoryType category { AudioSession::None };
 #if ENABLE(ROUTING_ARBITRATION)
@@ -90,8 +97,67 @@
 #endif
     AudioSession::CategoryType m_categoryOverride;
     bool inRoutingArbitration { false };
+    bool hasSampleRateObserver { false };
+    bool hasBufferSizeObserver { false };
+    Optional<double> sampleRate;
+    Optional<size_t> bufferSize;
 };
 
+void AudioSessionPrivate::addSampleRateObserverIfNeeded()
+{
+    if (hasSampleRateObserver)
+        return;
+    hasSampleRateObserver = true;
+
+    AudioObjectPropertyAddress nominalSampleRateAddress = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
+    AudioObjectAddPropertyListener(defaultDevice(), &nominalSampleRateAddress, handleSampleRateChange, this);
+}
+
+OSStatus AudioSessionPrivate::handleSampleRateChange(AudioObjectID device, UInt32, const AudioObjectPropertyAddress* sampleRateAddress, void* inClientData)
+{
+    ASSERT(inClientData);
+    if (!inClientData)
+        return noErr;
+
+    auto* sessionPrivate = static_cast<AudioSessionPrivate*>(inClientData);
+
+    Float64 nominalSampleRate;
+    UInt32 nominalSampleRateSize = sizeof(Float64);
+    OSStatus result = AudioObjectGetPropertyData(device, sampleRateAddress, 0, 0, &nominalSampleRateSize, (void*)&nominalSampleRate);
+    if (result)
+        return result;
+
+    sessionPrivate->sampleRate = narrowPrecisionToFloat(nominalSampleRate);
+    return noErr;
+}
+
+void AudioSessionPrivate::addBufferSizeObserverIfNeeded()
+{
+    if (hasBufferSizeObserver)
+        return;
+
+    AudioObjectPropertyAddress bufferSizeAddress = { kAudioDevicePropertyBufferFrameSize, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
+    AudioObjectAddPropertyListener(defaultDevice(), &bufferSizeAddress, handleBufferSizeChange, this);
+}
+
+OSStatus AudioSessionPrivate::handleBufferSizeChange(AudioObjectID device, UInt32, const AudioObjectPropertyAddress* bufferSizeAddress, void* inClientData)
+{
+    ASSERT(inClientData);
+    if (!inClientData)
+        return noErr;
+
+    auto* sessionPrivate = static_cast<AudioSessionPrivate*>(inClientData);
+
+    UInt32 bufferSize;
+    UInt32 bufferSizeSize = sizeof(bufferSize);
+    OSStatus result = AudioObjectGetPropertyData(device, bufferSizeAddress, 0, 0, &bufferSizeSize, &bufferSize);
+    if (result)
+        return result;
+
+    sessionPrivate->bufferSize = bufferSize;
+    return noErr;
+}
+
 AudioSession::AudioSession()
     : m_private(makeUnique<AudioSessionPrivate>())
 {
@@ -187,6 +253,11 @@
 
 float AudioSession::sampleRate() const
 {
+    if (m_private->sampleRate)
+        return *m_private->sampleRate;
+
+    m_private->addSampleRateObserverIfNeeded();
+
     Float64 nominalSampleRate;
     UInt32 nominalSampleRateSize = sizeof(Float64);
 
@@ -198,11 +269,18 @@
     if (result)
         return 0;
 
+    m_private->sampleRate = narrowPrecisionToFloat(nominalSampleRate);
+
     return narrowPrecisionToFloat(nominalSampleRate);
 }
 
 size_t AudioSession::bufferSize() const
 {
+    if (m_private->bufferSize)
+        return *m_private->bufferSize;
+
+    m_private->addBufferSizeObserverIfNeeded();
+
     UInt32 bufferSize;
     UInt32 bufferSizeSize = sizeof(bufferSize);
 
@@ -214,6 +292,9 @@
 
     if (result)
         return 0;
+
+    m_private->bufferSize = bufferSize;
+
     return bufferSize;
 }
 
@@ -267,22 +348,14 @@
 
 size_t AudioSession::preferredBufferSize() const
 {
-    UInt32 bufferSize;
-    UInt32 bufferSizeSize = sizeof(bufferSize);
-
-    AudioObjectPropertyAddress preferredBufferSizeAddress = {
-        kAudioDevicePropertyBufferFrameSize,
-        kAudioObjectPropertyScopeGlobal,
-        kAudioObjectPropertyElementMaster };
-    OSStatus result = AudioObjectGetPropertyData(defaultDevice(), &preferredBufferSizeAddress, 0, 0, &bufferSizeSize, &bufferSize);
-
-    if (result)
-        return 0;
-    return bufferSize;
+    return bufferSize();
 }
 
 void AudioSession::setPreferredBufferSize(size_t bufferSize)
 {
+    if (m_private->bufferSize == bufferSize)
+        return;
+
     AudioValueRange bufferSizeRange = {0, 0};
     UInt32 bufferSizeRangeSize = sizeof(AudioValueRange);
     AudioObjectPropertyAddress bufferSizeRangeAddress = {
@@ -305,9 +378,10 @@
 
     result = AudioObjectSetPropertyData(defaultDevice(), &preferredBufferSizeAddress, 0, 0, sizeof(bufferSizeOut), (void*)&bufferSizeOut);
 
-#if LOG_DISABLED
-    UNUSED_PARAM(result);
-#else
+    if (!result)
+        m_private->bufferSize = bufferSizeOut;
+
+#if !LOG_DISABLED
     if (result)
         LOG(Media, "AudioSession::setPreferredBufferSize(%zu) - failed with error %d", bufferSize, static_cast<int>(result));
     else
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to