Title: [277553] trunk/Source/WebCore
Revision
277553
Author
[email protected]
Date
2021-05-15 18:05:18 -0700 (Sat, 15 May 2021)

Log Message

Clean up BaseAudioContext now that legacy/prefixed WebAudio is gone
https://bugs.webkit.org/show_bug.cgi?id=225843

Reviewed by Darin Adler.

The BaseAudioContext class hierarchy used to be a bit complicated when
we supported legacy WebAudio because OfflineAudioContext would subclass
BaseAudioContext directly, while WebKitOfflineAudioContext would
subclass BaseAudioContext via AudioContext. The class hierarchy is now
a lot simpler, BaseAudioContext is the base class and it has exactly
2 subclasses: OfflineAudioContext and AudioContext (for real-time
rendering). Now that the legacy WebAudio code is gone, this patch
cleans up BaseAudioContext and moves as much code as possible to its
subclasses (OfflineAudioContext & AudioContext).

* Modules/webaudio/AudioBuffer.cpp:
(WebCore::AudioBuffer::create):
* Modules/webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::setBuffer):
* Modules/webaudio/AudioContext.cpp:
(WebCore::AudioContext::create):
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::uninitialize):
(WebCore::AudioContext::lazyInitialize):
(WebCore::AudioContext::activeDOMObjectName const):
* Modules/webaudio/AudioContext.h:
(isType):
* Modules/webaudio/AudioContextState.h:
* Modules/webaudio/AudioNode.cpp:
(WebCore::AudioNode::setChannelCount):
* Modules/webaudio/AudioNodeOutput.cpp:
(WebCore::AudioNodeOutput::AudioNodeOutput):
(WebCore::AudioNodeOutput::setNumberOfChannels):
* Modules/webaudio/AudioWorkletNode.cpp:
(WebCore::AudioWorkletNode::create):
* Modules/webaudio/BaseAudioContext.cpp:
(WebCore::generateContextID):
(WebCore::BaseAudioContext::BaseAudioContext):
(WebCore::BaseAudioContext::lazyInitialize):
(WebCore::BaseAudioContext::uninitialize):
(WebCore::BaseAudioContext::stop):
(WebCore::BaseAudioContext::createScriptProcessor):
(WebCore::BaseAudioContext::derefFinishedSourceNodes):
(WebCore::BaseAudioContext::lockInternal):
(WebCore::BaseAudioContext::tryLock):
(WebCore::BaseAudioContext::unlock):
(WebCore::BaseAudioContext::handlePostRenderTasks):
(WebCore::BaseAudioContext::deleteMarkedNodes):
(WebCore::BaseAudioContext::updateAutomaticPullNodes):
(WebCore::BaseAudioContext::postTask):
(WebCore::BaseAudioContext::workletIsReady):
* Modules/webaudio/BaseAudioContext.h:
(WebCore::BaseAudioContext::isInitialized const):
(WebCore::BaseAudioContext::currentSampleFrame const):
(WebCore::BaseAudioContext::currentTime const):
(WebCore::BaseAudioContext::sampleRate const):
(WebCore::BaseAudioContext::listener):
(WebCore::BaseAudioContext::incrementConnectionCount):
(WebCore::BaseAudioContext::isAudioThread const):
(WebCore::BaseAudioContext::isAudioThreadFinished const):
(WebCore::BaseAudioContext::isGraphOwner const):
* Modules/webaudio/ChannelMergerNode.cpp:
(WebCore::ChannelMergerNode::create):
* Modules/webaudio/ChannelSplitterNode.cpp:
(WebCore::ChannelSplitterNode::create):
* Modules/webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
(WebCore::DefaultAudioDestinationNode::context):
(WebCore::DefaultAudioDestinationNode::context const):
* Modules/webaudio/DefaultAudioDestinationNode.h:
* Modules/webaudio/MediaElementAudioSourceNode.cpp:
(WebCore::MediaElementAudioSourceNode::setFormat):
* Modules/webaudio/MediaStreamAudioSourceNode.cpp:
(WebCore::MediaStreamAudioSourceNode::setFormat):
* Modules/webaudio/OfflineAudioContext.cpp:
(WebCore::OfflineAudioContext::OfflineAudioContext):
(WebCore::OfflineAudioContext::create):
(WebCore::OfflineAudioContext::uninitialize):
(WebCore::OfflineAudioContext::activeDOMObjectName const):
(WebCore::OfflineAudioContext::startRendering):
(WebCore::OfflineAudioContext::suspendRendering):
(WebCore::OfflineAudioContext::resumeRendering):
(WebCore::OfflineAudioContext::didSuspendRendering):
(WebCore::OfflineAudioContext::finishedRendering):
(WebCore::OfflineAudioContext::settleRenderingPromise):
(WebCore::OfflineAudioContext::dispatchEvent):
* Modules/webaudio/OfflineAudioContext.h:
(isType):
* Modules/webaudio/OfflineAudioContext.idl:
* Modules/webaudio/OfflineAudioDestinationNode.cpp:
(WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
(WebCore::OfflineAudioDestinationNode::context):
(WebCore::OfflineAudioDestinationNode::context const):
* Modules/webaudio/OfflineAudioDestinationNode.h:
* Modules/webaudio/ScriptProcessorNode.cpp:
(WebCore::ScriptProcessorNode::ScriptProcessorNode):

Modified Paths

Diff

Modified: trunk/Source/WebCore/ChangeLog (277552 => 277553)


--- trunk/Source/WebCore/ChangeLog	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/ChangeLog	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,3 +1,102 @@
+2021-05-15  Chris Dumez  <[email protected]>
+
+        Clean up BaseAudioContext now that legacy/prefixed WebAudio is gone
+        https://bugs.webkit.org/show_bug.cgi?id=225843
+
+        Reviewed by Darin Adler.
+
+        The BaseAudioContext class hierarchy used to be a bit complicated when
+        we supported legacy WebAudio because OfflineAudioContext would subclass
+        BaseAudioContext directly, while WebKitOfflineAudioContext would
+        subclass BaseAudioContext via AudioContext. The class hierarchy is now
+        a lot simpler, BaseAudioContext is the base class and it has exactly
+        2 subclasses: OfflineAudioContext and AudioContext (for real-time
+        rendering). Now that the legacy WebAudio code is gone, this patch
+        cleans up BaseAudioContext and moves as much code as possible to its
+        subclasses (OfflineAudioContext & AudioContext).
+
+        * Modules/webaudio/AudioBuffer.cpp:
+        (WebCore::AudioBuffer::create):
+        * Modules/webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::setBuffer):
+        * Modules/webaudio/AudioContext.cpp:
+        (WebCore::AudioContext::create):
+        (WebCore::AudioContext::AudioContext):
+        (WebCore::AudioContext::uninitialize):
+        (WebCore::AudioContext::lazyInitialize):
+        (WebCore::AudioContext::activeDOMObjectName const):
+        * Modules/webaudio/AudioContext.h:
+        (isType):
+        * Modules/webaudio/AudioContextState.h:
+        * Modules/webaudio/AudioNode.cpp:
+        (WebCore::AudioNode::setChannelCount):
+        * Modules/webaudio/AudioNodeOutput.cpp:
+        (WebCore::AudioNodeOutput::AudioNodeOutput):
+        (WebCore::AudioNodeOutput::setNumberOfChannels):
+        * Modules/webaudio/AudioWorkletNode.cpp:
+        (WebCore::AudioWorkletNode::create):
+        * Modules/webaudio/BaseAudioContext.cpp:
+        (WebCore::generateContextID):
+        (WebCore::BaseAudioContext::BaseAudioContext):
+        (WebCore::BaseAudioContext::lazyInitialize):
+        (WebCore::BaseAudioContext::uninitialize):
+        (WebCore::BaseAudioContext::stop):
+        (WebCore::BaseAudioContext::createScriptProcessor):
+        (WebCore::BaseAudioContext::derefFinishedSourceNodes):
+        (WebCore::BaseAudioContext::lockInternal):
+        (WebCore::BaseAudioContext::tryLock):
+        (WebCore::BaseAudioContext::unlock):
+        (WebCore::BaseAudioContext::handlePostRenderTasks):
+        (WebCore::BaseAudioContext::deleteMarkedNodes):
+        (WebCore::BaseAudioContext::updateAutomaticPullNodes):
+        (WebCore::BaseAudioContext::postTask):
+        (WebCore::BaseAudioContext::workletIsReady):
+        * Modules/webaudio/BaseAudioContext.h:
+        (WebCore::BaseAudioContext::isInitialized const):
+        (WebCore::BaseAudioContext::currentSampleFrame const):
+        (WebCore::BaseAudioContext::currentTime const):
+        (WebCore::BaseAudioContext::sampleRate const):
+        (WebCore::BaseAudioContext::listener):
+        (WebCore::BaseAudioContext::incrementConnectionCount):
+        (WebCore::BaseAudioContext::isAudioThread const):
+        (WebCore::BaseAudioContext::isAudioThreadFinished const):
+        (WebCore::BaseAudioContext::isGraphOwner const):
+        * Modules/webaudio/ChannelMergerNode.cpp:
+        (WebCore::ChannelMergerNode::create):
+        * Modules/webaudio/ChannelSplitterNode.cpp:
+        (WebCore::ChannelSplitterNode::create):
+        * Modules/webaudio/DefaultAudioDestinationNode.cpp:
+        (WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
+        (WebCore::DefaultAudioDestinationNode::context):
+        (WebCore::DefaultAudioDestinationNode::context const):
+        * Modules/webaudio/DefaultAudioDestinationNode.h:
+        * Modules/webaudio/MediaElementAudioSourceNode.cpp:
+        (WebCore::MediaElementAudioSourceNode::setFormat):
+        * Modules/webaudio/MediaStreamAudioSourceNode.cpp:
+        (WebCore::MediaStreamAudioSourceNode::setFormat):
+        * Modules/webaudio/OfflineAudioContext.cpp:
+        (WebCore::OfflineAudioContext::OfflineAudioContext):
+        (WebCore::OfflineAudioContext::create):
+        (WebCore::OfflineAudioContext::uninitialize):
+        (WebCore::OfflineAudioContext::activeDOMObjectName const):
+        (WebCore::OfflineAudioContext::startRendering):
+        (WebCore::OfflineAudioContext::suspendRendering):
+        (WebCore::OfflineAudioContext::resumeRendering):
+        (WebCore::OfflineAudioContext::didSuspendRendering):
+        (WebCore::OfflineAudioContext::finishedRendering):
+        (WebCore::OfflineAudioContext::settleRenderingPromise):
+        (WebCore::OfflineAudioContext::dispatchEvent):
+        * Modules/webaudio/OfflineAudioContext.h:
+        (isType):
+        * Modules/webaudio/OfflineAudioContext.idl:
+        * Modules/webaudio/OfflineAudioDestinationNode.cpp:
+        (WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
+        (WebCore::OfflineAudioDestinationNode::context):
+        (WebCore::OfflineAudioDestinationNode::context const):
+        * Modules/webaudio/OfflineAudioDestinationNode.h:
+        * Modules/webaudio/ScriptProcessorNode.cpp:
+        (WebCore::ScriptProcessorNode::ScriptProcessorNode):
+
 2021-05-15  Alan Bujtas  <[email protected]>
 
         [LFC] Move inline formatting geometry to its own class

Modified: trunk/Source/WebCore/Modules/webaudio/AudioBuffer.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioBuffer.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioBuffer.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -42,7 +42,7 @@
 
 RefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, LegacyPreventDetaching preventDetaching)
 {
-    if (!BaseAudioContext::isSupportedSampleRate(sampleRate) || !numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || !numberOfFrames)
+    if (!BaseAudioContext::isSupportedSampleRate(sampleRate) || !numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels || !numberOfFrames)
         return nullptr;
 
     auto buffer = adoptRef(*new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate, preventDetaching));
@@ -57,7 +57,7 @@
     if (!options.numberOfChannels)
         return Exception { NotSupportedError, "Number of channels cannot be 0."_s };
 
-    if (options.numberOfChannels > AudioContext::maxNumberOfChannels())
+    if (options.numberOfChannels > AudioContext::maxNumberOfChannels)
         return Exception { NotSupportedError, "Number of channels cannot be more than max supported."_s };
     
     if (!options.length)

Modified: trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -426,7 +426,7 @@
 
         // Do any necesssary re-configuration to the buffer's number of channels.
         unsigned numberOfChannels = buffer->numberOfChannels();
-        ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
+        ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels);
 
         output(0)->setNumberOfChannels(numberOfChannels);
 

Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
 #if ENABLE(WEB_AUDIO)
 
 #include "AudioContext.h"
+#include "AudioContextOptions.h"
 #include "AudioTimestamp.h"
 #include "DOMWindow.h"
 #include "JSDOMPromiseDeferred.h"
@@ -63,6 +64,10 @@
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);
 
+#if OS(WINDOWS)
+static unsigned hardwareContextCount;
+#endif
+
 static Optional<float>& defaultSampleRateForTesting()
 {
     static Optional<float> sampleRate;
@@ -85,7 +90,7 @@
 {
     ASSERT(isMainThread());
 #if OS(WINDOWS)
-    if (s_hardwareContextCount >= maxHardwareContexts)
+    if (hardwareContextCount >= maxHardwareContexts)
         return Exception { QuotaExceededError, "Reached maximum number of hardware contexts on this platform"_s };
 #endif
     
@@ -105,11 +110,15 @@
     return audioContext;
 }
 
-// Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document& document, const AudioContextOptions& contextOptions)
-    : BaseAudioContext(document, contextOptions)
+    : BaseAudioContext(document)
+    , m_destinationNode(makeUniqueRef<DefaultAudioDestinationNode>(*this, contextOptions.sampleRate))
     , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
 {
+    // According to spec AudioContext must die only after page navigate.
+    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
+    setPendingActivity();
+
     constructCommon();
 
     // Initialize the destination node's muted state to match the page's current muted state.
@@ -117,6 +126,14 @@
 
     document.addAudioProducer(*this);
     document.registerForVisibilityStateChangedCallbacks(*this);
+
+    // Unlike OfflineAudioContext, AudioContext does not require calling resume() to start rendering.
+    // Lazy initialization starts rendering so we schedule a task here to make sure lazy initialization
+    // ends up happening, even if no audio node gets constructed.
+    postTask([this] {
+        if (!isStopped())
+            lazyInitialize();
+    });
 }
 
 void AudioContext::constructCommon()
@@ -140,6 +157,21 @@
     }
 }
 
+void AudioContext::uninitialize()
+{
+    if (!isInitialized())
+        return;
+
+    BaseAudioContext::uninitialize();
+
+#if OS(WINDOWS)
+    ASSERT(hardwareContextCount);
+    --hardwareContextCount;
+#endif
+
+    setState(State::Closed);
+}
+
 double AudioContext::baseLatency()
 {
     lazyInitialize();
@@ -186,16 +218,6 @@
     });
 }
 
-DefaultAudioDestinationNode& AudioContext::destination()
-{
-    return static_cast<DefaultAudioDestinationNode&>(BaseAudioContext::destination());
-}
-
-const DefaultAudioDestinationNode& AudioContext::destination() const
-{
-    return static_cast<const DefaultAudioDestinationNode&>(BaseAudioContext::destination());
-}
-
 void AudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
 {
     if (isOfflineContext()) {
@@ -309,7 +331,9 @@
             // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
             // We may want to consider requiring it for symmetry with OfflineAudioContext.
             startRendering();
-            ++s_hardwareContextCount;
+#if OS(WINDOWS)
+            ++hardwareContextCount;
+#endif
         }
     }
 }
@@ -435,6 +459,11 @@
     document()->updateIsPlayingMedia();
 }
 
+const char* AudioContext::activeDOMObjectName() const
+{
+    return "AudioContext";
+}
+
 void AudioContext::suspendPlayback()
 {
     if (state() == State::Closed || !isInitialized())

Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.h (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioContext.h	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.h	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -25,7 +25,6 @@
 
 #pragma once
 
-#include "AudioContextOptions.h"
 #include "BaseAudioContext.h"
 #include "DefaultAudioDestinationNode.h"
 #include "MediaCanStartListener.h"
@@ -32,11 +31,17 @@
 #include "MediaProducer.h"
 #include "PlatformMediaSession.h"
 #include "VisibilityChangeClient.h"
+#include <wtf/UniqueRef.h>
 
 namespace WebCore {
 
 class DOMWindow;
+class HTMLMediaElement;
+class MediaStream;
+class MediaStreamAudioDestinationNode;
+class MediaStreamAudioSourceNode;
 
+struct AudioContextOptions;
 struct AudioTimestamp;
 
 class AudioContext final
@@ -48,7 +53,7 @@
     WTF_MAKE_ISO_ALLOCATED(AudioContext);
 public:
     // Create an AudioContext for rendering to the audio hardware.
-    static ExceptionOr<Ref<AudioContext>> create(Document&, AudioContextOptions&& = { });
+    static ExceptionOr<Ref<AudioContext>> create(Document&, AudioContextOptions&&);
     ~AudioContext();
 
     WEBCORE_EXPORT static void setDefaultSampleRateForTesting(Optional<float>);
@@ -55,8 +60,9 @@
 
     void close(DOMPromiseDeferred<void>&&);
 
-    DefaultAudioDestinationNode& destination();
-    const DefaultAudioDestinationNode& destination() const;
+    DefaultAudioDestinationNode& destination() final { return m_destinationNode.get(); }
+    const DefaultAudioDestinationNode& destination() const final { return m_destinationNode.get(); }
+
     double baseLatency();
 
     AudioTimestamp getOutputTimestamp(DOMWindow&);
@@ -88,8 +94,8 @@
     void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
     void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
 
-protected:
-    explicit AudioContext(Document&, const AudioContextOptions& = { });
+private:
+    AudioContext(Document&, const AudioContextOptions&);
 
     bool willBeginPlayback();
 
@@ -97,7 +103,6 @@
     const Logger& logger() const final;
 #endif
 
-private:
     void constructCommon();
 
     bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
@@ -105,6 +110,9 @@
 
     bool willPausePlayback();
 
+    void uninitialize() final;
+    bool isOfflineContext() const final { return false; }
+
     // MediaProducer
     MediaProducer::MediaStateFlags mediaState() const final;
     void pageMutedStateDidChange() final;
@@ -129,9 +137,11 @@
     void visibilityStateChanged() final;
 
     // ActiveDOMObject
+    const char* activeDOMObjectName() const final;
     void suspend(ReasonForSuspension) final;
     void resume() final;
 
+    UniqueRef<DefaultAudioDestinationNode> m_destinationNode;
     std::unique_ptr<PlatformMediaSession> m_mediaSession;
 
     BehaviorRestrictions m_restrictions { NoRestrictions };
@@ -142,3 +152,7 @@
 };
 
 } // WebCore
+
+SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::AudioContext)
+    static bool isType(const WebCore::BaseAudioContext& context) { return !context.isOfflineContext(); }
+SPECIALIZE_TYPE_TRAITS_END()

Modified: trunk/Source/WebCore/Modules/webaudio/AudioContextState.h (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioContextState.h	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContextState.h	2021-05-16 01:05:18 UTC (rev 277553)
@@ -27,6 +27,6 @@
 
 namespace WebCore {
 
-enum class AudioContextState { Suspended, Running, Interrupted, Closed };
+enum class AudioContextState : uint8_t { Suspended, Running, Interrupted, Closed };
 
 }

Modified: trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -387,7 +387,7 @@
     if (!channelCount)
         return Exception { NotSupportedError, "Channel count cannot be 0"_s };
     
-    if (channelCount > AudioContext::maxNumberOfChannels())
+    if (channelCount > AudioContext::maxNumberOfChannels)
         return Exception { IndexSizeError, "Channel count exceeds maximum limit"_s };
 
     if (m_channelCount == channelCount)

Modified: trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -42,7 +42,7 @@
     , m_numberOfChannels(numberOfChannels)
     , m_desiredNumberOfChannels(numberOfChannels)
 {
-    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
+    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels);
 
     m_internalBus = AudioBus::create(numberOfChannels, AudioUtilities::renderQuantumSize);
 }
@@ -49,7 +49,7 @@
 
 void AudioNodeOutput::setNumberOfChannels(unsigned numberOfChannels)
 {
-    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
+    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels);
     ASSERT(context().isGraphOwner());
 
     m_desiredNumberOfChannels = numberOfChannels;

Modified: trunk/Source/WebCore/Modules/webaudio/AudioWorkletNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/AudioWorkletNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/AudioWorkletNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -68,7 +68,7 @@
             return Exception { IndexSizeError, "Length of specified outputChannelCount does not match the given number of outputs"_s };
 
         for (auto& channelCount : *options.outputChannelCount) {
-            if (channelCount < 1 || channelCount > AudioContext::maxNumberOfChannels())
+            if (channelCount < 1 || channelCount > AudioContext::maxNumberOfChannels)
                 return Exception { NotSupportedError, "Provided number of channels for output is outside supported range"_s };
         }
     }

Modified: trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -50,7 +50,6 @@
 #include "ConstantSourceNode.h"
 #include "ConstantSourceOptions.h"
 #include "ConvolverNode.h"
-#include "DefaultAudioDestinationNode.h"
 #include "DelayNode.h"
 #include "DelayOptions.h"
 #include "Document.h"
@@ -68,8 +67,6 @@
 #include "JSDOMPromiseDeferred.h"
 #include "Logging.h"
 #include "NetworkingContext.h"
-#include "OfflineAudioCompletionEvent.h"
-#include "OfflineAudioDestinationNode.h"
 #include "OscillatorNode.h"
 #include "Page.h"
 #include "PannerNode.h"
@@ -80,8 +77,14 @@
 #include "StereoPannerNode.h"
 #include "StereoPannerOptions.h"
 #include "WaveShaperNode.h"
+#include <JavaScriptCore/ArrayBuffer.h>
 #include <JavaScriptCore/ScriptCallStack.h>
+#include <wtf/Atomics.h>
+#include <wtf/IsoMallocInlines.h>
+#include <wtf/MainThread.h>
+#include <wtf/Ref.h>
 #include <wtf/Scope.h>
+#include <wtf/text/WTFString.h>
 
 #if DEBUG_AUDIONODE_REFERENCES
 #include <stdio.h>
@@ -91,20 +94,6 @@
 #include "GStreamerCommon.h"
 #endif
 
-#if PLATFORM(IOS_FAMILY)
-#include "ScriptController.h"
-#include "Settings.h"
-#endif
-
-#include <JavaScriptCore/ArrayBuffer.h>
-#include <wtf/Atomics.h>
-#include <wtf/IsoMallocInlines.h>
-#include <wtf/MainThread.h>
-#include <wtf/Ref.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Scope.h>
-#include <wtf/text/WTFString.h>
-
 namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(BaseAudioContext);
@@ -114,9 +103,7 @@
     return sampleRate >= 3000 && sampleRate <= 384000;
 }
 
-unsigned BaseAudioContext::s_hardwareContextCount;
-
-static uint64_t generateAudioContextID()
+static uint64_t generateContextID()
 {
     ASSERT(isMainThread());
     static uint64_t contextIDSeed = 0;
@@ -130,55 +117,21 @@
     return contexts;
 }
 
-// Constructor for rendering to the audio hardware.
-BaseAudioContext::BaseAudioContext(Document& document, const AudioContextOptions& contextOptions)
+BaseAudioContext::BaseAudioContext(Document& document)
     : ActiveDOMObject(document)
 #if !RELEASE_LOG_DISABLED
     , m_logger(document.logger())
     , m_logIdentifier(uniqueLogIdentifier())
 #endif
-    , m_contextID(generateAudioContextID())
+    , m_contextID(generateContextID())
     , m_worklet(AudioWorklet::create(*this))
-    , m_destinationNode(makeUniqueRef<DefaultAudioDestinationNode>(*this, contextOptions.sampleRate))
     , m_listener(AudioListener::create(*this))
 {
     liveAudioContexts().add(m_contextID);
 
-    // According to spec AudioContext must die only after page navigate.
-    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
-    setPendingActivity();
-
     FFTFrame::initialize();
-
-    // Unlike OfflineAudioContext, AudioContext does not require calling resume() to start rendering.
-    // Lazy initialization starts rendering so we schedule a task here to make sure lazy initialization
-    // ends up happening, even if no audio node gets constructed.
-    postTask([this] {
-        if (m_isStopScheduled)
-            return;
-
-        lazyInitialize();
-    });
 }
 
-// Constructor for offline (non-realtime) rendering.
-BaseAudioContext::BaseAudioContext(Document& document, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
-    : ActiveDOMObject(document)
-#if !RELEASE_LOG_DISABLED
-    , m_logger(document.logger())
-    , m_logIdentifier(uniqueLogIdentifier())
-#endif
-    , m_contextID(generateAudioContextID())
-    , m_worklet(AudioWorklet::create(*this))
-    , m_isOfflineContext(true)
-    , m_renderTarget(WTFMove(renderTarget))
-    , m_destinationNode(makeUniqueRef<OfflineAudioDestinationNode>(*this, numberOfChannels, sampleRate, m_renderTarget.copyRef()))
-    , m_listener(AudioListener::create(*this))
-{
-    liveAudioContexts().add(m_contextID);
-    FFTFrame::initialize();
-}
-
 BaseAudioContext::~BaseAudioContext()
 {
     liveAudioContexts().remove(m_contextID);
@@ -212,7 +165,7 @@
     if (m_isAudioThreadFinished)
         return;
 
-    m_destinationNode->initialize();
+    destination().initialize();
 
     m_isInitialized = true;
 }
@@ -240,19 +193,11 @@
         return;
 
     // This stops the audio thread and all audio rendering.
-    m_destinationNode->uninitialize();
+    destination().uninitialize();
 
     // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
     m_isAudioThreadFinished = true;
 
-    if (!isOfflineContext()) {
-        ASSERT(s_hardwareContextCount);
-        --s_hardwareContextCount;
-
-        // Offline contexts move to 'Closed' state when dispatching the completion event.
-        setState(State::Closed);
-    }
-
     {
         AutoLocker locker(*this);
         // This should have been called from handlePostRenderTasks() at the end of rendering.
@@ -268,11 +213,6 @@
     m_isInitialized = false;
 }
 
-bool BaseAudioContext::isInitialized() const
-{
-    return m_isInitialized;
-}
-
 void BaseAudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
 {
     size_t stateIndex = static_cast<size_t>(state);
@@ -302,14 +242,15 @@
 
 void BaseAudioContext::stop()
 {
+    ASSERT(isMainThread());
     ALWAYS_LOG(LOGIDENTIFIER);
     
-    ASSERT(isMainThread());
-    auto protectedThis = makeRef(*this);
-
     // Usually ScriptExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
+
+    auto protectedThis = makeRef(*this);
+
     m_isStopScheduled = true;
 
     ASSERT(document());
@@ -319,21 +260,11 @@
     clear();
 }
 
-const char* BaseAudioContext::activeDOMObjectName() const
-{
-    return "AudioContext";
-}
-
 Document* BaseAudioContext::document() const
 {
     return downcast<Document>(m_scriptExecutionContext);
 }
 
-float BaseAudioContext::sampleRate() const
-{
-    return m_destinationNode->sampleRate();
-}
-
 bool BaseAudioContext::wouldTaintOrigin(const URL& url) const
 {
     if (url.protocolIsData())
@@ -378,11 +309,6 @@
     });
 }
 
-AudioListener& WebCore::BaseAudioContext::listener()
-{
-    return m_listener;
-}
-
 ExceptionOr<Ref<AudioBufferSourceNode>> BaseAudioContext::createBufferSource()
 {
     ALWAYS_LOG(LOGIDENTIFIER);
@@ -435,13 +361,13 @@
     // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
     // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
 
-    if (numberOfInputChannels > maxNumberOfChannels())
+    if (numberOfInputChannels > maxNumberOfChannels)
         return Exception { NotSupportedError, "numberOfInputChannels exceeds maximum number of channels"_s };
 
     // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
     // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
 
-    if (numberOfOutputChannels > maxNumberOfChannels())
+    if (numberOfOutputChannels > maxNumberOfChannels)
         return Exception { NotSupportedError, "numberOfOutputChannels exceeds maximum number of channels"_s };
 
     return ScriptProcessorNode::create(*this, bufferSize, numberOfInputChannels, numberOfOutputChannels);
@@ -583,11 +509,6 @@
     return IIRFilterNode::create(scriptExecutionContext, *this, WTFMove(options));
 }
 
-static bool isFinishedSourceNode(const AudioConnectionRefPtr<AudioNode>& node)
-{
-    return node->isFinishedSourceNode();
-}
-
 void BaseAudioContext::derefFinishedSourceNodes()
 {
     ASSERT(isGraphOwner());
@@ -596,7 +517,7 @@
     if (!m_hasFinishedAudioSourceNodes)
         return;
 
-    m_referencedSourceNodes.removeAllMatching(isFinishedSourceNode);
+    m_referencedSourceNodes.removeAllMatching([](auto& node) { return node->isFinishedSourceNode(); });
     m_hasFinishedAudioSourceNodes = false;
 }
 
@@ -641,7 +562,7 @@
         mustReleaseLock = false;
     } else {
         // Acquire the lock.
-        m_contextGraphMutex.lock();
+        m_contextGraphLock.lock();
         m_graphOwnerThread = &thisThread;
         mustReleaseLock = true;
     }
@@ -649,13 +570,10 @@
 
 bool BaseAudioContext::tryLock(bool& mustReleaseLock)
 {
-    Thread& thisThread = Thread::current();
-    bool isAudioThread = &thisThread == audioThread();
-
     // Try to catch cases of using try lock on main thread - it should use regular lock.
-    ASSERT(isAudioThread || isAudioThreadFinished());
+    ASSERT(isAudioThread() || isAudioThreadFinished());
     
-    if (!isAudioThread) {
+    if (!isAudioThread()) {
         // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
         lock(mustReleaseLock);
         return true;
@@ -663,16 +581,16 @@
     
     bool hasLock;
     
-    if (&thisThread == m_graphOwnerThread) {
+    if (isGraphOwner()) {
         // Thread already has the lock.
         hasLock = true;
         mustReleaseLock = false;
     } else {
         // Don't already have the lock - try to acquire it.
-        hasLock = m_contextGraphMutex.tryLock();
+        hasLock = m_contextGraphLock.tryLock();
         
         if (hasLock)
-            m_graphOwnerThread = &thisThread;
+            m_graphOwnerThread = &Thread::current();
 
         mustReleaseLock = hasLock;
     }
@@ -685,19 +603,9 @@
     ASSERT(m_graphOwnerThread == &Thread::current());
 
     m_graphOwnerThread = nullptr;
-    m_contextGraphMutex.unlock();
+    m_contextGraphLock.unlock();
 }
 
-bool BaseAudioContext::isAudioThread() const
-{
-    return m_audioThread == &Thread::current();
-}
-
-bool BaseAudioContext::isGraphOwner() const
-{
-    return m_graphOwnerThread == &Thread::current();
-}
-
 void BaseAudioContext::addDeferredDecrementConnectionCount(AudioNode* node)
 {
     ASSERT(isAudioThread());
@@ -739,26 +647,27 @@
     // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
     // from the render graph (in which case they'll render silence).
     bool mustReleaseLock;
-    if (tryLock(mustReleaseLock)) {
-        // Take care of finishing any derefs where the tryLock() failed previously.
-        handleDeferredDecrementConnectionCounts();
+    if (!tryLock(mustReleaseLock))
+        return;
 
-        // Dynamically clean up nodes which are no longer needed.
-        derefFinishedSourceNodes();
+    // Take care of finishing any derefs where the tryLock() failed previously.
+    handleDeferredDecrementConnectionCounts();
 
-        // Don't delete in the real-time thread. Let the main thread do it.
-        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
-        scheduleNodeDeletion();
+    // Dynamically clean up nodes which are no longer needed.
+    derefFinishedSourceNodes();
 
-        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
-        handleDirtyAudioSummingJunctions();
-        handleDirtyAudioNodeOutputs();
+    // Don't delete in the real-time thread. Let the main thread do it.
+    // Ref-counted objects held by certain AudioNodes may not be thread-safe.
+    scheduleNodeDeletion();
 
-        updateAutomaticPullNodes();
+    // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
+    handleDirtyAudioSummingJunctions();
+    handleDirtyAudioNodeOutputs();
 
-        if (mustReleaseLock)
-            unlock();
-    }
+    updateAutomaticPullNodes();
+
+    if (mustReleaseLock)
+        unlock();
 }
 
 void BaseAudioContext::handleDeferredDecrementConnectionCounts()
@@ -820,29 +729,28 @@
 
     // Protect this object from being deleted before we release the mutex locked by AutoLocker.
     auto protectedThis = makeRef(*this);
-    {
-        AutoLocker locker(*this);
 
-        while (m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete.takeLast();
+    AutoLocker locker(*this);
 
-            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
-            unsigned numberOfInputs = node->numberOfInputs();
-            for (unsigned i = 0; i < numberOfInputs; ++i)
-                m_dirtySummingJunctions.remove(node->input(i));
+    while (m_nodesToDelete.size()) {
+        AudioNode* node = m_nodesToDelete.takeLast();
 
-            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
-            unsigned numberOfOutputs = node->numberOfOutputs();
-            for (unsigned i = 0; i < numberOfOutputs; ++i)
-                m_dirtyAudioNodeOutputs.remove(node->output(i));
+        // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
+        unsigned numberOfInputs = node->numberOfInputs();
+        for (unsigned i = 0; i < numberOfInputs; ++i)
+            m_dirtySummingJunctions.remove(node->input(i));
 
-            ASSERT_WITH_MESSAGE(node->nodeType() != AudioNode::NodeTypeDestination, "Destination node is owned by the BaseAudioContext");
+        // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
+        unsigned numberOfOutputs = node->numberOfOutputs();
+        for (unsigned i = 0; i < numberOfOutputs; ++i)
+            m_dirtyAudioNodeOutputs.remove(node->output(i));
 
-            // Finally, delete it.
-            delete node;
-        }
-        m_isDeletionScheduled = false;
+        ASSERT_WITH_MESSAGE(node->nodeType() != AudioNode::NodeTypeDestination, "Destination node is owned by the BaseAudioContext");
+
+        // Finally, delete it.
+        delete node;
     }
+    m_isDeletionScheduled = false;
 }
 
 void BaseAudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
@@ -915,20 +823,21 @@
 {
     ASSERT(isGraphOwner());
 
-    if (m_automaticPullNodesNeedUpdating) {
-        // Heap allocations are forbidden on the audio thread for performance reasons so we need to
-        // explicitly allow the following allocation(s).
-        DisableMallocRestrictionsForCurrentThreadScope disableMallocRestrictions;
+    if (!m_automaticPullNodesNeedUpdating)
+        return;
 
-        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
-        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
+    // Heap allocations are forbidden on the audio thread for performance reasons so we need to
+    // explicitly allow the following allocation(s).
+    DisableMallocRestrictionsForCurrentThreadScope disableMallocRestrictions;
 
-        unsigned i = 0;
-        for (auto& output : m_automaticPullNodes)
-            m_renderingAutomaticPullNodes[i++] = output;
+    // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
+    m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
 
-        m_automaticPullNodesNeedUpdating = false;
-    }
+    unsigned i = 0;
+    for (auto& output : m_automaticPullNodes)
+        m_renderingAutomaticPullNodes[i++] = output;
+
+    m_automaticPullNodesNeedUpdating = false;
 }
 
 void BaseAudioContext::processAutomaticPullNodes(size_t framesToProcess)
@@ -944,6 +853,7 @@
     return ActiveDOMObject::scriptExecutionContext();
 }
 
+// FIXME: This should probably move to AudioContext.
 void BaseAudioContext::isPlayingAudioDidChange()
 {
     // Heap allocations are forbidden on the audio thread for performance reasons so we need to
@@ -958,53 +868,6 @@
     });
 }
 
-// FIXME: Move to OfflineAudioContext once WebKitOfflineAudioContext gets removed.
-void BaseAudioContext::finishedRendering(bool didRendering)
-{
-    ASSERT(isOfflineContext());
-    ASSERT(isMainThread());
-    auto finishedRenderingScope = WTF::makeScopeExit([this] {
-        didFinishOfflineRendering(Exception { InvalidStateError, "Offline rendering failed"_s });
-    });
-
-    if (!isMainThread())
-        return;
-
-    auto clearPendingActivityIfExitEarly = WTF::makeScopeExit([this] {
-        clearPendingActivity();
-    });
-
-
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    if (!didRendering)
-        return;
-
-    RefPtr<AudioBuffer> renderedBuffer = m_renderTarget.get();
-    setState(State::Closed);
-
-    ASSERT(renderedBuffer);
-    if (!renderedBuffer)
-        return;
-
-    // Avoid firing the event if the document has already gone away.
-    if (m_isStopScheduled)
-        return;
-
-    clearPendingActivityIfExitEarly.release();
-    queueTaskToDispatchEvent(*this, TaskSource::MediaElement, OfflineAudioCompletionEvent::create(*renderedBuffer));
-
-    finishedRenderingScope.release();
-    didFinishOfflineRendering(renderedBuffer.releaseNonNull());
-}
-
-void BaseAudioContext::dispatchEvent(Event& event)
-{
-    EventTarget::dispatchEvent(event);
-    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
-        clearPendingActivity();
-}
-
 void BaseAudioContext::incrementActiveSourceCount()
 {
     ++m_activeSourceCount;
@@ -1015,18 +878,11 @@
     --m_activeSourceCount;
 }
 
-void BaseAudioContext::didSuspendRendering(size_t)
+void BaseAudioContext::postTask(Function<void()>&& task)
 {
-    setState(State::Suspended);
-}
-
-void BaseAudioContext::postTask(WTF::Function<void()>&& task)
-{
     ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return;
-
-    queueTaskKeepingObjectAlive(*this, TaskSource::MediaElement, WTFMove(task));
+    if (!m_isStopScheduled)
+        queueTaskKeepingObjectAlive(*this, TaskSource::MediaElement, WTFMove(task));
 }
 
 const SecurityOrigin* BaseAudioContext::origin() const
@@ -1104,7 +960,7 @@
 
     // If we're already rendering when the worklet becomes ready, we need to restart
     // rendering in order to switch to the audio worklet thread.
-    m_destinationNode->restartRendering();
+    destination().restartRendering();
 }
 
 #if !RELEASE_LOG_DISABLED

Modified: trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,9 +27,6 @@
 
 #if ENABLE(WEB_AUDIO)
 #include "ActiveDOMObject.h"
-#include "AsyncAudioDecoder.h"
-#include "AudioBus.h"
-#include "AudioContextOptions.h"
 #include "AudioContextState.h"
 #include "AudioDestinationNode.h"
 #include "EventTarget.h"
@@ -36,28 +33,22 @@
 #include "JSDOMPromiseDeferred.h"
 #include "OscillatorType.h"
 #include "PeriodicWaveConstraints.h"
-#include "ScriptExecutionContext.h"
-#include <JavaScriptCore/ConsoleTypes.h>
-#include <JavaScriptCore/Float32Array.h>
 #include <atomic>
-#include <wtf/HashSet.h>
+#include <wtf/Forward.h>
 #include <wtf/LoggerHelper.h>
 #include <wtf/MainThread.h>
-#include <wtf/RefPtr.h>
 #include <wtf/ThreadSafeRefCounted.h>
 #include <wtf/Threading.h>
-#include <wtf/UniqueRef.h>
-#include <wtf/Vector.h>
-#include <wtf/WeakPtr.h>
-#include <wtf/text/AtomStringHash.h>
 
 namespace WebCore {
 
 class AnalyserNode;
+class AsyncAudioDecoder;
 class AudioBuffer;
 class AudioBufferCallback;
 class AudioBufferSourceNode;
 class AudioListener;
+class AudioNodeOutput;
 class AudioSummingJunction;
 class AudioWorklet;
 class BiquadFilterNode;
@@ -69,21 +60,17 @@
 class Document;
 class DynamicsCompressorNode;
 class GainNode;
-class HTMLMediaElement;
 class IIRFilterNode;
 class MediaElementAudioSourceNode;
-class MediaStream;
-class MediaStreamAudioDestinationNode;
-class MediaStreamAudioSourceNode;
 class OscillatorNode;
 class PannerNode;
 class PeriodicWave;
-class ScriptExecutionContext;
 class ScriptProcessorNode;
 class SecurityOrigin;
 class StereoPannerNode;
 class WaveShaperNode;
 
+struct AudioIOPosition;
 struct AudioParamDescriptor;
 
 template<typename IDLType> class DOMPromiseDeferred;
@@ -112,34 +99,25 @@
     uint64_t contextID() const { return m_contextID; }
 
     Document* document() const;
-    bool isInitialized() const;
+    bool isInitialized() const { return m_isInitialized; }
     
-    bool isOfflineContext() const { return m_isOfflineContext; }
+    virtual bool isOfflineContext() const = 0;
+    virtual AudioDestinationNode& destination() = 0;
+    virtual const AudioDestinationNode& destination() const = 0;
 
-    AudioDestinationNode& destination() { return m_destinationNode.get(); }
-    const AudioDestinationNode& destination() const { return m_destinationNode.get(); }
-
-    size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
-    double currentTime() const { return m_destinationNode->currentTime(); }
-    float sampleRate() const;
+    size_t currentSampleFrame() const { return destination().currentSampleFrame(); }
+    double currentTime() const { return destination().currentTime(); }
+    float sampleRate() const { return destination().sampleRate(); }
     unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
 
     void incrementActiveSourceCount();
     void decrementActiveSourceCount();
 
-    virtual bool shouldSuspend() { return false; }
-    
-    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, unsigned length, float sampleRate);
-
     // Asynchronous audio file data decoding.
     void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&, Optional<Ref<DeferredPromise>>&& = WTF::nullopt);
 
-    AudioListener& listener();
+    AudioListener& listener() { return m_listener; }
 
-    virtual void didSuspendRendering(size_t frame);
-
-    AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
-
     using State = AudioContextState;
     State state() const { return m_state; }
     bool isClosed() const { return m_state == State::Closed; }
@@ -166,6 +144,7 @@
     ExceptionOr<Ref<ConstantSourceNode>> createConstantSource();
     ExceptionOr<Ref<StereoPannerNode>> createStereoPanner();
     ExceptionOr<Ref<IIRFilterNode>> createIIRFilter(ScriptExecutionContext&, Vector<double>&& feedforward, Vector<double>&& feedback);
+    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, unsigned length, float sampleRate);
 
     // Called at the start of each render quantum.
     void handlePreRenderTasks(const AudioIOPosition& outputPosition);
@@ -175,9 +154,6 @@
     // Called at the end of each render quantum.
     void handlePostRenderTasks();
 
-    // Called periodically at the end of each render quantum to dereference finished source nodes.
-    void derefFinishedSourceNodes();
-
     // We schedule deletion of all marked nodes at the end of each realtime render quantum.
     void markForDeletion(AudioNode&);
     void deleteMarkedNodes();
@@ -194,7 +170,7 @@
     void incrementConnectionCount()
     {
         ASSERT(isMainThread());
-        m_connectionCount++;
+        ++m_connectionCount;
     }
 
     unsigned connectionCount() const { return m_connectionCount; }
@@ -204,11 +180,10 @@
     //
     
     void setAudioThread(Thread& thread) { m_audioThread = &thread; } // FIXME: check either not initialized or the same
-    Thread* audioThread() const { return m_audioThread; }
-    bool isAudioThread() const;
+    bool isAudioThread() const { return m_audioThread == &Thread::current(); }
 
     // Returns true only after the audio thread has been started and then shutdown.
-    bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
+    bool isAudioThreadFinished() const { return m_isAudioThreadFinished; }
 
     // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
     void lock(bool& mustReleaseLock);
@@ -220,17 +195,15 @@
     void unlock();
 
     // Returns true if this thread owns the context's lock.
-    bool isGraphOwner() const;
+    bool isGraphOwner() const { return m_graphOwnerThread == &Thread::current(); }
 
-    // Returns the maximum number of channels we can support.
-    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
+    // This is considering 32 is large enough for multiple channels audio.
+    // It is somewhat arbitrary and could be increased if necessary.
+    static constexpr unsigned maxNumberOfChannels = 32;
     
     // In AudioNode::decrementConnectionCount() a tryLock() is used for calling decrementConnectionCountWithLock(), but if it fails keep track here.
     void addDeferredDecrementConnectionCount(AudioNode*);
 
-    // In the audio thread at the start of each render cycle, we'll call handleDeferredDecrementConnectionCounts().
-    void handleDeferredDecrementConnectionCounts();
-
     // Only accessed when the graph lock is held.
     void markSummingJunctionDirty(AudioSummingJunction*);
     void markAudioNodeOutputDirty(AudioNodeOutput*);
@@ -239,15 +212,8 @@
     void removeMarkedSummingJunction(AudioSummingJunction*);
 
     // EventTarget
-    EventTargetInterface eventTargetInterface() const final;
     ScriptExecutionContext* scriptExecutionContext() const final;
-    void refEventTarget() override { ref(); }
-    void derefEventTarget() override { deref(); }
 
-    void finishedRendering(bool didRendering);
-
-    static unsigned s_hardwareContextCount;
-
     void isPlayingAudioDidChange();
 
     virtual void sourceNodeWillBeginPlayback(AudioNode&);
@@ -262,7 +228,7 @@
     const void* nextAudioParameterLogIdentifier() { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
 #endif
 
-    void postTask(WTF::Function<void()>&&);
+    void postTask(Function<void()>&&);
     bool isStopped() const { return m_isStopScheduled; }
     const SecurityOrigin* origin() const;
     void addConsoleMessage(MessageSource, MessageLevel, const String& message);
@@ -296,8 +262,7 @@
     const HashMap<String, Vector<AudioParamDescriptor>>& parameterDescriptorMap() const { return m_parameterDescriptorMap; }
 
 protected:
-    explicit BaseAudioContext(Document&, const AudioContextOptions& = { });
-    BaseAudioContext(Document&, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
+    explicit BaseAudioContext(Document&);
     
     void clearPendingActivity();
     void setPendingActivity();
@@ -313,7 +278,6 @@
     void addReaction(State, DOMPromiseDeferred<void>&&);
     void setState(State);
 
-    virtual void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) { }
     void clear();
 
 private:
@@ -327,12 +291,19 @@
     void refSourceNode(AudioNode&);
     void derefSourceNode(AudioNode&);
 
+    // Called periodically at the end of each render quantum to dereference finished source nodes.
+    void derefFinishedSourceNodes();
+
+    // In the audio thread at the start of each render cycle, we'll call handleDeferredDecrementConnectionCounts().
+    void handleDeferredDecrementConnectionCounts();
+
     // EventTarget
-    void dispatchEvent(Event&) final;
+    EventTargetInterface eventTargetInterface() const final;
+    void refEventTarget() override { ref(); }
+    void derefEventTarget() override { deref(); }
 
     // ActiveDOMObject API.
     void stop() override;
-    const char* activeDOMObjectName() const override;
 
     // When the context goes away, there might still be some sources which haven't finished playing.
     // Make sure to dereference them here.
@@ -366,13 +337,6 @@
     // They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
     Vector<AudioNode*> m_nodesToDelete;
 
-    bool m_isDeletionScheduled { false };
-    bool m_isStopScheduled { false };
-    bool m_isInitialized { false };
-    bool m_isAudioThreadFinished { false };
-    bool m_automaticPullNodesNeedUpdating { false };
-    bool m_isOfflineContext { false };
-
     // Only accessed when the graph lock is held.
     HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
     HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
@@ -385,35 +349,18 @@
     Vector<AudioNode*> m_deferredBreakConnectionList;
     Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;
 
-    RefPtr<AudioBuffer> m_renderTarget;
-    UniqueRef<AudioDestinationNode> m_destinationNode;
     Ref<AudioListener> m_listener;
 
-    unsigned m_connectionCount { 0 };
+    std::atomic<Thread*> m_audioThread;
+    std::atomic<Thread*> m_graphOwnerThread; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
 
-    // Graph locking.
-    Lock m_contextGraphMutex;
-    // FIXME: Using volatile seems incorrect.
-    // https://bugs.webkit.org/show_bug.cgi?id=180332
-    Thread* volatile m_audioThread { nullptr };
-    Thread* volatile m_graphOwnerThread { nullptr }; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
-
     std::unique_ptr<AsyncAudioDecoder> m_audioDecoder;
 
-    // This is considering 32 is large enough for multiple channels audio. 
-    // It is somewhat arbitrary and could be increased if necessary.
-    enum { MaxNumberOfChannels = 32 };
-
-    // Number of AudioBufferSourceNodes that are active (playing).
-    std::atomic<int> m_activeSourceCount { 0 };
-
-    State m_state { State::Suspended };
     RefPtr<PendingActivity<BaseAudioContext>> m_pendingActivity;
 
     AudioIOPosition m_outputPosition;
 
     HashMap<String, Vector<AudioParamDescriptor>> m_parameterDescriptorMap;
-    bool m_hasFinishedAudioSourceNodes { false };
 
     // These are cached per audio context for performance reasons. They cannot be
     // static because they rely on the sample rate.
@@ -421,6 +368,19 @@
     RefPtr<PeriodicWave> m_cachedPeriodicWaveSquare;
     RefPtr<PeriodicWave> m_cachedPeriodicWaveSawtooth;
     RefPtr<PeriodicWave> m_cachedPeriodicWaveTriangle;
+
+    // Number of AudioBufferSourceNodes that are active (playing).
+    std::atomic<int> m_activeSourceCount;
+
+    unsigned m_connectionCount { 0 };
+    State m_state { State::Suspended };
+    Lock m_contextGraphLock;
+    bool m_isDeletionScheduled { false };
+    bool m_isStopScheduled { false };
+    bool m_isInitialized { false };
+    bool m_isAudioThreadFinished { false };
+    bool m_automaticPullNodesNeedUpdating { false };
+    bool m_hasFinishedAudioSourceNodes { false };
 };
 
 } // WebCore

Modified: trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -43,7 +43,7 @@
 
 ExceptionOr<Ref<ChannelMergerNode>> ChannelMergerNode::create(BaseAudioContext& context, const ChannelMergerOptions& options)
 {
-    if (options.numberOfInputs > AudioContext::maxNumberOfChannels() || !options.numberOfInputs)
+    if (options.numberOfInputs > AudioContext::maxNumberOfChannels || !options.numberOfInputs)
         return Exception { IndexSizeError, "Number of inputs is not in the allowed range."_s };
     
     auto merger = adoptRef(*new ChannelMergerNode(context, options.numberOfInputs));

Modified: trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -39,7 +39,7 @@
 
 ExceptionOr<Ref<ChannelSplitterNode>> ChannelSplitterNode::create(BaseAudioContext& context, const ChannelSplitterOptions& options)
 {
-    if (options.numberOfOutputs > AudioContext::maxNumberOfChannels() || !options.numberOfOutputs)
+    if (options.numberOfOutputs > AudioContext::maxNumberOfChannels || !options.numberOfOutputs)
         return Exception { IndexSizeError, "Number of outputs is not in the allowed range"_s };
     
     auto splitter = adoptRef(*new ChannelSplitterNode(context, options.numberOfOutputs));

Modified: trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -46,7 +46,7 @@
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(DefaultAudioDestinationNode);
 
-DefaultAudioDestinationNode::DefaultAudioDestinationNode(BaseAudioContext& context, Optional<float> sampleRate)
+DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext& context, Optional<float> sampleRate)
     : AudioDestinationNode(context, sampleRate.valueOr(AudioDestination::hardwareSampleRate()))
 {
     ASSERT(BaseAudioContext::isSupportedSampleRate(AudioDestination::hardwareSampleRate()));
@@ -58,6 +58,16 @@
     uninitialize();
 }
 
+AudioContext& DefaultAudioDestinationNode::context()
+{
+    return downcast<AudioContext>(AudioDestinationNode::context());
+}
+
+const AudioContext& DefaultAudioDestinationNode::context() const
+{
+    return downcast<AudioContext>(AudioDestinationNode::context());
+}
+
 void DefaultAudioDestinationNode::initialize()
 {
     ASSERT(isMainThread()); 

Modified: trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h	2021-05-16 01:05:18 UTC (rev 277553)
@@ -28,12 +28,16 @@
 
 namespace WebCore {
 
+class AudioContext;
 class AudioDestination;
     
 class DefaultAudioDestinationNode final : public AudioDestinationNode {
     WTF_MAKE_ISO_ALLOCATED(DefaultAudioDestinationNode);
 public:
-    explicit DefaultAudioDestinationNode(BaseAudioContext&, Optional<float> = WTF::nullopt);
+    explicit DefaultAudioDestinationNode(AudioContext&, Optional<float> = WTF::nullopt);
+
+    AudioContext& context();
+    const AudioContext& context() const;
     
     virtual ~DefaultAudioDestinationNode();
 

Modified: trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -86,7 +86,7 @@
     m_muted = wouldTaintOrigin();
 
     if (numberOfChannels != m_sourceNumberOfChannels || sourceSampleRate != m_sourceSampleRate) {
-        if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || sourceSampleRate < minSampleRate || sourceSampleRate > maxSampleRate) {
+        if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels || sourceSampleRate < minSampleRate || sourceSampleRate > maxSampleRate) {
             // process() will generate silence for these uninitialized values.
             LOG(Media, "MediaElementAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
             m_sourceNumberOfChannels = 0;

Modified: trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -94,7 +94,7 @@
         return;
 
     // The sample-rate must be equal to the context's sample-rate.
-    if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels()) {
+    if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels) {
         // process() will generate silence for these uninitialized values.
         LOG(Media, "MediaStreamAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
         m_sourceNumberOfChannels = 0;

Modified: trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012, Google Inc. All rights reserved.
+ * Copyright (C) 2020-2021, Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -32,6 +33,7 @@
 #include "AudioUtilities.h"
 #include "Document.h"
 #include "JSAudioBuffer.h"
+#include "OfflineAudioContextOptions.h"
 #include <wtf/IsoMallocInlines.h>
 #include <wtf/Scope.h>
 
@@ -39,53 +41,60 @@
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(OfflineAudioContext);
 
-inline OfflineAudioContext::OfflineAudioContext(Document& document, unsigned numberOfChannels, unsigned length, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
-    : BaseAudioContext(document, numberOfChannels, sampleRate, WTFMove(renderTarget))
-    , m_length(length)
+OfflineAudioContext::OfflineAudioContext(Document& document, const OfflineAudioContextOptions& options)
+    : BaseAudioContext(document)
+    , m_destinationNode(makeUniqueRef<OfflineAudioDestinationNode>(*this, options.numberOfChannels, options.sampleRate, AudioBuffer::create(options.numberOfChannels, options.length, options.sampleRate)))
+    , m_length(options.length)
 {
+    if (!renderTarget())
+        document.addConsoleMessage(MessageSource::JS, MessageLevel::Warning, makeString("Failed to construct internal AudioBuffer with ", options.numberOfChannels, " channel(s), a sample rate of ", options.sampleRate, " and a length of ", options.length, "."));
 }
 
-ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, unsigned numberOfChannels, unsigned length, float sampleRate)
+ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, const OfflineAudioContextOptions& options)
 {
     if (!is<Document>(context))
         return Exception { NotSupportedError, "OfflineAudioContext is only supported in Document contexts"_s };
-    if (!numberOfChannels || numberOfChannels > maxNumberOfChannels())
+    if (!options.numberOfChannels || options.numberOfChannels > maxNumberOfChannels)
         return Exception { SyntaxError, "Number of channels is not in range"_s };
-    if (!length)
+    if (!options.length)
         return Exception { SyntaxError, "length cannot be 0"_s };
-    if (!isSupportedSampleRate(sampleRate))
+    if (!isSupportedSampleRate(options.sampleRate))
         return Exception { SyntaxError, "sampleRate is not in range"_s };
 
-    auto renderTarget = AudioBuffer::create(numberOfChannels, length, sampleRate);
-    if (!renderTarget)
-        context.addConsoleMessage(MessageSource::JS, MessageLevel::Warning, makeString("Failed to construct internal AudioBuffer with ", numberOfChannels, " channel(s), a sample rate of ", sampleRate, " and a length of ", length, "."));
-
-    auto audioContext = adoptRef(*new OfflineAudioContext(downcast<Document>(context), numberOfChannels, length, sampleRate, WTFMove(renderTarget)));
+    auto audioContext = adoptRef(*new OfflineAudioContext(downcast<Document>(context), options));
     audioContext->suspendIfNeeded();
     return audioContext;
 }
 
-ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, const OfflineAudioContextOptions& contextOptions)
+ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, unsigned numberOfChannels, unsigned length, float sampleRate)
 {
-    return create(context, contextOptions.numberOfChannels, contextOptions.length, contextOptions.sampleRate);
+    return create(context, { numberOfChannels, length, sampleRate });
 }
 
 void OfflineAudioContext::uninitialize()
 {
+    if (!isInitialized())
+        return;
+
     BaseAudioContext::uninitialize();
 
-    if (auto promise = std::exchange(m_pendingOfflineRenderingPromise, nullptr))
+    if (auto promise = std::exchange(m_pendingRenderingPromise, nullptr))
         promise->reject(Exception { InvalidStateError, "Context is going away"_s });
 }
 
-void OfflineAudioContext::startOfflineRendering(Ref<DeferredPromise>&& promise)
+const char* OfflineAudioContext::activeDOMObjectName() const
 {
+    return "OfflineAudioContext";
+}
+
+void OfflineAudioContext::startRendering(Ref<DeferredPromise>&& promise)
+{
     if (isStopped()) {
         promise->reject(Exception { InvalidStateError, "Context is stopped"_s });
         return;
     }
 
-    if (m_didStartOfflineRendering) {
+    if (m_didStartRendering) {
         promise->reject(Exception { InvalidStateError, "Rendering was already started"_s });
         return;
     }
@@ -104,13 +113,13 @@
         }
 
         setPendingActivity();
-        m_pendingOfflineRenderingPromise = WTFMove(promise);
-        m_didStartOfflineRendering = true;
+        m_pendingRenderingPromise = WTFMove(promise);
+        m_didStartRendering = true;
         setState(State::Running);
     });
 }
 
-void OfflineAudioContext::suspendOfflineRendering(double suspendTime, Ref<DeferredPromise>&& promise)
+void OfflineAudioContext::suspendRendering(double suspendTime, Ref<DeferredPromise>&& promise)
 {
     if (isStopped()) {
         promise->reject(Exception { InvalidStateError, "Context is stopped"_s });
@@ -143,9 +152,9 @@
     }
 }
 
-void OfflineAudioContext::resumeOfflineRendering(Ref<DeferredPromise>&& promise)
+void OfflineAudioContext::resumeRendering(Ref<DeferredPromise>&& promise)
 {
-    if (!m_didStartOfflineRendering) {
+    if (!m_didStartRendering) {
         promise->reject(Exception { InvalidStateError, "Cannot resume an offline audio context that has not started"_s });
         return;
     }
@@ -182,7 +191,7 @@
 
 void OfflineAudioContext::didSuspendRendering(size_t frame)
 {
-    BaseAudioContext::didSuspendRendering(frame);
+    setState(State::Suspended);
 
     clearPendingActivity();
 
@@ -196,17 +205,38 @@
         promise->resolve();
 }
 
-void OfflineAudioContext::didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&& result)
+void OfflineAudioContext::finishedRendering(bool didRendering)
 {
-    auto finishedRenderingScope = WTF::makeScopeExit([this] {
+    ASSERT(isMainThread());
+    ALWAYS_LOG(LOGIDENTIFIER);
+
+    auto uninitializeOnExit = WTF::makeScopeExit([this] {
         uninitialize();
         clear();
     });
 
-    if (!m_pendingOfflineRenderingPromise)
+    setState(State::Closed);
+
+    // Avoid firing the event if the document has already gone away.
+    if (isStopped())
         return;
 
-    auto promise = std::exchange(m_pendingOfflineRenderingPromise, nullptr);
+    RefPtr<AudioBuffer> renderedBuffer = renderTarget();
+    ASSERT(renderedBuffer);
+
+    if (didRendering) {
+        queueTaskToDispatchEvent(*this, TaskSource::MediaElement, OfflineAudioCompletionEvent::create(*renderedBuffer));
+        settleRenderingPromise(renderedBuffer.releaseNonNull());
+    } else
+        settleRenderingPromise(Exception { InvalidStateError, "Offline rendering failed"_s });
+}
+
+void OfflineAudioContext::settleRenderingPromise(ExceptionOr<Ref<AudioBuffer>>&& result)
+{
+    auto promise = std::exchange(m_pendingRenderingPromise, nullptr);
+    if (!promise)
+        return;
+
     if (result.hasException()) {
         promise->reject(result.releaseException());
         return;
@@ -220,6 +250,13 @@
     lockInternal(mustReleaseLock);
 }
 
+void OfflineAudioContext::dispatchEvent(Event& event)
+{
+    BaseAudioContext::dispatchEvent(event);
+    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
+        clearPendingActivity();
+}
+
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)

Modified: trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.h (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.h	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.h	2021-05-16 01:05:18 UTC (rev 277553)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012, Google Inc. All rights reserved.
+ * Copyright (C) 2020-2021, Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,7 +27,6 @@
 
 #include "BaseAudioContext.h"
 #include "JSDOMPromiseDeferred.h"
-#include "OfflineAudioContextOptions.h"
 #include "OfflineAudioDestinationNode.h"
 #include <wtf/HashMap.h>
 #include <wtf/Lock.h>
@@ -33,23 +33,30 @@
 
 namespace WebCore {
 
+struct OfflineAudioContextOptions;
+
 class OfflineAudioContext final : public BaseAudioContext {
     WTF_MAKE_ISO_ALLOCATED(OfflineAudioContext);
 public:
+    static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, const OfflineAudioContextOptions&);
     static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, unsigned numberOfChannels, unsigned length, float sampleRate);
-    
-    static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, const OfflineAudioContextOptions&);
+    void startRendering(Ref<DeferredPromise>&&);
+    void suspendRendering(double suspendTime, Ref<DeferredPromise>&&);
+    void resumeRendering(Ref<DeferredPromise>&&);
+    void finishedRendering(bool didRendering);
+    void didSuspendRendering(size_t frame);
 
-    void startOfflineRendering(Ref<DeferredPromise>&&);
-    void suspendOfflineRendering(double suspendTime, Ref<DeferredPromise>&&);
-    void resumeOfflineRendering(Ref<DeferredPromise>&&);
-
     unsigned length() const { return m_length; }
-    bool shouldSuspend() final;
+    bool shouldSuspend();
 
-    OfflineAudioDestinationNode& destination() { return static_cast<OfflineAudioDestinationNode&>(BaseAudioContext::destination()); }
-    const OfflineAudioDestinationNode& destination() const { return static_cast<const OfflineAudioDestinationNode&>(BaseAudioContext::destination()); }
+    OfflineAudioDestinationNode& destination() final { return m_destinationNode.get(); }
+    const OfflineAudioDestinationNode& destination() const final { return m_destinationNode.get(); }
 
+private:
+    OfflineAudioContext(Document&, const OfflineAudioContextOptions&);
+
+    AudioBuffer* renderTarget() const { return destination().renderTarget(); }
+
     // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
     void offlineLock(bool& mustReleaseLock);
 
@@ -72,17 +79,25 @@
         bool m_mustReleaseLock;
     };
 
-private:
-    OfflineAudioContext(Document&, unsigned numberOfChannels, unsigned length, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
+    // ActiveDOMObject
+    const char* activeDOMObjectName() const final;
 
-    void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) final;
-    void didSuspendRendering(size_t frame) final;
+    // EventTarget
+    void dispatchEvent(Event&) final;
+
+    void settleRenderingPromise(ExceptionOr<Ref<AudioBuffer>>&&);
     void uninitialize() final;
+    bool isOfflineContext() const final { return true; }
 
-    RefPtr<DeferredPromise> m_pendingOfflineRenderingPromise;
+    UniqueRef<OfflineAudioDestinationNode> m_destinationNode;
+    RefPtr<DeferredPromise> m_pendingRenderingPromise;
     HashMap<unsigned /* frame */, RefPtr<DeferredPromise>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_suspendRequests;
     unsigned m_length;
-    bool m_didStartOfflineRendering { false };
+    bool m_didStartRendering { false };
 };
 
 } // namespace WebCore
+
+SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::OfflineAudioContext)
+    static bool isType(const WebCore::BaseAudioContext& context) { return context.isOfflineContext(); }
+SPECIALIZE_TYPE_TRAITS_END()

Modified: trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl	2021-05-16 01:05:18 UTC (rev 277553)
@@ -31,10 +31,10 @@
     [CallWith=ScriptExecutionContext] constructor(OfflineAudioContextOptions contextOptions);
     [CallWith=ScriptExecutionContext] constructor(unsigned long numberOfChannels, unsigned long length, float sampleRate);
     
-    [ImplementedAs=startOfflineRendering] Promise<AudioBuffer> startRendering();
+    Promise<AudioBuffer> startRendering();
 
-    [ImplementedAs=resumeOfflineRendering] Promise<undefined> resume();
-    [ImplementedAs=suspendOfflineRendering] Promise<undefined> suspend(double suspendTime);
+    [ImplementedAs=resumeRendering] Promise<undefined> resume();
+    [ImplementedAs=suspendRendering] Promise<undefined> suspend(double suspendTime);
 
     readonly attribute unsigned long length;
 

Modified: trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -34,6 +34,7 @@
 #include "AudioWorklet.h"
 #include "AudioWorkletMessagingProxy.h"
 #include "HRTFDatabaseLoader.h"
+#include "OfflineAudioContext.h"
 #include "WorkerRunLoop.h"
 #include <algorithm>
 #include <wtf/IsoMallocInlines.h>
@@ -44,7 +45,7 @@
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(OfflineAudioDestinationNode);
 
-OfflineAudioDestinationNode::OfflineAudioDestinationNode(BaseAudioContext& context, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
+OfflineAudioDestinationNode::OfflineAudioDestinationNode(OfflineAudioContext& context, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
     : AudioDestinationNode(context, sampleRate)
     , m_numberOfChannels(numberOfChannels)
     , m_renderTarget(WTFMove(renderTarget))
@@ -59,6 +60,16 @@
     uninitialize();
 }
 
+OfflineAudioContext& OfflineAudioDestinationNode::context()
+{
+    return downcast<OfflineAudioContext>(AudioDestinationNode::context());
+}
+
+const OfflineAudioContext& OfflineAudioDestinationNode::context() const
+{
+    return downcast<OfflineAudioContext>(AudioDestinationNode::context());
+}
+
 unsigned OfflineAudioDestinationNode::maxChannelCount() const
 {
     return m_numberOfChannels;

Modified: trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h	2021-05-16 01:05:18 UTC (rev 277553)
@@ -33,13 +33,19 @@
 
 class AudioBus;
 class AudioContext;
+class OfflineAudioContext;
     
 class OfflineAudioDestinationNode final : public AudioDestinationNode {
     WTF_MAKE_ISO_ALLOCATED(OfflineAudioDestinationNode);
 public:
-    OfflineAudioDestinationNode(BaseAudioContext&, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
+    OfflineAudioDestinationNode(OfflineAudioContext&, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
 
     virtual ~OfflineAudioDestinationNode();
+
+    OfflineAudioContext& context();
+    const OfflineAudioContext& context() const;
+
+    AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
     
     // AudioNode   
     void initialize() override;

Modified: trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp (277552 => 277553)


--- trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp	2021-05-15 23:55:58 UTC (rev 277552)
+++ trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp	2021-05-16 01:05:18 UTC (rev 277553)
@@ -62,7 +62,7 @@
     if (m_bufferSize < AudioUtilities::renderQuantumSize)
         m_bufferSize = AudioUtilities::renderQuantumSize;
 
-    ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels());
+    ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels);
 
     initializeDefaultNodeOptions(numberOfInputChannels, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
     addInput();
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to