Diff
Modified: trunk/Source/WebCore/ChangeLog (268564 => 268565)
--- trunk/Source/WebCore/ChangeLog 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/ChangeLog 2020-10-16 00:34:30 UTC (rev 268565)
@@ -1,3 +1,37 @@
+2020-10-15 Chris Dumez <[email protected]>
+
+ Move AudioContext-specific logic out of BaseAudioContext
+ https://bugs.webkit.org/show_bug.cgi?id=217794
+
+ Reviewed by Geoffrey Garen.
+
+ Move AudioContext-specific logic out of BaseAudioContext and into the
+ AudioContext class. This required having WebKitAudioContext subclass
+ AudioContext instead of BaseAudioContext.
+
+ No new tests, no Web-facing behavior change.
+
+ * Modules/webaudio/AudioContext.cpp:
+ (WebCore::AudioContext::AudioContext):
+ (WebCore::AudioContext::suspendRendering):
+ (WebCore::AudioContext::resumeRendering):
+ (WebCore::AudioContext::nodeWillBeginPlayback):
+ (WebCore::AudioContext::startRendering):
+ (WebCore::AudioContext::lazyInitialize):
+ (WebCore::AudioContext::willPausePlayback):
+ * Modules/webaudio/AudioContext.h:
+ (WebCore::AudioContext::AudioContext):
+ * Modules/webaudio/BaseAudioContext.cpp:
+ (WebCore::BaseAudioContext::lazyInitialize):
+ * Modules/webaudio/BaseAudioContext.h:
+ (WebCore::BaseAudioContext::nodeWillBeginPlayback):
+ (WebCore::BaseAudioContext::mediaSession const):
+ * Modules/webaudio/DefaultAudioDestinationNode.h:
+ * Modules/webaudio/WebKitAudioContext.cpp:
+ (WebCore::WebKitAudioContext::WebKitAudioContext):
+ * Modules/webaudio/WebKitAudioContext.h:
+ (WebCore::WebKitAudioContext::listener):
+
2020-10-15 Sam Weinig <[email protected]>
CSSStyleDeclaration breaks JS spec (properties not showing up in Object.getOwnPropertyNames)
Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp 2020-10-16 00:34:30 UTC (rev 268565)
@@ -30,8 +30,8 @@
#include "AudioContext.h"
#include "AudioTimestamp.h"
#include "DOMWindow.h"
-#include "DefaultAudioDestinationNode.h"
#include "JSDOMPromiseDeferred.h"
+#include "Page.h"
#include "Performance.h"
#include <wtf/IsoMallocInlines.h>
@@ -99,6 +99,12 @@
{
}
+// Only needed for WebKitOfflineAudioContext.
+AudioContext::AudioContext(Document& document, unsigned numberOfChannels, RefPtr<AudioBuffer>&& renderTarget)
+ : BaseAudioContext(document, numberOfChannels, WTFMove(renderTarget))
+{
+}
+
double AudioContext::baseLatency()
{
lazyInitialize();
@@ -153,6 +159,125 @@
return static_cast<DefaultAudioDestinationNode*>(BaseAudioContext::destination());
}
+void AudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
+{
+ if (isOfflineContext() || isStopped()) {
+ promise.reject(InvalidStateError);
+ return;
+ }
+
+ if (state() == State::Closed || state() == State::Interrupted || !destinationNode()) {
+ promise.reject();
+ return;
+ }
+
+ addReaction(State::Suspended, WTFMove(promise));
+ m_wasSuspendedByScript = true;
+
+ if (!willPausePlayback())
+ return;
+
+ lazyInitialize();
+
+ destinationNode()->suspend([this, protectedThis = makeRef(*this)] {
+ setState(State::Suspended);
+ });
+}
+
+void AudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise)
+{
+ if (isOfflineContext() || isStopped()) {
+ promise.reject(InvalidStateError);
+ return;
+ }
+
+ if (state() == State::Closed || !destinationNode()) {
+ promise.reject();
+ return;
+ }
+
+ addReaction(State::Running, WTFMove(promise));
+ m_wasSuspendedByScript = false;
+
+ if (!willBeginPlayback())
+ return;
+
+ lazyInitialize();
+
+ destinationNode()->resume([this, protectedThis = makeRef(*this)] {
+ setState(State::Running);
+ });
+}
+
+void AudioContext::nodeWillBeginPlayback()
+{
+ // Called by scheduled AudioNodes when clients schedule their start times.
+ // Prior to the introduction of suspend(), resume(), and stop(), starting
+ // a scheduled AudioNode would remove the user-gesture restriction, if present,
+ // and would thus unmute the context. Now that AudioContext stays in the
+ // "suspended" state if a user-gesture restriction is present, starting a
+ // scheduled AudioNode should set the state to "running", but only if the
+ // user-gesture restriction is set.
+ if (userGestureRequiredForAudioStart())
+ startRendering();
+}
+
+void AudioContext::startRendering()
+{
+ ALWAYS_LOG(LOGIDENTIFIER);
+ if (isStopped() || !willBeginPlayback() || m_wasSuspendedByScript)
+ return;
+
+ makePendingActivity();
+
+ setState(State::Running);
+
+ lazyInitialize();
+ destination()->startRendering();
+}
+
+void AudioContext::lazyInitialize()
+{
+ if (isInitialized())
+ return;
+
+ BaseAudioContext::lazyInitialize();
+ if (isInitialized()) {
+ if (destinationNode() && state() != State::Running) {
+ // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+ // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+ // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
+ // We may want to consider requiring it for symmetry with OfflineAudioContext.
+ startRendering();
+ ++s_hardwareContextCount;
+ }
+ }
+}
+
+bool AudioContext::willPausePlayback()
+{
+ auto* document = this->document();
+ if (!document)
+ return false;
+
+ if (userGestureRequiredForAudioStart()) {
+ if (!document->processingUserGestureForMedia())
+ return false;
+ removeBehaviorRestriction(BaseAudioContext::RequireUserGestureForAudioStartRestriction);
+ }
+
+ if (pageConsentRequiredForAudioStart()) {
+ auto* page = document->page();
+ if (page && !page->canStartMedia()) {
+ document->addMediaCanStartListener(*this);
+ return false;
+ }
+ removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction);
+ }
+
+ return mediaSession()->clientWillPausePlayback();
+}
+
#if ENABLE(VIDEO)
ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.h (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/AudioContext.h 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.h 2020-10-16 00:34:30 UTC (rev 268565)
@@ -27,11 +27,11 @@
#include "AudioContextOptions.h"
#include "BaseAudioContext.h"
+#include "DefaultAudioDestinationNode.h"
namespace WebCore {
class DOMWindow;
-class DefaultAudioDestinationNode;
struct AudioTimestamp;
@@ -57,8 +57,24 @@
ExceptionOr<Ref<MediaStreamAudioDestinationNode>> createMediaStreamDestination();
#endif
+ void suspendRendering(DOMPromiseDeferred<void>&&);
+ void resumeRendering(DOMPromiseDeferred<void>&&);
+
+ void nodeWillBeginPlayback() final;
+ void lazyInitialize() final;
+
+ void startRendering();
+
+protected:
+ explicit AudioContext(Document&, const AudioContextOptions& = { });
+ AudioContext(Document&, unsigned numberOfChannels, RefPtr<AudioBuffer>&& renderTarget);
+
private:
- AudioContext(Document&, const AudioContextOptions&);
+ bool willPausePlayback();
+
+ // [[suspended by user]] flag in the specification:
+ // https://www.w3.org/TR/webaudio/#dom-audiocontext-suspended-by-user-slot
+ bool m_wasSuspendedByScript { false };
};
} // WebCore
Modified: trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp 2020-10-16 00:34:30 UTC (rev 268565)
@@ -223,19 +223,9 @@
if (m_isAudioThreadFinished)
return;
- if (m_destinationNode) {
+ if (m_destinationNode)
m_destinationNode->initialize();
- if (!isOfflineContext() && state() != State::Running) {
- // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
- // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
- // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
- // We may want to consider requiring it for symmetry with OfflineAudioContext.
- startRendering();
- ++s_hardwareContextCount;
- }
- }
-
m_isInitialized = true;
}
@@ -1003,19 +993,6 @@
return ActiveDOMObject::scriptExecutionContext();
}
-void BaseAudioContext::nodeWillBeginPlayback()
-{
- // Called by scheduled AudioNodes when clients schedule their start times.
- // Prior to the introduction of suspend(), resume(), and stop(), starting
- // a scheduled AudioNode would remove the user-gesture restriction, if present,
- // and would thus unmute the context. Now that AudioContext stays in the
- // "suspended" state if a user-gesture restriction is present, starting a
- // schedule AudioNode should set the state to "running", but only if the
- // user-gesture restriction is set.
- if (userGestureRequiredForAudioStart())
- startRendering();
-}
-
static bool shouldDocumentAllowWebAudioToAutoPlay(const Document& document)
{
if (document.processingUserGestureForMedia() || document.isCapturing())
@@ -1053,44 +1030,6 @@
return willBegin;
}
-bool BaseAudioContext::willPausePlayback()
-{
- auto* document = this->document();
- if (!document)
- return false;
-
- if (userGestureRequiredForAudioStart()) {
- if (!document->processingUserGestureForMedia())
- return false;
- removeBehaviorRestriction(BaseAudioContext::RequireUserGestureForAudioStartRestriction);
- }
-
- if (pageConsentRequiredForAudioStart()) {
- auto* page = document->page();
- if (page && !page->canStartMedia()) {
- document->addMediaCanStartListener(*this);
- return false;
- }
- removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction);
- }
-
- return m_mediaSession->clientWillPausePlayback();
-}
-
-void BaseAudioContext::startRendering()
-{
- ALWAYS_LOG(LOGIDENTIFIER);
- if (m_isStopScheduled || !willBeginPlayback() || m_wasSuspendedByScript)
- return;
-
- makePendingActivity();
-
- setState(State::Running);
-
- lazyInitialize();
- destination()->startRendering();
-}
-
void BaseAudioContext::mediaCanStart(Document& document)
{
ASSERT_UNUSED(document, &document == this->document());
@@ -1122,6 +1061,7 @@
});
}
+// FIXME: Move to OfflineAudioContext once WebKitOfflineAudioContext gets removed.
void BaseAudioContext::finishedRendering(bool didRendering)
{
ASSERT(isOfflineContext());
@@ -1178,61 +1118,11 @@
--m_activeSourceCount;
}
-void BaseAudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
-{
- if (isOfflineContext() || m_isStopScheduled) {
- promise.reject(InvalidStateError);
- return;
- }
-
- if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
- promise.reject();
- return;
- }
-
- addReaction(State::Suspended, WTFMove(promise));
- m_wasSuspendedByScript = true;
-
- if (!willPausePlayback())
- return;
-
- lazyInitialize();
-
- m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
- setState(State::Suspended);
- });
-}
-
void BaseAudioContext::didSuspendRendering(size_t)
{
setState(State::Suspended);
}
-void BaseAudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise)
-{
- if (isOfflineContext() || m_isStopScheduled) {
- promise.reject(InvalidStateError);
- return;
- }
-
- if (m_state == State::Closed || !m_destinationNode) {
- promise.reject();
- return;
- }
-
- addReaction(State::Running, WTFMove(promise));
- m_wasSuspendedByScript = false;
-
- if (!willBeginPlayback())
- return;
-
- lazyInitialize();
-
- m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
- setState(State::Running);
- });
-}
-
void BaseAudioContext::suspendPlayback()
{
if (!m_destinationNode || m_state == State::Closed)
Modified: trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h 2020-10-16 00:34:30 UTC (rev 268565)
@@ -144,9 +144,6 @@
AudioListener& listener();
- void suspendRendering(DOMPromiseDeferred<void>&&);
- void resumeRendering(DOMPromiseDeferred<void>&&);
-
virtual void didSuspendRendering(size_t frame);
AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
@@ -258,7 +255,6 @@
void refEventTarget() override { ref(); }
void derefEventTarget() override { deref(); }
- void startRendering();
void finishedRendering(bool didRendering);
static unsigned s_hardwareContextCount;
@@ -276,7 +272,7 @@
void isPlayingAudioDidChange();
- void nodeWillBeginPlayback();
+ virtual void nodeWillBeginPlayback() { }
#if !RELEASE_LOG_DISABLED
const Logger& logger() const final { return m_logger.get(); }
@@ -317,7 +313,7 @@
void refNode(AudioNode&);
void derefNode(AudioNode&);
- void lazyInitialize();
+ virtual void lazyInitialize();
static bool isSupportedSampleRate(float sampleRate);
@@ -350,14 +346,13 @@
virtual void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) { }
+ bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
+ bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }
+
+ PlatformMediaSession* mediaSession() const { return m_mediaSession.get(); }
private:
void constructCommon();
- bool willPausePlayback();
-
- bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
- bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }
-
void clear();
void scheduleNodeDeletion();
@@ -476,10 +471,6 @@
HashMap<String, Vector<AudioParamDescriptor>> m_parameterDescriptorMap;
- // [[suspended by user]] flag in the specification:
- // https://www.w3.org/TR/webaudio/#dom-audiocontext-suspended-by-user-slot
- bool m_wasSuspendedByScript { false };
-
// These are cached per audio context for performance reasons. They cannot be
// static because they rely on the sample rate.
RefPtr<PeriodicWave> m_cachedPeriodicWaveSine;
Modified: trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h 2020-10-16 00:34:30 UTC (rev 268565)
@@ -43,6 +43,8 @@
unsigned framesPerBuffer() const;
float sampleRate() const final { return m_sampleRate; }
+ ExceptionOr<void> startRendering() final;
+
private:
explicit DefaultAudioDestinationNode(BaseAudioContext&, Optional<float>);
void createDestination();
@@ -56,7 +58,6 @@
bool requiresTailProcessing() const final { return false; }
void enableInput(const String& inputDeviceId) final;
- ExceptionOr<void> startRendering() final;
void resume(Function<void ()>&&) final;
void suspend(Function<void ()>&&) final;
void close(Function<void ()>&&) final;
Modified: trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.cpp (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.cpp 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.cpp 2020-10-16 00:34:30 UTC (rev 268565)
@@ -79,13 +79,13 @@
// Constructor for rendering to the audio hardware.
WebKitAudioContext::WebKitAudioContext(Document& document)
- : BaseAudioContext(document)
+ : AudioContext(document)
{
}
// Constructor for offline (non-realtime) rendering.
WebKitAudioContext::WebKitAudioContext(Document& document, Ref<AudioBuffer>&& renderTarget)
- : BaseAudioContext(document, renderTarget->numberOfChannels(), WTFMove(renderTarget))
+ : AudioContext(document, renderTarget->numberOfChannels(), WTFMove(renderTarget))
{
}
Modified: trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.h (268564 => 268565)
--- trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.h 2020-10-16 00:28:16 UTC (rev 268564)
+++ trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.h 2020-10-16 00:34:30 UTC (rev 268565)
@@ -25,7 +25,7 @@
#pragma once
-#include "BaseAudioContext.h"
+#include "AudioContext.h"
#include "WebKitAudioListener.h"
namespace WebCore {
@@ -46,8 +46,7 @@
// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
-class WebKitAudioContext
- : public BaseAudioContext
+class WebKitAudioContext : public AudioContext
{
WTF_MAKE_ISO_ALLOCATED(WebKitAudioContext);
public:
@@ -56,7 +55,7 @@
void close(DOMPromiseDeferred<void>&&);
- WebKitAudioListener& listener() { return downcast<WebKitAudioListener>(BaseAudioContext::listener()); }
+ WebKitAudioListener& listener() { return downcast<WebKitAudioListener>(AudioContext::listener()); }
// The AudioNode create methods are called on the main thread (from JavaScript).
#if ENABLE(VIDEO)