Diff
Modified: trunk/LayoutTests/ChangeLog (111238 => 111239)
--- trunk/LayoutTests/ChangeLog 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/LayoutTests/ChangeLog 2012-03-19 20:54:50 UTC (rev 111239)
@@ -1,3 +1,13 @@
+2012-03-19 Raymond Toy <[email protected]>
+
+ Add playback state for AudioBufferSourceNode and add number of active nodes
+ https://bugs.webkit.org/show_bug.cgi?id=79701
+
+ Reviewed by Chris Rogers.
+
+ * webaudio/audiobuffersource-playbackState-expected.txt: Added.
+ * webaudio/audiobuffersource-playbackState.html: Added.
+
2012-03-19 Sheriff Bot <[email protected]>
Unreviewed, rolling out r109014.
Added: trunk/LayoutTests/webaudio/audiobuffersource-playbackState-expected.txt (0 => 111239)
--- trunk/LayoutTests/webaudio/audiobuffersource-playbackState-expected.txt (rev 0)
+++ trunk/LayoutTests/webaudio/audiobuffersource-playbackState-expected.txt 2012-03-19 20:54:50 UTC (rev 111239)
@@ -0,0 +1,20 @@
+Test AudioContext activeSourceCount and AudioBufferSourceNode playbackState.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS Rendering time is 2 seconds.
+PASS UNSCHEDULED_STATE Source has been created
+PASS SCHEDULED_STATE 3 second source scheduled to start at time 3
+PASS PLAYING_STATE 2 second source starting at time 1
+PASS FINISHED_STATE 1.25 second source starting at time 0
+PASS SCHEDULED_STATE 1 second grain scheduled to start at time 3
+PASS PLAYING_STATE 2 second grain starting at time 0.5
+PASS FINISHED_STATE 1 second grain starting at time 0.5
+PASS SCHEDULED_STATE a looping 0.5 second source scheduled at time 3
+PASS PLAYING_STATE a looping 0.5 second source starting at time 1.25
+PASS 3 are currently playing as expected.
+PASS activeSourceCount and playbackState tests succeeded.
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
Added: trunk/LayoutTests/webaudio/audiobuffersource-playbackState.html (0 => 111239)
--- trunk/LayoutTests/webaudio/audiobuffersource-playbackState.html (rev 0)
+++ trunk/LayoutTests/webaudio/audiobuffersource-playbackState.html 2012-03-19 20:54:50 UTC (rev 111239)
@@ -0,0 +1,181 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+<script src=""
+<script src=""
+</head>
+
+<body>
+<div id="description"></div>
+<div id="console"></div>
+
+<script>
+description("Test AudioContext activeSourceCount and AudioBufferSourceNode playbackState.");
+
+// Create a few sources that start and end playing at various times. After rendering, check that
+// each source is in the correct state and that the number of active sources is correct.
+
+var sampleRate = 44100;
+
+// Render for this long.
+var renderTime = 2;
+var renderLength = timeToSampleFrame(renderTime, sampleRate);
+
+var context;
+
+// List of AudioBufferSourceNode sources.
+var sources = [];
+
+// List of messages that will be printed out on success (or failure). Messages are in the same
+// order as the sources list above.
+var sourceMessages = [];
+
+// List of the expected playback state for each source, in the same order as the sources list above.
+var sourceExpectedStates = [];
+
+// Array mapping the playback state to a string.
+var playbackStateName = ["UNSCHEDULED_STATE ",
+ "SCHEDULED_STATE ",
+ "PLAYING_STATE ",
+ "FINISHED_STATE "];
+
+function checkResult(event)
+{
+ var success = true;
+
+ // For each source, verify that the playback state matches our expected playback state.
+ for (var k = 0; k < sources.length; ++k) {
+ var prefix = playbackStateName[sourceExpectedStates[k]] + sourceMessages[k];
+ if (sources[k].playbackState == sourceExpectedStates[k]) {
+ testPassed(prefix);
+ } else {
+ testFailed(prefix + ": Actual = " + playbackStateName[sources[k].playbackState]);
+ success = false;
+ }
+ }
+
+ // Figure out how many active sources there should be from the expected states.
+ var playingState = sources[0].PLAYING_STATE;
+
+ var expectedActiveCount = 0;
+ for (k = 0; k < sourceExpectedStates.length; ++k) {
+ if (sourceExpectedStates[k] == playingState) {
+ ++expectedActiveCount;
+ }
+ }
+
+ if (context.activeSourceCount == expectedActiveCount) {
+ testPassed(context.activeSourceCount + " are currently playing as expected.");
+ } else {
+ testFailed(context.activeSourceCount + " are currently playing, but expected " + expectedActiveCount + ".");
+ success = false;
+ }
+
+ if (success) {
+ testPassed("activeSourceCount and playbackState tests succeeded.");
+ } else {
+ testFailed("activeSourceCount and playbackState tests did not succeed.");
+ }
+
+ finishJSTest();
+}
+
+// sourceLength - length of source in seconds
+// noteFunction - function to turn on source appropriately
+// expectedState - expected state of the source at the end of rendering
+// message - message to be displayed if test passes
+function createTest(sourceLength, noteFunction, expectedState, message)
+{
+ var s = context.createBufferSource();
+ s.buffer = createImpulseBuffer(context, timeToSampleFrame(sourceLength, sampleRate));
+ s.connect(context.destination);
+ noteFunction(s);
+ sources.push(s);
+ sourceMessages.push(message);
+ sourceExpectedStates.push(expectedState);
+}
+
+function runTest()
+{
+ if (window.layoutTestController) {
+ layoutTestController.dumpAsText();
+ layoutTestController.waitUntilDone();
+ }
+
+ window.jsTestIsAsync = true;
+
+ // Create offline audio context, rendering for renderTime seconds.
+ context = new webkitAudioContext(2, timeToSampleFrame(renderTime, sampleRate), sampleRate);
+
+ // This is only used so we can access the playback state constants.
+ var bufferSource = context.createBufferSource();
+
+ // Dummy message recording how long we're rendering, so that the pass/fail messages
+ // below can be interpreted correctly.
+ testPassed("Rendering time is " + renderTime + " seconds.");
+
+ // Test unscheduled state. Create a 3 second source, but don't schedule it.
+
+ createTest(3,
+ function(s) { },
+ bufferSource.UNSCHEDULED_STATE,
+ "Source has been created");
+
+ // Test noteOn.
+
+ createTest(3,
+ function(s) { s.noteOn(renderTime + 1); },
+ bufferSource.SCHEDULED_STATE,
+ "3 second source scheduled to start at time " + (renderTime + 1));
+
+ createTest(2,
+ function(s) { s.noteOn(1); },
+ bufferSource.PLAYING_STATE,
+ "2 second source starting at time 1");
+
+ createTest(1.25,
+ function(s) { s.noteOn(0); },
+ bufferSource.FINISHED_STATE,
+ "1.25 second source starting at time 0");
+
+ // Test noteGrainOn.
+
+ createTest(3,
+ function(s) { s.noteGrainOn(renderTime + 1, 0, 1); },
+ bufferSource.SCHEDULED_STATE,
+ "1 second grain scheduled to start at time " + (renderTime + 1));
+
+ createTest(3,
+ function(s) { s.noteGrainOn(0.5, 0, 2); },
+ bufferSource.PLAYING_STATE,
+ "2 second grain starting at time 0.5");
+
+ createTest(3,
+ function(s) { s.noteGrainOn(0.5, 0, 1); },
+ bufferSource.FINISHED_STATE,
+ "1 second grain starting at time 0.5");
+
+ // Test looping sources.
+
+ createTest(0.5,
+ function(s) { s.loop = true; s.noteOn(renderTime + 1); },
+ bufferSource.SCHEDULED_STATE,
+ "a looping 0.5 second source scheduled at time " + (renderTime + 1));
+
+ createTest(0.5,
+ function(s) { s.loop = true; s.noteOn(1.25); },
+ bufferSource.PLAYING_STATE,
+ "a looping 0.5 second source starting at time 1.25");
+
+ context.oncomplete = checkResult;
+ context.startRendering();
+}
+
+runTest();
+successfullyParsed = true;
+
+</script>
+
+<script src=""
+</body>
+</html>
Modified: trunk/Source/WebCore/ChangeLog (111238 => 111239)
--- trunk/Source/WebCore/ChangeLog 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/ChangeLog 2012-03-19 20:54:50 UTC (rev 111239)
@@ -1,3 +1,44 @@
+2012-03-19 Raymond Toy <[email protected]>
+
+ Add playback state for AudioBufferSourceNode and add number of active nodes
+ https://bugs.webkit.org/show_bug.cgi?id=79701
+
+ Add a playback state to AudioBufferSourceNode so that the user can
+ tell if the node is scheduled, playing, or finished.
+
+ For an AudioContext, add a count of the number of
+ AudioBufferSourceNode's that are active (playing audio).
+
+ Deleted m_isPlaying and m_hasFinished. For the record, m_isPlaying
+ is the same as a playbackState of SCHEDULED_STATE or PLAYING_STATE.
+ m_hasFinished is equivalent to a playbackState of FINISHED_STATE.
+
+ Reviewed by Chris Rogers.
+
+ Test: webaudio/audiobuffersource-playbackState.html
+
+ * webaudio/AudioBufferSourceNode.cpp:
+ (WebCore):
+ (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
+ initialize playbackState, remove m_isPlaying, m_hasFinished.
+ (WebCore::AudioBufferSourceNode::process): Update playbackState
+ (WebCore::AudioBufferSourceNode::finish): Update playbackState
+ (WebCore::AudioBufferSourceNode::noteOn): Update playbackState
+ (WebCore::AudioBufferSourceNode::noteGrainOn): Update playbackState
+ (WebCore::AudioBufferSourceNode::noteOff): Use playbackState
+ * webaudio/AudioBufferSourceNode.h:
+ (AudioBufferSourceNode): Define states, remove m_isPlaying, m_hasFinished.
+ (WebCore::AudioBufferSourceNode::playbackState): Added.
+ * webaudio/AudioBufferSourceNode.idl: Define constants for the
+ playback state, add attribute playbackState.
+ * webaudio/AudioContext.cpp:
+ (WebCore::AudioContext::activeSourceCount): Added.
+ (WebCore::AudioContext::incrementActiveSourceCount): Added.
+ (WebCore::AudioContext::decrementActiveSourceCount): Added.
+ * webaudio/AudioContext.h:
+ (AudioContext):
+ * webaudio/AudioContext.idl: Add attribute activeSourceCount.
+
2012-03-19 Sheriff Bot <[email protected]>
Unreviewed, rolling out r109014.
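As a rough illustration of the API described in the ChangeLog entry above, the sketch below schedules a single source in an offline context and inspects the new playbackState and activeSourceCount attributes before and after rendering. It uses the prefixed webkitAudioContext constructor and the legacy noteOn() naming that appear elsewhere in this changeset; the variable names and timings are illustrative only and are not part of the patch.

    // Illustrative sketch (not part of this changeset).
    var sampleRate = 44100;
    // Offline context: 1 channel, 1 second of rendering.
    var context = new webkitAudioContext(1, sampleRate, sampleRate);

    var source = context.createBufferSource();
    source.buffer = context.createBuffer(1, sampleRate / 2, sampleRate); // 0.5 second buffer
    source.connect(context.destination);

    // Freshly created, nothing scheduled yet.
    console.log(source.playbackState == source.UNSCHEDULED_STATE); // true

    source.noteOn(0); // schedule playback at time 0
    console.log(source.playbackState == source.SCHEDULED_STATE);   // true

    context.oncomplete = function () {
        // The 0.5 second buffer ends before the 1 second render finishes, so the
        // source has reached FINISHED_STATE and is no longer counted as active.
        console.log(source.playbackState == source.FINISHED_STATE); // true
        console.log(context.activeSourceCount);                     // 0
    };
    context.startRendering();
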
Modified: trunk/Source/WebCore/webaudio/AudioBufferSourceNode.cpp (111238 => 111239)
--- trunk/Source/WebCore/webaudio/AudioBufferSourceNode.cpp 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/webaudio/AudioBufferSourceNode.cpp 2012-03-19 20:54:50 UTC (rev 111239)
@@ -58,9 +58,7 @@
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
: AudioSourceNode(context, sampleRate)
, m_buffer(0)
- , m_isPlaying(false)
, m_isLooping(false)
- , m_hasFinished(false)
, m_startTime(0.0)
, m_endTime(UnknownTime)
, m_virtualReadIndex(0)
@@ -69,6 +67,7 @@
, m_grainDuration(DefaultGrainDuration)
, m_lastGain(1.0)
, m_pannerNode(0)
+ , m_playbackState(UNSCHEDULED_STATE)
{
setNodeType(NodeTypeAudioBufferSource);
@@ -110,18 +109,24 @@
// If we know the end time and it's already passed, then don't bother doing any more rendering this cycle.
if (m_endTime != UnknownTime && endFrame <= quantumStartFrame) {
- m_isPlaying = false;
m_virtualReadIndex = 0;
finish();
}
- if (!m_isPlaying || m_hasFinished || !buffer() || startFrame >= quantumEndFrame) {
+ if (m_playbackState == UNSCHEDULED_STATE || m_playbackState == FINISHED_STATE
+ || !buffer() || startFrame >= quantumEndFrame) {
// FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
outputBus->zero();
m_processLock.unlock();
return;
}
+ if (m_playbackState == SCHEDULED_STATE) {
+ // Increment the active source count only if we're transitioning from SCHEDULED_STATE to PLAYING_STATE.
+ m_playbackState = PLAYING_STATE;
+ context()->incrementActiveSourceCount();
+ }
+
size_t quantumFrameOffset = startFrame > quantumStartFrame ? startFrame - quantumStartFrame : 0;
quantumFrameOffset = min(quantumFrameOffset, framesToProcess); // clamp to valid range
size_t bufferFramesToProcess = framesToProcess - quantumFrameOffset;
@@ -150,8 +155,8 @@
memset(m_destinationChannels[i] + zeroStartFrame, 0, sizeof(float) * framesToZero);
}
- m_isPlaying = false;
m_virtualReadIndex = 0;
+
finish();
}
@@ -167,7 +172,6 @@
{
if (!loop()) {
// If we're not looping, then stop playing when we get to the end.
- m_isPlaying = false;
if (framesToProcess > 0) {
// We're not looping and we've reached the end of the sample data, but we still need to provide more output,
@@ -343,10 +347,11 @@
void AudioBufferSourceNode::finish()
{
- if (!m_hasFinished) {
+ if (m_playbackState != FINISHED_STATE) {
// Let the context dereference this AudioNode.
context()->notifyNodeFinishedProcessing(this);
- m_hasFinished = true;
+ m_playbackState = FINISHED_STATE;
+ context()->decrementActiveSourceCount();
}
}
@@ -390,19 +395,20 @@
void AudioBufferSourceNode::noteOn(double when)
{
ASSERT(isMainThread());
- if (m_isPlaying)
+ if (m_playbackState != UNSCHEDULED_STATE)
return;
m_isGrain = false;
m_startTime = when;
m_virtualReadIndex = 0;
- m_isPlaying = true;
+ m_playbackState = SCHEDULED_STATE;
}
void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration)
{
ASSERT(isMainThread());
- if (m_isPlaying)
+
+ if (m_playbackState != UNSCHEDULED_STATE)
return;
if (!buffer())
@@ -432,13 +438,13 @@
// Since playbackRate == 1 is very common, it's worth considering quality.
m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
- m_isPlaying = true;
+ m_playbackState = SCHEDULED_STATE;
}
void AudioBufferSourceNode::noteOff(double when)
{
ASSERT(isMainThread());
- if (!m_isPlaying)
+ if (!(m_playbackState == SCHEDULED_STATE || m_playbackState == PLAYING_STATE))
return;
when = max(0.0, when);
@@ -496,7 +502,6 @@
m_isLooping = looping;
}
-
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
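A short sketch (not from the changeset) of the observable effect of the SCHEDULED_STATE to PLAYING_STATE transition handled in process() above: a source that is merely scheduled does not yet contribute to activeSourceCount. It assumes a real-time, prefixed webkitAudioContext with no other sources playing; the timings are arbitrary.

    // Illustrative sketch: activeSourceCount counts sources that have actually
    // started producing audio, not sources that are only scheduled.
    var context = new webkitAudioContext(); // real-time context
    var source = context.createBufferSource();
    source.buffer = context.createBuffer(1, context.sampleRate, context.sampleRate); // 1 second of silence
    source.connect(context.destination);

    source.noteOn(context.currentTime + 5); // start 5 seconds from now

    // Right after scheduling, the source is in SCHEDULED_STATE and is not active.
    console.log(source.playbackState == source.SCHEDULED_STATE); // true
    console.log(context.activeSourceCount);                      // 0 (assuming no other sources)

    setTimeout(function () {
        // Roughly half a second into playback, process() has moved the source to
        // PLAYING_STATE and incremented the context's active source count.
        console.log(source.playbackState == source.PLAYING_STATE); // true
        console.log(context.activeSourceCount);                    // 1
    }, 5500);
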
Modified: trunk/Source/WebCore/webaudio/AudioBufferSourceNode.h (111238 => 111239)
--- trunk/Source/WebCore/webaudio/AudioBufferSourceNode.h 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/webaudio/AudioBufferSourceNode.h 2012-03-19 20:54:50 UTC (rev 111239)
@@ -44,6 +44,23 @@
class AudioBufferSourceNode : public AudioSourceNode {
public:
+ // These are the possible states an AudioBufferSourceNode can be in:
+ //
+ // UNSCHEDULED_STATE - Initial playback state. Created, but not yet scheduled.
+ // SCHEDULED_STATE - Scheduled to play (via noteOn() or noteGrainOn()), but not yet playing.
+ // PLAYING_STATE - Generating sound.
+ // FINISHED_STATE - Finished generating sound.
+ //
+ // The state can only transition to the next state, except for the FINISHED_STATE which can
+ // never be changed.
+ enum PlaybackState {
+ // These must be defined with the same names and values as in the .idl file.
+ UNSCHEDULED_STATE = 0,
+ SCHEDULED_STATE = 1,
+ PLAYING_STATE = 2,
+ FINISHED_STATE = 3
+ };
+
static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
virtual ~AudioBufferSourceNode();
@@ -67,6 +84,8 @@
void noteGrainOn(double when, double grainOffset, double grainDuration);
void noteOff(double when);
+ unsigned short playbackState() { return static_cast<unsigned short>(m_playbackState); }
+
// Note: the attribute was originally exposed as .looping, but to be more consistent in naming with <audio>
// and with how it's described in the specification, the proper attribute name is .loop
// The old attribute is kept for backwards compatibility.
@@ -102,17 +121,10 @@
RefPtr<AudioGain> m_gain;
RefPtr<AudioParam> m_playbackRate;
- // m_isPlaying is set to true when noteOn() or noteGrainOn() is called.
- bool m_isPlaying;
-
// If m_isLooping is false, then this node will be done playing and become inactive after it reaches the end of the sample data in the buffer.
// If true, it will wrap around to the start of the buffer each time it reaches the end.
bool m_isLooping;
- // This node is considered finished when it reaches the end of the buffer's sample data after noteOn() has been called.
- // This will only be set to true if m_isLooping == false.
- bool m_hasFinished;
-
// m_startTime is the time to start playing based on the context's timeline (0.0 or a time less than the context's current time means "now").
double m_startTime; // in seconds
@@ -145,6 +157,8 @@
// Handles the time when we reach the end of sample data (non-looping) or the noteOff() time has been reached.
void finish();
+
+ PlaybackState m_playbackState;
};
} // namespace WebCore
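The one-way state machine documented in the header above can be observed from script. The hypothetical helper below (not part of this patch) polls a node until it reaches FINISHED_STATE, relying only on the playbackState attribute and constants that this header introduces; the function name and polling interval are invented for illustration.

    // Hypothetical helper: invoke a callback once a buffer source has finished.
    // States only move forward (UNSCHEDULED -> SCHEDULED -> PLAYING -> FINISHED),
    // so polling for FINISHED_STATE is sufficient.
    function waitForFinish(source, callback) {
        var timer = setInterval(function () {
            if (source.playbackState == source.FINISHED_STATE) {
                clearInterval(timer);
                callback(source);
            }
        }, 100);
    }

    // Example usage:
    // waitForFinish(someBufferSource, function (s) { console.log("source finished"); });
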
Modified: trunk/Source/WebCore/webaudio/AudioBufferSourceNode.idl (111238 => 111239)
--- trunk/Source/WebCore/webaudio/AudioBufferSourceNode.idl 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/webaudio/AudioBufferSourceNode.idl 2012-03-19 20:54:50 UTC (rev 111239)
@@ -31,6 +31,13 @@
attribute [CustomSetter] AudioBuffer buffer
setter raises (DOMException);
+ const unsigned short UNSCHEDULED_STATE = 0;
+ const unsigned short SCHEDULED_STATE = 1;
+ const unsigned short PLAYING_STATE = 2;
+ const unsigned short FINISHED_STATE = 3;
+
+ readonly attribute unsigned short playbackState;
+
readonly attribute AudioGain gain;
readonly attribute AudioParam playbackRate;
Modified: trunk/Source/WebCore/webaudio/AudioContext.cpp (111238 => 111239)
--- trunk/Source/WebCore/webaudio/AudioContext.cpp 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/webaudio/AudioContext.cpp 2012-03-19 20:54:50 UTC (rev 111239)
@@ -69,6 +69,7 @@
#endif
#include <wtf/ArrayBuffer.h>
+#include <wtf/Atomics.h>
#include <wtf/MainThread.h>
#include <wtf/OwnPtr.h>
#include <wtf/PassOwnPtr.h>
@@ -139,6 +140,7 @@
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(false)
+ , m_activeSourceCount(0)
{
constructCommon();
@@ -162,6 +164,7 @@
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(true)
+ , m_activeSourceCount(0)
{
constructCommon();
@@ -781,6 +784,16 @@
}
}
+void AudioContext::incrementActiveSourceCount()
+{
+ atomicIncrement(&m_activeSourceCount);
+}
+
+void AudioContext::decrementActiveSourceCount()
+{
+ atomicDecrement(&m_activeSourceCount);
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
Modified: trunk/Source/WebCore/webaudio/AudioContext.h (111238 => 111239)
--- trunk/Source/WebCore/webaudio/AudioContext.h 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/webaudio/AudioContext.h 2012-03-19 20:54:50 UTC (rev 111239)
@@ -96,7 +96,11 @@
size_t currentSampleFrame() { return m_destinationNode->currentSampleFrame(); }
double currentTime() { return m_destinationNode->currentTime(); }
float sampleRate() { return m_destinationNode->sampleRate(); }
+ unsigned long activeSourceCount() { return static_cast<unsigned long>(m_activeSourceCount); }
+ void incrementActiveSourceCount();
+ void decrementActiveSourceCount();
+
PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
@@ -312,6 +316,8 @@
// It is somewhat arbitrary and could be increased if necessary.
enum { MaxNumberOfChannels = 32 };
+ // Number of AudioBufferSourceNodes that are active (playing).
+ int m_activeSourceCount;
};
} // WebCore
Modified: trunk/Source/WebCore/webaudio/AudioContext.idl (111238 => 111239)
--- trunk/Source/WebCore/webaudio/AudioContext.idl 2012-03-19 20:43:33 UTC (rev 111238)
+++ trunk/Source/WebCore/webaudio/AudioContext.idl 2012-03-19 20:54:50 UTC (rev 111239)
@@ -44,6 +44,9 @@
// All panning is relative to this listener.
readonly attribute AudioListener listener;
+ // Number of AudioBufferSourceNodes that are currently playing.
+ readonly attribute unsigned long activeSourceCount;
+
AudioBuffer createBuffer(in unsigned long numberOfChannels, in unsigned long numberOfFrames, in float sampleRate)
raises(DOMException);
AudioBuffer createBuffer(in ArrayBuffer buffer, in boolean mixToMono)