Diff
Modified: trunk/Source/WebCore/ChangeLog (113599 => 113600)
--- trunk/Source/WebCore/ChangeLog 2012-04-09 18:34:17 UTC (rev 113599)
+++ trunk/Source/WebCore/ChangeLog 2012-04-09 18:49:40 UTC (rev 113600)
@@ -1,3 +1,25 @@
+2012-04-09 Raymond Liu <[email protected]>
+
+ When creating JavaScriptNode, do not ignore the numberOfOutputChannels parameter.
+ https://bugs.webkit.org/show_bug.cgi?id=83250
+
+ Reviewed by Chris Rogers.
+
+ * Modules/webaudio/AudioContext.cpp:
+ (WebCore::AudioContext::createJavaScriptNode):
+ (WebCore):
+ * Modules/webaudio/AudioContext.h:
+ (AudioContext):
+ * Modules/webaudio/AudioContext.idl:
+ * Modules/webaudio/JavaScriptAudioNode.cpp:
+ (WebCore::JavaScriptAudioNode::create):
+ (WebCore):
+ (WebCore::JavaScriptAudioNode::JavaScriptAudioNode):
+ (WebCore::JavaScriptAudioNode::initialize):
+ (WebCore::JavaScriptAudioNode::process):
+ * Modules/webaudio/JavaScriptAudioNode.h:
+ (JavaScriptAudioNode):
+
2012-04-09 Abhishek Arya <[email protected]>
Crash due to floats not cleared before starting SVG <text> layout.
Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp (113599 => 113600)
--- trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp 2012-04-09 18:34:17 UTC (rev 113599)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp 2012-04-09 18:49:40 UTC (rev 113600)
@@ -369,12 +369,29 @@
}
#endif
-PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize)
+PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize, ExceptionCode& ec)
{
+ // Set number of input/output channels to stereo by default.
+ return createJavaScriptNode(bufferSize, 2, 2, ec);
+}
+
+PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
+{
+ // Set number of output channels to stereo by default.
+ return createJavaScriptNode(bufferSize, numberOfInputChannels, 2, ec);
+}
+
+PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
+{
ASSERT(isMainThread());
lazyInitialize();
- RefPtr<JavaScriptAudioNode> node = JavaScriptAudioNode::create(this, m_destinationNode->sampleRate(), bufferSize);
+ RefPtr<JavaScriptAudioNode> node = JavaScriptAudioNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+ if (!node.get()) {
+ ec = SYNTAX_ERR;
+ return 0;
+ }
+
refNode(node.get()); // context keeps reference until we stop making JavaScript rendering callbacks
return node;
}
Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.h (113599 => 113600)
--- trunk/Source/WebCore/Modules/webaudio/AudioContext.h 2012-04-09 18:34:17 UTC (rev 113599)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.h 2012-04-09 18:49:40 UTC (rev 113600)
@@ -123,7 +123,9 @@
PassRefPtr<ConvolverNode> createConvolver();
PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor();
PassRefPtr<RealtimeAnalyserNode> createAnalyser();
- PassRefPtr<JavaScriptAudioNode> createJavaScriptNode(size_t bufferSize);
+ PassRefPtr<JavaScriptAudioNode> createJavaScriptNode(size_t bufferSize, ExceptionCode&);
+ PassRefPtr<JavaScriptAudioNode> createJavaScriptNode(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode&);
+ PassRefPtr<JavaScriptAudioNode> createJavaScriptNode(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode&);
PassRefPtr<AudioChannelSplitter> createChannelSplitter();
PassRefPtr<AudioChannelMerger> createChannelMerger();
PassRefPtr<Oscillator> createOscillator();
Modified: trunk/Source/WebCore/Modules/webaudio/AudioContext.idl (113599 => 113600)
--- trunk/Source/WebCore/Modules/webaudio/AudioContext.idl 2012-04-09 18:34:17 UTC (rev 113599)
+++ trunk/Source/WebCore/Modules/webaudio/AudioContext.idl 2012-04-09 18:49:40 UTC (rev 113600)
@@ -71,7 +71,8 @@
ConvolverNode createConvolver();
DynamicsCompressorNode createDynamicsCompressor();
RealtimeAnalyserNode createAnalyser();
- JavaScriptAudioNode createJavaScriptNode(in unsigned long bufferSize);
+ JavaScriptAudioNode createJavaScriptNode(in unsigned long bufferSize, in [Optional] unsigned long numberOfInputChannels, in [Optional] unsigned long numberOfOutputChannels)
+ raises(DOMException);
Oscillator createOscillator();
WaveTable createWaveTable(in Float32Array real, in Float32Array imag)
raises(DOMException);
Modified: trunk/Source/WebCore/Modules/webaudio/JavaScriptAudioNode.cpp (113599 => 113600)
--- trunk/Source/WebCore/Modules/webaudio/JavaScriptAudioNode.cpp 2012-04-09 18:34:17 UTC (rev 113599)
+++ trunk/Source/WebCore/Modules/webaudio/JavaScriptAudioNode.cpp 2012-04-09 18:49:40 UTC (rev 113600)
@@ -42,19 +42,8 @@
const size_t DefaultBufferSize = 4096;
-PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
{
- return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputs, numberOfOutputs));
-}
-
-JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
- : AudioNode(context, sampleRate)
- , m_doubleBufferIndex(0)
- , m_doubleBufferIndexForEvent(0)
- , m_bufferSize(bufferSize)
- , m_bufferReadWriteIndex(0)
- , m_isRequestOutstanding(false)
-{
// Check for valid buffer size.
switch (bufferSize) {
case 256:
@@ -64,22 +53,37 @@
case 4096:
case 8192:
case 16384:
- m_bufferSize = bufferSize;
break;
default:
- m_bufferSize = DefaultBufferSize;
+ return 0;
}
-
- // Regardless of the allowed buffer sizes above, we still need to process at the granularity of the AudioNode.
+
+ // FIXME: We still need to implement numberOfInputChannels.
+ ASSERT_UNUSED(numberOfInputChannels, numberOfInputChannels <= AudioContext::maxNumberOfChannels());
+
+ if (!numberOfOutputChannels || numberOfOutputChannels > AudioContext::maxNumberOfChannels())
+ return 0;
+
+ return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
+}
+
+JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+ : AudioNode(context, sampleRate)
+ , m_doubleBufferIndex(0)
+ , m_doubleBufferIndexForEvent(0)
+ , m_bufferSize(bufferSize)
+ , m_bufferReadWriteIndex(0)
+ , m_isRequestOutstanding(false)
+{
+ // Regardless of the allowed buffer sizes, we still need to process at the granularity of the AudioNode.
if (m_bufferSize < AudioNode::ProcessingSizeInFrames)
m_bufferSize = AudioNode::ProcessingSizeInFrames;
- // FIXME: Right now we're hardcoded to single input and single output.
- // Although the specification says this is OK for a simple implementation, multiple inputs and outputs would be good.
- ASSERT_UNUSED(numberOfInputs, numberOfInputs == 1);
- ASSERT_UNUSED(numberOfOutputs, numberOfOutputs == 1);
+ // FIXME: We still need to implement numberOfInputChannels.
+ ASSERT_UNUSED(numberOfInputChannels, numberOfInputChannels > 0);
+
addInput(adoptPtr(new AudioNodeInput(this)));
- addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
+ addOutput(adoptPtr(new AudioNodeOutput(this, numberOfOutputChannels)));
setNodeType(NodeTypeJavaScript);
@@ -102,7 +106,7 @@
// These AudioBuffers will be directly accessed in the main thread by JavaScript.
for (unsigned i = 0; i < 2; ++i) {
m_inputBuffers.append(AudioBuffer::create(2, bufferSize(), sampleRate));
- m_outputBuffers.append(AudioBuffer::create(2, bufferSize(), sampleRate));
+ m_outputBuffers.append(AudioBuffer::create(this->output(0)->numberOfChannels(), bufferSize(), sampleRate));
}
AudioNode::initialize();
@@ -155,16 +159,15 @@
return;
unsigned numberOfInputChannels = inputBus->numberOfChannels();
+ unsigned numberOfOutputChannels = outputBus->numberOfChannels();
- bool channelsAreGood = (numberOfInputChannels == 1 || numberOfInputChannels == 2) && outputBus->numberOfChannels() == 2;
+ bool channelsAreGood = (numberOfInputChannels == 1 || numberOfInputChannels == 2);
ASSERT(channelsAreGood);
if (!channelsAreGood)
return;
const float* sourceL = inputBus->channel(0)->data();
const float* sourceR = numberOfInputChannels > 1 ? inputBus->channel(1)->data() : 0;
- float* destinationL = outputBus->channel(0)->mutableData();
- float* destinationR = outputBus->channel(1)->mutableData();
// Copy from the input to the input buffer. See "buffersAreGood" check above for safety.
size_t bytesToCopy = sizeof(float) * framesToProcess;
@@ -178,9 +181,9 @@
memcpy(inputBuffer->getChannelData(1)->data() + m_bufferReadWriteIndex, sourceL, bytesToCopy);
}
- // Copy from the output buffer to the output. See "buffersAreGood" check above for safety.
- memcpy(destinationL, outputBuffer->getChannelData(0)->data() + m_bufferReadWriteIndex, bytesToCopy);
- memcpy(destinationR, outputBuffer->getChannelData(1)->data() + m_bufferReadWriteIndex, bytesToCopy);
+ // Copy from the output buffer to the output.
+ for (unsigned i = 0; i < numberOfOutputChannels; ++i)
+ memcpy(outputBus->channel(i)->mutableData(), outputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, bytesToCopy);
// Update the buffering index.
m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();
Modified: trunk/Source/WebCore/Modules/webaudio/JavaScriptAudioNode.h (113599 => 113600)
--- trunk/Source/WebCore/Modules/webaudio/JavaScriptAudioNode.h 2012-04-09 18:34:17 UTC (rev 113599)
+++ trunk/Source/WebCore/Modules/webaudio/JavaScriptAudioNode.h 2012-04-09 18:49:40 UTC (rev 113600)
@@ -53,7 +53,7 @@
// This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
// Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
// The value chosen must carefully balance between latency and audio quality.
- static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
+ static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
virtual ~JavaScriptAudioNode();
@@ -81,7 +81,7 @@
virtual double tailTime() const OVERRIDE;
virtual double latencyTime() const OVERRIDE;
- JavaScriptAudioNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+ JavaScriptAudioNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
static void fireProcessEventDispatch(void* userData);
void fireProcessEvent();