Modified: trunk/Source/WebCore/ChangeLog (238223 => 238224)
--- trunk/Source/WebCore/ChangeLog 2018-11-15 13:57:50 UTC (rev 238223)
+++ trunk/Source/WebCore/ChangeLog 2018-11-15 14:39:27 UTC (rev 238224)
@@ -1,3 +1,17 @@
+2018-11-15 Thibault Saunier <tsaunier@igalia.com>
+
+ [GStreamer][WebRTC] Add support for sending silence or silencing an incoming track
+ https://bugs.webkit.org/show_bug.cgi?id=191631
+
+ Reviewed by Xabier Rodriguez-Calvar.
+
+ This will be tested once webkit.org/b/186933 is implemented.
+
+ * platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp:
+ (WebCore::RealtimeIncomingAudioSourceLibWebRTC::OnData):
+ * platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp:
+ (WebCore::RealtimeOutgoingAudioSourceLibWebRTC::pullAudioData):
+
2018-11-15 Antti Koivisto <antti@apple.com>
REGRESSION(r238178): fast/forms/access-key-mutated.html and fast/forms/access-key-case-insensitive.html are timing out
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp (238223 => 238224)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp 2018-11-15 13:57:50 UTC (rev 238223)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp 2018-11-15 14:39:27 UTC (rev 238224)
@@ -64,9 +64,14 @@
gst_audio_info_set_format(&info, format, sampleRate, numberOfChannels, NULL);
- auto buffer = adoptGRef(gst_buffer_new_wrapped(
- g_memdup(audioData, GST_AUDIO_INFO_BPF(&info) * numberOfFrames),
- GST_AUDIO_INFO_BPF(&info) * numberOfFrames));
+ auto bufferSize = GST_AUDIO_INFO_BPF(&info) * numberOfFrames;
+ gpointer bufferData = g_malloc(bufferSize);
+ if (muted())
+ gst_audio_format_fill_silence(info.finfo, bufferData, bufferSize);
+ else
+ memcpy(bufferData, audioData, bufferSize);
+
+ auto buffer = adoptGRef(gst_buffer_new_wrapped(bufferData, bufferSize));
GRefPtr<GstCaps> caps = adoptGRef(gst_audio_info_to_caps(&info));
auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr));
auto data(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample), info)));
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp (238223 => 238224)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp 2018-11-15 13:57:50 UTC (rev 238223)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp 2018-11-15 14:39:27 UTC (rev 238224)
@@ -118,27 +118,25 @@
return;
}
- auto inbuf = adoptGRef(gst_adapter_take_buffer(m_adapter.get(), inBufferSize));
- GstMapInfo inmap;
- gst_buffer_map(inbuf.get(), &inmap, static_cast<GstMapFlags>(GST_MAP_READ));
+ auto inBuffer = adoptGRef(gst_adapter_take_buffer(m_adapter.get(), inBufferSize));
+ auto outBuffer = adoptGRef(gst_buffer_new_allocate(nullptr, outBufferSize, 0));
+ GstMappedBuffer outMap(outBuffer.get(), GST_MAP_WRITE);
+ if (isSilenced())
+ gst_audio_format_fill_silence(m_outputStreamDescription->getInfo()->finfo, outMap.data(), outMap.size());
+ else {
+ GstMappedBuffer inMap(inBuffer.get(), GST_MAP_READ);
- GstMapInfo outmap;
- auto outbuf = adoptGRef(gst_buffer_new_allocate(nullptr, outBufferSize, 0));
- gst_buffer_map(outbuf.get(), &outmap, static_cast<GstMapFlags>(GST_MAP_WRITE));
+ gpointer in[1] = { inMap.data() };
+ gpointer out[1] = { outMap.data() };
+ if (!gst_audio_converter_samples(m_sampleConverter, static_cast<GstAudioConverterFlags>(0), in, inChunkSampleCount, out, outChunkSampleCount)) {
+ GST_ERROR("Could not convert samples.");
- gpointer in[1] = { inmap.data };
- gpointer out[1] = { outmap.data };
- if (gst_audio_converter_samples(m_sampleConverter, static_cast<GstAudioConverterFlags>(0), in, inChunkSampleCount, out, outChunkSampleCount)) {
- sendAudioFrames(outmap.data,
- LibWebRTCAudioFormat::sampleSize,
- static_cast<int>(m_outputStreamDescription->sampleRate()),
- static_cast<int>(m_outputStreamDescription->numberOfChannels()),
- outChunkSampleCount);
- } else
- GST_ERROR("Could not convert samples.");
+ return;
+ }
+ }
- gst_buffer_unmap(inbuf.get(), &inmap);
- gst_buffer_unmap(outbuf.get(), &outmap);
+ sendAudioFrames(outMap.data(), LibWebRTCAudioFormat::sampleSize, static_cast<int>(m_outputStreamDescription->sampleRate()),
+ static_cast<int>(m_outputStreamDescription->numberOfChannels()), outChunkSampleCount);
}
bool RealtimeOutgoingAudioSourceLibWebRTC::isReachingBufferedAudioDataHighLimit()