Diff
Modified: trunk/Source/WebCore/ChangeLog (269848 => 269849)
--- trunk/Source/WebCore/ChangeLog 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/ChangeLog 2020-11-16 12:46:38 UTC (rev 269849)
@@ -1,3 +1,32 @@
+2020-11-16 Philippe Normand <[email protected]>
+
+ [GStreamer] Clean-up Audio{Data,StreamDescription} implementations
+ https://bugs.webkit.org/show_bug.cgi?id=218957
+
+ Reviewed by Xabier Rodriguez-Calvar.
+
+ Refactor the GStreamerAudioData and GStreamerStreamDescription implementations in order to
+ avoid unnecessary copies. The call sites were adapted accordingly. Some usage of the latter
+ class was removed because it was simpler to use the GstAudioInfo API directly.
+
+ * Modules/webaudio/MediaStreamAudioSourceGStreamer.cpp:
+ (WebCore::MediaStreamAudioSource::consumeAudio):
+ * platform/audio/gstreamer/GStreamerAudioData.h:
+ * platform/audio/gstreamer/GStreamerAudioStreamDescription.h:
+ * platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp:
+ (WebCore::GStreamerAudioCaptureSource::newSampleCallback):
+ * platform/mediastream/gstreamer/GStreamerMediaStreamSource.cpp:
+ (webkitMediaStreamSrcPushAudioSample):
+ * platform/mediastream/gstreamer/MockRealtimeAudioSourceGStreamer.cpp:
+ (WebCore::MockRealtimeAudioSourceGStreamer::render):
+ * platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp:
+ (WebCore::RealtimeIncomingAudioSourceLibWebRTC::OnData):
+ * platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp:
+ (WebCore::libwebrtcAudioFormat):
+ (WebCore::RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable):
+ (WebCore::RealtimeOutgoingAudioSourceLibWebRTC::pullAudioData):
+ * platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.h:
+
2020-11-16 Andres Gonzalez <[email protected]>
Optimization for AccessibilityNodeObject::accessibilityDescription.
Modified: trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceGStreamer.cpp (269848 => 269849)
--- trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceGStreamer.cpp 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceGStreamer.cpp 2020-11-16 12:46:38 UTC (rev 269849)
@@ -52,7 +52,7 @@
return;
}
- auto mediaTime = MediaTime((m_numberOfFrames * G_USEC_PER_SEC) / m_currentSettings.sampleRate(), G_USEC_PER_SEC);
+ MediaTime mediaTime((m_numberOfFrames * G_USEC_PER_SEC) / m_currentSettings.sampleRate(), G_USEC_PER_SEC);
m_numberOfFrames += numberOfFrames;
GstAudioInfo info;
@@ -64,10 +64,9 @@
auto buffer = adoptGRef(gst_buffer_new_allocate(nullptr, size, nullptr));
copyBusData(bus, buffer.get(), muted());
auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr));
- m_audioBuffer = makeUnique<GStreamerAudioData>(WTFMove(sample), info);
-
- GStreamerAudioStreamDescription description(info);
- audioSamplesAvailable(mediaTime, *m_audioBuffer, description, numberOfFrames);
+ GStreamerAudioData audioBuffer(WTFMove(sample), info);
+ GStreamerAudioStreamDescription description(&info);
+ audioSamplesAvailable(mediaTime, audioBuffer, description, numberOfFrames);
}
} // namespace WebCore
Modified: trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioData.h (269848 => 269849)
--- trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioData.h 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioData.h 2020-11-16 12:46:38 UTC (rev 269849)
@@ -32,8 +32,14 @@
class GStreamerAudioData final : public PlatformAudioData {
public:
- GStreamerAudioData(GRefPtr<GstSample>&& sample, GstAudioInfo info)
+ GStreamerAudioData(GRefPtr<GstSample>&& sample, GstAudioInfo&& info)
: m_sample(WTFMove(sample))
+ , m_audioInfo(WTFMove(info))
+ {
+ }
+
+ GStreamerAudioData(GRefPtr<GstSample>&& sample, const GstAudioInfo& info)
+ : m_sample(WTFMove(sample))
, m_audioInfo(info)
{
}
@@ -45,8 +51,8 @@
}
void setSample(GRefPtr<GstSample>&& sample) { m_sample = WTFMove(sample); }
- GstSample* getSample() { return m_sample.get(); }
- GstAudioInfo getAudioInfo() { return m_audioInfo; }
+ const GRefPtr<GstSample>& getSample() const { return m_sample; }
+ const GstAudioInfo& getAudioInfo() const { return m_audioInfo; }
uint32_t channelCount() const { return GST_AUDIO_INFO_CHANNELS(&m_audioInfo); }
private:
Modified: trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioStreamDescription.h (269848 => 269849)
--- trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioStreamDescription.h 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioStreamDescription.h 2020-11-16 12:46:38 UTC (rev 269849)
@@ -30,13 +30,19 @@
class GStreamerAudioStreamDescription final: public AudioStreamDescription {
public:
- GStreamerAudioStreamDescription(GstAudioInfo info)
+ GStreamerAudioStreamDescription(GstAudioInfo&& info)
+ : m_info(WTFMove(info))
+ , m_caps(adoptGRef(gst_audio_info_to_caps(&m_info)))
+ {
+ }
+
+ GStreamerAudioStreamDescription(const GstAudioInfo& info)
: m_info(info)
, m_caps(adoptGRef(gst_audio_info_to_caps(&m_info)))
{
}
- GStreamerAudioStreamDescription(GstAudioInfo *info)
+ GStreamerAudioStreamDescription(GstAudioInfo* info)
: m_info(*info)
, m_caps(adoptGRef(gst_audio_info_to_caps(&m_info)))
{
@@ -93,8 +99,8 @@
bool operator==(const GStreamerAudioStreamDescription& other) { return gst_audio_info_is_equal(&m_info, &other.m_info); }
bool operator!=(const GStreamerAudioStreamDescription& other) { return !operator == (other); }
- GstCaps* caps() { return m_caps.get(); }
- GstAudioInfo* getInfo() { return &m_info; }
+ const GRefPtr<GstCaps>& caps() const { return m_caps; }
+ const GstAudioInfo& getInfo() const { return m_info; }
private:
GstAudioInfo m_info;
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp (269848 => 269849)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp 2020-11-16 12:46:38 UTC (rev 269849)
@@ -124,13 +124,13 @@
auto sample = adoptGRef(gst_app_sink_pull_sample(GST_APP_SINK(sink)));
// FIXME - figure out a way to avoid copying (on write) the data.
- GstBuffer* buf = gst_sample_get_buffer(sample.get());
- auto frames(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample))));
- auto streamDesc(std::unique_ptr<GStreamerAudioStreamDescription>(new GStreamerAudioStreamDescription(frames->getAudioInfo())));
+ auto* buffer = gst_sample_get_buffer(sample.get());
+ GStreamerAudioData frames(WTFMove(sample));
+ GStreamerAudioStreamDescription description(frames.getAudioInfo());
source->audioSamplesAvailable(
- MediaTime(GST_TIME_AS_USECONDS(GST_BUFFER_PTS(buf)), G_USEC_PER_SEC),
- *frames, *streamDesc, gst_buffer_get_size(buf) / frames->getAudioInfo().bpf);
+ MediaTime(GST_TIME_AS_USECONDS(GST_BUFFER_PTS(buffer)), G_USEC_PER_SEC),
+ frames, description, gst_buffer_get_size(buffer) / description.getInfo().bpf);
return GST_FLOW_OK;
}
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerMediaStreamSource.cpp (269848 => 269849)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerMediaStreamSource.cpp 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerMediaStreamSource.cpp 2020-11-16 12:46:38 UTC (rev 269849)
@@ -40,7 +40,7 @@
using namespace WebCore;
static void webkitMediaStreamSrcPushVideoSample(WebKitMediaStreamSrc*, GstSample*);
-static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc*, GstSample*);
+static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc*, const GRefPtr<GstSample>&);
static void webkitMediaStreamSrcTrackEnded(WebKitMediaStreamSrc*, MediaStreamTrackPrivate&);
static void webkitMediaStreamSrcRemoveTrackByType(WebKitMediaStreamSrc*, RealtimeMediaSource::Type);
@@ -591,10 +591,10 @@
self->priv->videoSrc->pushSample(sample);
}
-static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc* self, GstSample* sample)
+static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc* self, const GRefPtr<GstSample>& sample)
{
if (self->priv->audioSrc)
- self->priv->audioSrc->pushSample(sample);
+ self->priv->audioSrc->pushSample(sample.get());
}
static void webkitMediaStreamSrcTrackEnded(WebKitMediaStreamSrc* self, MediaStreamTrackPrivate& track)
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/MockRealtimeAudioSourceGStreamer.cpp (269848 => 269849)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/MockRealtimeAudioSourceGStreamer.cpp 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/MockRealtimeAudioSourceGStreamer.cpp 2020-11-16 12:46:38 UTC (rev 269849)
@@ -81,13 +81,13 @@
uint32_t bipBopCount = std::min(frameCount, bipBopRemain);
ASSERT(m_streamFormat);
- GstAudioInfo* info = m_streamFormat->getInfo();
+ const auto& info = m_streamFormat->getInfo();
GRefPtr<GstBuffer> buffer = adoptGRef(gst_buffer_new_allocate(nullptr, bipBopCount * m_streamFormat->bytesPerFrame(), nullptr));
{
GstMappedBuffer map(buffer.get(), GST_MAP_WRITE);
if (muted())
- gst_audio_format_fill_silence(info->finfo, map.data(), map.size());
+ gst_audio_format_fill_silence(info.finfo, map.data(), map.size());
else {
memcpy(map.data(), &m_bipBopBuffer[bipBopStart], sizeof(float) * bipBopCount);
addHum(s_HumVolume, s_HumFrequency, sampleRate(), m_samplesRendered, reinterpret_cast<float*>(map.data()), bipBopCount);
@@ -98,11 +98,11 @@
totalFrameCount -= bipBopCount;
frameCount = std::min(totalFrameCount, m_maximiumFrameCount);
- GRefPtr<GstCaps> caps = adoptGRef(gst_audio_info_to_caps(info));
+ auto caps = adoptGRef(gst_audio_info_to_caps(&info));
auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr));
- auto data(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample), *info)));
- auto mediaTime = MediaTime((m_samplesRendered * G_USEC_PER_SEC) / sampleRate(), G_USEC_PER_SEC);
- audioSamplesAvailable(mediaTime, *data.get(), *m_streamFormat, bipBopCount);
+ GStreamerAudioData data(WTFMove(sample), info);
+ MediaTime mediaTime((m_samplesRendered * G_USEC_PER_SEC) / sampleRate(), G_USEC_PER_SEC);
+ audioSamplesAvailable(mediaTime, data, *m_streamFormat, bipBopCount);
}
}
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp (269848 => 269849)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp 2020-11-16 12:46:38 UTC (rev 269849)
@@ -72,13 +72,12 @@
memcpy(bufferData, audioData, bufferSize);
auto buffer = adoptGRef(gst_buffer_new_wrapped(bufferData, bufferSize));
- GRefPtr<GstCaps> caps = adoptGRef(gst_audio_info_to_caps(&info));
+ auto caps = adoptGRef(gst_audio_info_to_caps(&info));
auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr));
- auto data(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample), info)));
+ GStreamerAudioData data(WTFMove(sample), info);
+ MediaTime mediaTime((m_numberOfFrames * G_USEC_PER_SEC) / sampleRate, G_USEC_PER_SEC);
+ audioSamplesAvailable(mediaTime, data, GStreamerAudioStreamDescription(info), numberOfFrames);
- auto mediaTime = MediaTime((m_numberOfFrames * G_USEC_PER_SEC) / sampleRate, G_USEC_PER_SEC);
- audioSamplesAvailable(mediaTime, *data.get(), GStreamerAudioStreamDescription(info), numberOfFrames);
-
m_numberOfFrames += numberOfFrames;
}
}
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp (269848 => 269849)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp 2020-11-16 12:46:38 UTC (rev 269849)
@@ -22,10 +22,11 @@
#if USE(LIBWEBRTC) && USE(GSTREAMER)
#include "RealtimeOutgoingAudioSourceLibWebRTC.h"
+#include "GStreamerAudioData.h"
+#include "GStreamerAudioStreamDescription.h"
#include "LibWebRTCAudioFormat.h"
#include "LibWebRTCProvider.h"
#include "NotImplemented.h"
-#include "gstreamer/GStreamerAudioData.h"
namespace WebCore {
@@ -46,8 +47,7 @@
return RealtimeOutgoingAudioSourceLibWebRTC::create(WTFMove(audioSource));
}
-static inline std::unique_ptr<GStreamerAudioStreamDescription> libwebrtcAudioFormat(int sampleRate,
- size_t channelCount)
+static inline GstAudioInfo libwebrtcAudioFormat(int sampleRate, size_t channelCount)
{
GstAudioFormat format = gst_audio_format_build_integer(
LibWebRTCAudioFormat::isSigned,
@@ -59,34 +59,30 @@
size_t libWebRTCChannelCount = channelCount >= 2 ? 2 : channelCount;
gst_audio_info_set_format(&info, format, sampleRate, libWebRTCChannelCount, nullptr);
-
- return std::unique_ptr<GStreamerAudioStreamDescription>(new GStreamerAudioStreamDescription(info));
+ return info;
}
-void RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable(const MediaTime&,
- const PlatformAudioData& audioData, const AudioStreamDescription& streamDescription,
- size_t /* sampleCount */)
+void RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable(const MediaTime&, const PlatformAudioData& audioData, const AudioStreamDescription& streamDescription, size_t /* sampleCount */)
{
auto data = "" GStreamerAudioData&>(audioData);
auto desc = static_cast<const GStreamerAudioStreamDescription&>(streamDescription);
- if (m_sampleConverter && !gst_audio_info_is_equal(m_inputStreamDescription->getInfo(), desc.getInfo())) {
+ if (m_sampleConverter && !gst_audio_info_is_equal(&m_inputStreamDescription, &desc.getInfo())) {
GST_ERROR_OBJECT(this, "FIXME - Audio format renegotiation is not possible yet!");
m_sampleConverter = nullptr;
}
if (!m_sampleConverter) {
- m_inputStreamDescription = std::unique_ptr<GStreamerAudioStreamDescription>(new GStreamerAudioStreamDescription(desc.getInfo()));
- m_outputStreamDescription = libwebrtcAudioFormat(LibWebRTCAudioFormat::sampleRate, streamDescription.numberOfChannels());
- m_sampleConverter.reset(gst_audio_converter_new(GST_AUDIO_CONVERTER_FLAG_IN_WRITABLE,
- m_inputStreamDescription->getInfo(),
- m_outputStreamDescription->getInfo(),
- nullptr));
+ m_inputStreamDescription = desc.getInfo();
+ m_outputStreamDescription = libwebrtcAudioFormat(LibWebRTCAudioFormat::sampleRate, desc.numberOfChannels());
+ m_sampleConverter.reset(gst_audio_converter_new(GST_AUDIO_CONVERTER_FLAG_IN_WRITABLE, &m_inputStreamDescription,
+ &m_outputStreamDescription, nullptr));
}
{
LockHolder locker(m_adapterMutex);
- auto* buffer = gst_sample_get_buffer(data.getSample());
+ const auto& sample = data.getSample();
+ auto* buffer = gst_sample_get_buffer(sample.get());
gst_adapter_push(m_adapter.get(), gst_buffer_ref(buffer));
}
LibWebRTCProvider::callOnWebRTCSignalingThread([protectedThis = makeRef(*this)] {
@@ -96,24 +92,23 @@
void RealtimeOutgoingAudioSourceLibWebRTC::pullAudioData()
{
- if (!m_inputStreamDescription || !m_outputStreamDescription) {
+ if (!GST_AUDIO_INFO_IS_VALID(&m_inputStreamDescription) || !GST_AUDIO_INFO_IS_VALID(&m_outputStreamDescription)) {
GST_INFO("No stream description set yet.");
-
return;
}
size_t outChunkSampleCount = LibWebRTCAudioFormat::chunkSampleCount;
- size_t outBufferSize = outChunkSampleCount * m_outputStreamDescription->getInfo()->bpf;
+ size_t outBufferSize = outChunkSampleCount * m_outputStreamDescription.bpf;
LockHolder locker(m_adapterMutex);
size_t inChunkSampleCount = gst_audio_converter_get_in_frames(m_sampleConverter.get(), outChunkSampleCount);
- size_t inBufferSize = inChunkSampleCount * m_inputStreamDescription->getInfo()->bpf;
+ size_t inBufferSize = inChunkSampleCount * m_inputStreamDescription.bpf;
while (gst_adapter_available(m_adapter.get()) > inBufferSize) {
auto inBuffer = adoptGRef(gst_adapter_take_buffer(m_adapter.get(), inBufferSize));
m_audioBuffer.grow(outBufferSize);
if (isSilenced())
- gst_audio_format_fill_silence(m_outputStreamDescription->getInfo()->finfo, m_audioBuffer.data(), outBufferSize);
+ gst_audio_format_fill_silence(m_outputStreamDescription.finfo, m_audioBuffer.data(), outBufferSize);
else {
GstMappedBuffer inMap(inBuffer.get(), GST_MAP_READ);
@@ -126,8 +121,8 @@
}
}
- sendAudioFrames(m_audioBuffer.data(), LibWebRTCAudioFormat::sampleSize, static_cast<int>(m_outputStreamDescription->sampleRate()),
- static_cast<int>(m_outputStreamDescription->numberOfChannels()), outChunkSampleCount);
+ sendAudioFrames(m_audioBuffer.data(), LibWebRTCAudioFormat::sampleSize, GST_AUDIO_INFO_RATE(&m_outputStreamDescription),
+ GST_AUDIO_INFO_CHANNELS(&m_outputStreamDescription), outChunkSampleCount);
}
}
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.h (269848 => 269849)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.h 2020-11-16 12:29:26 UTC (rev 269848)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.h 2020-11-16 12:46:38 UTC (rev 269849)
@@ -21,7 +21,6 @@
#if USE(LIBWEBRTC)
-#include "GStreamerAudioStreamDescription.h"
#include "GStreamerCommon.h"
#include "RealtimeOutgoingAudioSource.h"
@@ -49,8 +48,8 @@
void pullAudioData();
GUniquePtr<GstAudioConverter> m_sampleConverter;
- std::unique_ptr<GStreamerAudioStreamDescription> m_inputStreamDescription;
- std::unique_ptr<GStreamerAudioStreamDescription> m_outputStreamDescription;
+ GstAudioInfo m_inputStreamDescription;
+ GstAudioInfo m_outputStreamDescription;
Lock m_adapterMutex;
GRefPtr<GstAdapter> m_adapter;