Modified: branches/safari-612.3.6.0-branch/Source/WebKit/ChangeLog (285682 => 285683)
--- branches/safari-612.3.6.0-branch/Source/WebKit/ChangeLog 2021-11-11 23:48:06 UTC (rev 285682)
+++ branches/safari-612.3.6.0-branch/Source/WebKit/ChangeLog 2021-11-11 23:48:09 UTC (rev 285683)
@@ -1,3 +1,55 @@
+2021-11-11 Kocsen Chung <[email protected]>
+
+ Cherry-pick r285509. rdar://problem/83381842
+
+ Fix wrong frame count of CARingBuffer in SpeechRecognitionRemoteRealtimeMediaSource
+ https://bugs.webkit.org/show_bug.cgi?id=232863
+ <rdar://83381842>
+
+ Reviewed by Youenn Fablet.
+
+ SpeechRecognitionRealtimeMediaSourceManager::Source uses a shared ring buffer to pass audio data to
+ SpeechRecognitionRemoteRealtimeMediaSource. We used to ask the CARingBuffer in
+ SpeechRecognitionRealtimeMediaSourceManager::Source to allocate m_numberOfFrames frames and send m_numberOfFrames
+ to SpeechRecognitionRemoteRealtimeMediaSource (so SpeechRecognitionRemoteRealtimeMediaSource could create a
+ corresponding CARingBuffer). This is wrong because CARingBuffer::allocate() rounds frameCount up to a power of
+ two, which means m_numberOfFrames may not be the frame count actually used. We should instead get the actual
+ frameCount in the setStorage callback of SharedRingBufferStorage and pass that value to SpeechRecognitionRemoteRealtimeMediaSource.
+
+ Manually tested.
+
+ * UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp:
+ (WebKit::SpeechRecognitionRemoteRealtimeMediaSource::setStorage):
+ * WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp:
+ (WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::Source):
+ (WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::storageChanged):
+
+ git-svn-id: https://svn.webkit.org/repository/webkit/trunk@285509 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+
+ 2021-11-09 Sihui Liu <[email protected]>
+
+ Fix wrong frame count of CARingBuffer in SpeechRecognitionRemoteRealtimeMediaSource
+ https://bugs.webkit.org/show_bug.cgi?id=232863
+ <rdar://83381842>
+
+ Reviewed by Youenn Fablet.
+
+ SpeechRecognitionRealtimeMediaSourceManager::Source uses a shared ring buffer to pass audio data to
+ SpeechRecognitionRemoteRealtimeMediaSource. We used to ask the CARingBuffer in
+ SpeechRecognitionRealtimeMediaSourceManager::Source to allocate m_numberOfFrames frames and send m_numberOfFrames
+ to SpeechRecognitionRemoteRealtimeMediaSource (so SpeechRecognitionRemoteRealtimeMediaSource could create a
+ corresponding CARingBuffer). This is wrong because CARingBuffer::allocate() rounds frameCount up to a power of
+ two, which means m_numberOfFrames may not be the frame count actually used. We should instead get the actual
+ frameCount in the setStorage callback of SharedRingBufferStorage and pass that value to
+ SpeechRecognitionRemoteRealtimeMediaSource (a minimal sketch of the rounding follows this ChangeLog diff).
+
+ Manually tested.
+
+ * UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp:
+ (WebKit::SpeechRecognitionRemoteRealtimeMediaSource::setStorage):
+ * WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp:
+ (WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::Source):
+ (WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::storageChanged):
+
2021-11-08 Kocsen Chung <[email protected]>
Cherry-pick r285236. rdar://problem/83950623
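
A minimal, self-contained C++ sketch (not WebKit code; roundUpToPowerOfTwo is a hypothetical stand-in for the rounding that CARingBuffer::allocate() performs, per the ChangeLog above) of why the cached m_numberOfFrames can diverge from the frame count the ring buffer actually uses:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the power-of-two rounding that
    // CARingBuffer::allocate() applies to the requested frame count.
    static uint64_t roundUpToPowerOfTwo(uint64_t n)
    {
        uint64_t rounded = 1;
        while (rounded < n)
            rounded <<= 1;
        return rounded;
    }

    int main()
    {
        uint64_t requestedFrames = 44100 * 2;                            // what the Source used to cache in m_numberOfFrames
        uint64_t allocatedFrames = roundUpToPowerOfTwo(requestedFrames); // what the ring buffer actually holds
        std::printf("requested %llu frames, allocated %llu frames\n",
            static_cast<unsigned long long>(requestedFrames),
            static_cast<unsigned long long>(allocatedFrames));           // 88200 vs. 131072
        // Sending requestedFrames to the UI process would make its CARingBuffer
        // mirror the wrong geometry; the fix reports allocatedFrames from the
        // SharedRingBufferStorage callback instead.
        return 0;
    }
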
Modified: branches/safari-612.3.6.0-branch/Source/WebKit/UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp (285682 => 285683)
--- branches/safari-612.3.6.0-branch/Source/WebKit/UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp 2021-11-11 23:48:06 UTC (rev 285682)
+++ branches/safari-612.3.6.0-branch/Source/WebKit/UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp 2021-11-11 23:48:09 UTC (rev 285683)
@@ -77,8 +77,13 @@
void SpeechRecognitionRemoteRealtimeMediaSource::setStorage(const SharedMemory::Handle& handle, const WebCore::CAAudioStreamDescription& description, uint64_t numberOfFrames)
{
+ if (!numberOfFrames) {
+ m_ringBuffer = nullptr;
+ m_buffer = nullptr;
+ return;
+ }
+
m_description = description;
-
m_ringBuffer = WebCore::CARingBuffer::adoptStorage(makeUniqueRef<ReadOnlySharedRingBufferStorage>(handle), description, numberOfFrames).moveToUniquePtr();
m_buffer = makeUnique<WebCore::WebAudioBufferList>(description);
}
Modified: branches/safari-612.3.6.0-branch/Source/WebKit/WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp (285682 => 285683)
--- branches/safari-612.3.6.0-branch/Source/WebKit/WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp 2021-11-11 23:48:06 UTC (rev 285682)
+++ branches/safari-612.3.6.0-branch/Source/WebKit/WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp 2021-11-11 23:48:09 UTC (rev 285683)
@@ -63,7 +63,7 @@
, m_source(WTFMove(source))
, m_connection(WTFMove(connection))
#if PLATFORM(COCOA)
- , m_ringBuffer(makeUniqueRef<SharedRingBufferStorage>(std::bind(&Source::storageChanged, this, std::placeholders::_1)))
+ , m_ringBuffer(makeUniqueRef<SharedRingBufferStorage>(std::bind(&Source::storageChanged, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)))
#endif
{
m_source->addObserver(*this);
@@ -113,9 +113,7 @@
if (m_description != description) {
ASSERT(description.platformDescription().type == PlatformDescription::CAAudioStreamBasicType);
m_description = *WTF::get<const AudioStreamBasicDescription*>(description.platformDescription().description);
-
- m_numberOfFrames = m_description.sampleRate() * 2;
- m_ringBuffer.allocate(m_description.streamDescription(), m_numberOfFrames);
+ m_ringBuffer.allocate(m_description.streamDescription(), m_description.sampleRate() * 2);
}
ASSERT(is<WebAudioBufferList>(audioData));
@@ -131,7 +129,7 @@
#if PLATFORM(COCOA)
- void storageChanged(SharedMemory* storage)
+ void storageChanged(SharedMemory* storage, const WebCore::CAAudioStreamDescription& description, size_t numberOfFrames)
{
DisableMallocRestrictionsForCurrentThreadScope scope;
SharedMemory::Handle handle;
@@ -142,7 +140,7 @@
#else
uint64_t dataSize = 0;
#endif
- m_connection->send(Messages::SpeechRecognitionRemoteRealtimeMediaSourceManager::SetStorage(m_identifier, SharedMemory::IPCHandle { WTFMove(handle), dataSize }, m_description, m_numberOfFrames), 0);
+ m_connection->send(Messages::SpeechRecognitionRemoteRealtimeMediaSourceManager::SetStorage(m_identifier, SharedMemory::IPCHandle { WTFMove(handle), dataSize }, description, numberOfFrames), 0);
}
#endif
@@ -162,7 +160,6 @@
Ref<IPC::Connection> m_connection;
#if PLATFORM(COCOA)
- uint64_t m_numberOfFrames { 0 };
CARingBuffer m_ringBuffer;
CAAudioStreamDescription m_description { };
#endif
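
For readers skimming the hunks above, here is a minimal, self-contained C++ sketch (not WebKit code; FakeStorage, FakeSource, and FakeStreamDescription are hypothetical stand-ins) of the callback rewiring: the storage owner now reports the stream description and the frame count it actually allocated, mirroring the three-placeholder std::bind added to the Source constructor.

    #include <cstddef>
    #include <cstdio>
    #include <functional>

    struct FakeSharedMemory { };                          // stand-in for SharedMemory
    struct FakeStreamDescription { double sampleRate; };  // stand-in for CAAudioStreamDescription

    // Stand-in for the storage owner (SharedRingBufferStorage in the patch).
    struct FakeStorage {
        std::function<void(FakeSharedMemory*, const FakeStreamDescription&, size_t)> onChange;

        void allocate(const FakeStreamDescription& description, size_t requestedFrames)
        {
            size_t allocatedFrames = 1;
            while (allocatedFrames < requestedFrames) // power-of-two rounding, as described in the ChangeLog
                allocatedFrames <<= 1;
            FakeSharedMemory memory;
            onChange(&memory, description, allocatedFrames); // report the frame count actually allocated
        }
    };

    // Stand-in for SpeechRecognitionRealtimeMediaSourceManager::Source.
    struct FakeSource {
        void storageChanged(FakeSharedMemory*, const FakeStreamDescription& description, size_t numberOfFrames)
        {
            // The real code forwards these values over IPC in a SetStorage message.
            std::printf("forwarding %zu frames at %g Hz\n", numberOfFrames, description.sampleRate);
        }
    };

    int main()
    {
        FakeSource source;
        FakeStorage storage;
        // Mirrors the three-placeholder std::bind in the Source constructor.
        storage.onChange = std::bind(&FakeSource::storageChanged, &source,
            std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
        storage.allocate({ 44100.0 }, 44100 * 2); // reports 131072 frames, not 88200
        return 0;
    }

With the allocated count flowing through the callback, the receiving side no longer needs its own cached m_numberOfFrames, which is why that member is removed in the last hunk above.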