Title: [285509] trunk/Source/WebKit
Revision
285509
Author
[email protected]
Date
2021-11-09 10:53:15 -0800 (Tue, 09 Nov 2021)

Log Message

Fix wrong frame count of CARingBuffer in SpeechRecognitionRemoteRealtimeMediaSource
https://bugs.webkit.org/show_bug.cgi?id=232863
<rdar://83381842>

Reviewed by Youenn Fablet.

SpeechRecognitionRealtimeMediaSourceManager::Source uses shared ring buffer to pass audio data to
SpeechRecognitionRemoteRealtimeMediaSource. We used to ask CARingBuffer in
SpeechRecognitionRealtimeMediaSourceManager::Source to allocate with m_numberOfFrames and send m_numberOfFrames
to SpeechRecognitionRemoteRealtimeMediaSource (so SpeechRecognitionRemoteRealtimeMediaSource can create a
corresponding CARingBuffer). This is wrong because CARingBuffer::allocate() rounds up frameCount to a power of
two, which means m_numberOfFrames may not be the number used. We should get the actual frameCount in the
setStorage callback of SharedRingBufferStorage, and pass that value to SpeechRecognitionRemoteRealtimeMediaSource.

Manually tested.

* UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp:
(WebKit::SpeechRecognitionRemoteRealtimeMediaSource::setStorage):
* WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp:
(WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::Source):
(WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::storageChanged):

Modified Paths

Diff

Modified: trunk/Source/WebKit/ChangeLog (285508 => 285509)


--- trunk/Source/WebKit/ChangeLog	2021-11-09 18:51:26 UTC (rev 285508)
+++ trunk/Source/WebKit/ChangeLog	2021-11-09 18:53:15 UTC (rev 285509)
@@ -1,3 +1,27 @@
+2021-11-09  Sihui Liu  <[email protected]>
+
+        Fix wrong frame count of CARingBuffer in SpeechRecognitionRemoteRealtimeMediaSource
+        https://bugs.webkit.org/show_bug.cgi?id=232863
+        <rdar://83381842>
+
+        Reviewed by Youenn Fablet.
+
+        SpeechRecognitionRealtimeMediaSourceManager::Source uses shared ring buffer to pass audio data to 
+        SpeechRecognitionRemoteRealtimeMediaSource. We used to ask CARingBuffer in 
+        SpeechRecognitionRealtimeMediaSourceManager::Source to allocate with m_numberOfFrames and send m_numberOfFrames
+        to SpeechRecognitionRemoteRealtimeMediaSource (so SpeechRecognitionRemoteRealtimeMediaSource can create a 
+        corresponding CARingBuffer). This is wrong because CARingBuffer::allocate() rounds up frameCount to power of 
+        two, which means m_numberOfFrames may be not the number used. We should get the actual frameCount in the 
+        setStorage callback of SharedRingBufferStorage, and pass that value to SpeechRecognitionRemoteRealtimeMediaSource.
+
+        Manually tested.
+
+        * UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp:
+        (WebKit::SpeechRecognitionRemoteRealtimeMediaSource::setStorage):
+        * WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp:
+        (WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::Source):
+        (WebKit::SpeechRecognitionRealtimeMediaSourceManager::Source::storageChanged):
+
 2021-11-09  Brent Fulgham  <[email protected]>
 
         Reduce telemetry for well-understood sandbox rules

Modified: trunk/Source/WebKit/UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp (285508 => 285509)


--- trunk/Source/WebKit/UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp	2021-11-09 18:51:26 UTC (rev 285508)
+++ trunk/Source/WebKit/UIProcess/SpeechRecognitionRemoteRealtimeMediaSource.cpp	2021-11-09 18:53:15 UTC (rev 285509)
@@ -77,8 +77,13 @@
 
 void SpeechRecognitionRemoteRealtimeMediaSource::setStorage(const SharedMemory::Handle& handle, const WebCore::CAAudioStreamDescription& description, uint64_t numberOfFrames)
 {
+    if (!numberOfFrames) {
+        m_ringBuffer = nullptr;
+        m_buffer = nullptr;
+        return;
+    }
+
     m_description = description;
-
     m_ringBuffer = WebCore::CARingBuffer::adoptStorage(makeUniqueRef<ReadOnlySharedRingBufferStorage>(handle), description, numberOfFrames).moveToUniquePtr();
     m_buffer = makeUnique<WebCore::WebAudioBufferList>(description);
 }

Modified: trunk/Source/WebKit/WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp (285508 => 285509)


--- trunk/Source/WebKit/WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp	2021-11-09 18:51:26 UTC (rev 285508)
+++ trunk/Source/WebKit/WebProcess/Speech/SpeechRecognitionRealtimeMediaSourceManager.cpp	2021-11-09 18:53:15 UTC (rev 285509)
@@ -63,7 +63,7 @@
         , m_source(WTFMove(source))
         , m_connection(WTFMove(connection))
 #if PLATFORM(COCOA)
-        , m_ringBuffer(makeUniqueRef<SharedRingBufferStorage>(std::bind(&Source::storageChanged, this, std::placeholders::_1)))
+        , m_ringBuffer(makeUniqueRef<SharedRingBufferStorage>(std::bind(&Source::storageChanged, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)))
 #endif
     {
         m_source->addObserver(*this);
@@ -113,9 +113,7 @@
         if (m_description != description) {
             ASSERT(description.platformDescription().type == PlatformDescription::CAAudioStreamBasicType);
             m_description = *std::get<const AudioStreamBasicDescription*>(description.platformDescription().description);
-
-            m_numberOfFrames = m_description.sampleRate() * 2;
-            m_ringBuffer.allocate(m_description.streamDescription(), m_numberOfFrames);
+            m_ringBuffer.allocate(m_description.streamDescription(), m_description.sampleRate() * 2);
         }
 
         ASSERT(is<WebAudioBufferList>(audioData));
@@ -131,7 +129,7 @@
 
 #if PLATFORM(COCOA)
 
-    void storageChanged(SharedMemory* storage)
+    void storageChanged(SharedMemory* storage, const WebCore::CAAudioStreamDescription& description, size_t numberOfFrames)
     {
         DisableMallocRestrictionsForCurrentThreadScope scope;
         SharedMemory::Handle handle;
@@ -142,7 +140,7 @@
 #else
         uint64_t dataSize = 0;
 #endif
-        m_connection->send(Messages::SpeechRecognitionRemoteRealtimeMediaSourceManager::SetStorage(m_identifier, SharedMemory::IPCHandle { WTFMove(handle),  dataSize }, m_description, m_numberOfFrames), 0);
+        m_connection->send(Messages::SpeechRecognitionRemoteRealtimeMediaSourceManager::SetStorage(m_identifier, SharedMemory::IPCHandle { WTFMove(handle),  dataSize }, description, numberOfFrames), 0);
     }
 
 #endif
@@ -162,7 +160,6 @@
     Ref<IPC::Connection> m_connection;
 
 #if PLATFORM(COCOA)
-    uint64_t m_numberOfFrames { 0 };
     CARingBuffer m_ringBuffer;
     CAAudioStreamDescription m_description { };
 #endif
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to