Title: [272337] trunk/Source
Revision
272337
Author
[email protected]
Date
2021-02-03 12:40:15 -0800 (Wed, 03 Feb 2021)

Log Message

Create a SpeechRecognizer for each SpeechRecognitionRequest
https://bugs.webkit.org/show_bug.cgi?id=219699
<rdar://problem/72392097>

Patch by Sihui Liu <[email protected]> on 2021-02-03
Reviewed by Youenn Fablet.

Source/WebCore:

Make SpeechRecognizer take the ownership of the SpeechRecognitionRequest it's about to handle, and manage its
state.

* Modules/speech/SpeechRecognizer.cpp:
(WebCore::SpeechRecognizer::SpeechRecognizer):
(WebCore::SpeechRecognizer::~SpeechRecognizer):
(WebCore::SpeechRecognizer::abort):
(WebCore::SpeechRecognizer::stop):
(WebCore::SpeechRecognizer::clientIdentifier const):
(WebCore::SpeechRecognizer::start):
(WebCore::SpeechRecognizer::startCapture):
(WebCore::SpeechRecognizer::stopCapture):
(WebCore::SpeechRecognizer::startRecognition):
(WebCore::SpeechRecognizer::abortRecognition):
(WebCore::SpeechRecognizer::stopRecognition):
(WebCore::SpeechRecognizer::reset): Deleted.
(WebCore::SpeechRecognizer::resetRecognition): Deleted.
* Modules/speech/SpeechRecognizer.h:
(WebCore::SpeechRecognizer::setInactive):
(WebCore::SpeechRecognizer::currentClientIdentifier const): Deleted.
* Modules/speech/cocoa/SpeechRecognizerCocoa.mm:
(WebCore::SpeechRecognizer::startRecognition):
(WebCore::SpeechRecognizer::resetRecognition): Deleted.

Source/WebKit:

We used the same SpeechRecognizer for multiple requests, so we had to reset SpeechRecognizer state between
requests, which makes things complicated and hard to debug (like nested reset calls). Now we have one dedicated
SpeechRecognizer for each request.

* UIProcess/SpeechRecognitionServer.cpp:
(WebKit::SpeechRecognitionServer::requestPermissionForRequest):
(WebKit::SpeechRecognitionServer::handleRequest):
(WebKit::SpeechRecognitionServer::stop):
(WebKit::SpeechRecognitionServer::abort):
(WebKit::SpeechRecognitionServer::invalidate):
* UIProcess/SpeechRecognitionServer.h:

Modified Paths

Diff

Modified: trunk/Source/WebCore/ChangeLog (272336 => 272337)


--- trunk/Source/WebCore/ChangeLog	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebCore/ChangeLog	2021-02-03 20:40:15 UTC (rev 272337)
@@ -1,3 +1,35 @@
+2021-02-03  Sihui Liu  <[email protected]>
+
+        Create a SpeechRecognizer for each SpeechRecognitionRequest
+        https://bugs.webkit.org/show_bug.cgi?id=219699
+        <rdar://problem/72392097>
+
+        Reviewed by Youenn Fablet.
+
+        Make SpeechRecognizer take the ownership of the SpeechRecognitionRequest it's about to handle, and manage its
+        state.
+
+        * Modules/speech/SpeechRecognizer.cpp:
+        (WebCore::SpeechRecognizer::SpeechRecognizer):
+        (WebCore::SpeechRecognizer::~SpeechRecognizer):
+        (WebCore::SpeechRecognizer::abort):
+        (WebCore::SpeechRecognizer::stop):
+        (WebCore::SpeechRecognizer::clientIdentifier const):
+        (WebCore::SpeechRecognizer::start):
+        (WebCore::SpeechRecognizer::startCapture):
+        (WebCore::SpeechRecognizer::stopCapture):
+        (WebCore::SpeechRecognizer::startRecognition):
+        (WebCore::SpeechRecognizer::abortRecognition):
+        (WebCore::SpeechRecognizer::stopRecognition):
+        (WebCore::SpeechRecognizer::reset): Deleted.
+        (WebCore::SpeechRecognizer::resetRecognition): Deleted.
+        * Modules/speech/SpeechRecognizer.h:
+        (WebCore::SpeechRecognizer::setInactive):
+        (WebCore::SpeechRecognizer::currentClientIdentifier const): Deleted.
+        * Modules/speech/cocoa/SpeechRecognizerCocoa.mm:
+        (WebCore::SpeechRecognizer::startRecognition):
+        (WebCore::SpeechRecognizer::resetRecognition): Deleted.
+
 2021-02-03  Ryan Haddad  <[email protected]>
 
         Unreviewed, reverting r272300.

Modified: trunk/Source/WebCore/Modules/speech/SpeechRecognizer.cpp (272336 => 272337)


--- trunk/Source/WebCore/Modules/speech/SpeechRecognizer.cpp	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebCore/Modules/speech/SpeechRecognizer.cpp	2021-02-03 20:40:15 UTC (rev 272337)
@@ -26,6 +26,7 @@
 #include "config.h"
 #include "SpeechRecognizer.h"
 
+#include "SpeechRecognitionRequest.h"
 #include "SpeechRecognitionUpdate.h"
 #include <wtf/MediaTime.h>
 
@@ -35,24 +36,27 @@
 
 namespace WebCore {
 
-SpeechRecognizer::SpeechRecognizer(DelegateCallback&& callback)
-    : m_delegateCallback(WTFMove(callback))
+SpeechRecognizer::SpeechRecognizer(DelegateCallback&& delegateCallback, UniqueRef<SpeechRecognitionRequest>&& request)
+    : m_delegateCallback(WTFMove(delegateCallback))
+    , m_request(WTFMove(request))
 {
 }
 
-void SpeechRecognizer::reset()
+SpeechRecognizer::~SpeechRecognizer()
 {
-    if (!m_clientIdentifier)
+    if (m_state == State::Aborting || m_state == State::Stopping || m_state == State::Running)
+        m_delegateCallback(SpeechRecognitionUpdate::create(clientIdentifier(), SpeechRecognitionUpdateType::End));
+}
+
+void SpeechRecognizer::abort(Optional<SpeechRecognitionError>&& error)
+{
+    if (m_state == State::Aborting || m_state == State::Inactive)
         return;
+    m_state = State::Aborting;
 
-    stopCapture();
-    resetRecognition();
-    m_clientIdentifier = WTF::nullopt;
-}
+    if (error)
+        m_delegateCallback(SpeechRecognitionUpdate::createError(clientIdentifier(), *error));
 
-void SpeechRecognizer::abort()
-{
-    ASSERT(m_clientIdentifier);
     stopCapture();
     abortRecognition();
 }
@@ -59,25 +63,31 @@
 
 void SpeechRecognizer::stop()
 {
-    ASSERT(m_clientIdentifier);
+    if (m_state == State::Aborting || m_state == State::Inactive)
+        return;
+    m_state = State::Stopping;
+
     stopCapture();
     stopRecognition();
 }
 
+SpeechRecognitionConnectionClientIdentifier SpeechRecognizer::clientIdentifier() const
+{
+    return m_request->clientIdentifier();
+}
+
 #if ENABLE(MEDIA_STREAM)
 
-void SpeechRecognizer::start(SpeechRecognitionConnectionClientIdentifier clientIdentifier, Ref<RealtimeMediaSource>&& source, bool mockSpeechRecognitionEnabled, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t maxAlternatives)
+void SpeechRecognizer::start(Ref<RealtimeMediaSource>&& source, bool mockSpeechRecognitionEnabled)
 {
-    ASSERT(!m_clientIdentifier);
-    m_clientIdentifier = clientIdentifier;
-    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::Start));
-
-    if (!startRecognition(mockSpeechRecognitionEnabled, clientIdentifier, localeIdentifier, continuous, interimResults, maxAlternatives)) {
-        auto error = WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::ServiceNotAllowed, "Failed to start recognition"_s };
-        m_delegateCallback(WebCore::SpeechRecognitionUpdate::createError(clientIdentifier, WTFMove(error)));
+    if (!startRecognition(mockSpeechRecognitionEnabled, clientIdentifier(), m_request->lang(), m_request->continuous(), m_request->interimResults(), m_request->maxAlternatives())) {
+        auto error = SpeechRecognitionError { SpeechRecognitionErrorType::ServiceNotAllowed, "Failed to start recognition"_s };
+        m_delegateCallback(SpeechRecognitionUpdate::createError(clientIdentifier(), WTFMove(error)));
         return;
     }
 
+    m_state = State::Running;
+    m_delegateCallback(SpeechRecognitionUpdate::create(clientIdentifier(), SpeechRecognitionUpdateType::Start));
     startCapture(WTFMove(source));
 }
 
@@ -88,15 +98,12 @@
             weakThis->dataCaptured(time, data, description, sampleCount);
     };
 
-    auto stateUpdateCallback = [this, weakThis = makeWeakPtr(this)](const auto& update) {
-        if (!weakThis)
-            return;
-
-        ASSERT(m_clientIdentifier && m_clientIdentifier.value() == update.clientIdentifier());
-        m_delegateCallback(update);
+    auto stateUpdateCallback = [weakThis = makeWeakPtr(this)](const auto& update) {
+        if (weakThis)
+            weakThis->m_delegateCallback(update);
     };
 
-    m_source = makeUnique<SpeechRecognitionCaptureSource>(*m_clientIdentifier, WTFMove(dataCallback), WTFMove(stateUpdateCallback), WTFMove(source));
+    m_source = makeUnique<SpeechRecognitionCaptureSource>(clientIdentifier(), WTFMove(dataCallback), WTFMove(stateUpdateCallback), WTFMove(source));
 }
 
 #endif
@@ -107,7 +114,7 @@
         return;
 
     m_source = nullptr;
-    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::AudioEnd));
+    m_delegateCallback(SpeechRecognitionUpdate::create(clientIdentifier(), SpeechRecognitionUpdateType::AudioEnd));
 }
 
 #if !HAVE(SPEECHRECOGNIZER)
@@ -118,29 +125,19 @@
 
 bool SpeechRecognizer::startRecognition(bool, SpeechRecognitionConnectionClientIdentifier, const String&, bool, bool, uint64_t)
 {
-    m_isRecognizing = true;
     return true;
 }
 
 void SpeechRecognizer::abortRecognition()
 {
-    m_isRecognizing = false;
-    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::End));
+    m_delegateCallback(SpeechRecognitionUpdate::create(clientIdentifier(), SpeechRecognitionUpdateType::End));
 }
 
 void SpeechRecognizer::stopRecognition()
 {
-    m_isRecognizing = false;
-    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::End));
+    m_delegateCallback(SpeechRecognitionUpdate::create(clientIdentifier(), SpeechRecognitionUpdateType::End));
 }
 
-void SpeechRecognizer::resetRecognition()
-{
-    if (!m_isRecognizing)
-        return;
-    abortRecognition();
-}
-
 #endif
 
 } // namespace WebCore

Modified: trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h (272336 => 272337)


--- trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h	2021-02-03 20:40:15 UTC (rev 272337)
@@ -27,6 +27,8 @@
 
 #include "SpeechRecognitionCaptureSource.h"
 #include "SpeechRecognitionConnectionClientIdentifier.h"
+#include "SpeechRecognitionError.h"
+#include <wtf/UniqueRef.h>
 
 #if HAVE(SPEECHRECOGNIZER)
 #include <wtf/RetainPtr.h>
@@ -35,6 +37,7 @@
 
 namespace WebCore {
 
+class SpeechRecognitionRequest;
 class SpeechRecognitionUpdate;
 
 class SpeechRecognizer : public CanMakeWeakPtr<SpeechRecognizer> {
@@ -41,21 +44,27 @@
     WTF_MAKE_FAST_ALLOCATED;
 public:
     using DelegateCallback = Function<void(const SpeechRecognitionUpdate&)>;
-    WEBCORE_EXPORT explicit SpeechRecognizer(DelegateCallback&&);
-    WEBCORE_EXPORT ~SpeechRecognizer() = default;
+    WEBCORE_EXPORT explicit SpeechRecognizer(DelegateCallback&&, UniqueRef<SpeechRecognitionRequest>&&);
+    WEBCORE_EXPORT ~SpeechRecognizer();
 
 #if ENABLE(MEDIA_STREAM)
-    WEBCORE_EXPORT void start(SpeechRecognitionConnectionClientIdentifier, Ref<RealtimeMediaSource>&&, bool mockSpeechRecognitionEnabled, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t maxAlternatives);
+    WEBCORE_EXPORT void start(Ref<RealtimeMediaSource>&&, bool mockSpeechRecognitionEnabled);
 #endif
-    WEBCORE_EXPORT void reset();
-    WEBCORE_EXPORT void abort();
+    WEBCORE_EXPORT void abort(Optional<SpeechRecognitionError>&& = WTF::nullopt);
     WEBCORE_EXPORT void stop();
 
-    Optional<SpeechRecognitionConnectionClientIdentifier> currentClientIdentifier() const { return m_clientIdentifier; }
+    WEBCORE_EXPORT SpeechRecognitionConnectionClientIdentifier clientIdentifier() const;
     SpeechRecognitionCaptureSource* source() { return m_source.get(); }
 
+    void setInactive() { m_state = State::Inactive; }
+
 private:
-    void stopInternal();
+    enum class State {
+        Inactive,
+        Running,
+        Stopping,
+        Aborting
+    };
 
 #if ENABLE(MEDIA_STREAM)
     void startCapture(Ref<RealtimeMediaSource>&&);
@@ -65,17 +74,14 @@
     bool startRecognition(bool mockSpeechRecognitionEnabled, SpeechRecognitionConnectionClientIdentifier, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t alternatives);
     void abortRecognition();
     void stopRecognition();
-    void resetRecognition();
 
-    Optional<SpeechRecognitionConnectionClientIdentifier> m_clientIdentifier;
     DelegateCallback m_delegateCallback;
+    UniqueRef<SpeechRecognitionRequest> m_request;
     std::unique_ptr<SpeechRecognitionCaptureSource> m_source;
+    State m_state { State::Inactive };
 
 #if HAVE(SPEECHRECOGNIZER)
     RetainPtr<WebSpeechRecognizerTask> m_task;
-#else
-    // For testing.
-    bool m_isRecognizing { false };
 #endif
 };
 

Modified: trunk/Source/WebCore/Modules/speech/cocoa/SpeechRecognizerCocoa.mm (272336 => 272337)


--- trunk/Source/WebCore/Modules/speech/cocoa/SpeechRecognizerCocoa.mm	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebCore/Modules/speech/cocoa/SpeechRecognizerCocoa.mm	2021-02-03 20:40:15 UTC (rev 272337)
@@ -45,11 +45,9 @@
 bool SpeechRecognizer::startRecognition(bool mockSpeechRecognitionEnabled, SpeechRecognitionConnectionClientIdentifier identifier, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t alternatives)
 {
     auto taskClass = mockSpeechRecognitionEnabled ? [WebSpeechRecognizerTaskMock class] : [WebSpeechRecognizerTask class];
-    m_task = adoptNS([[taskClass alloc] initWithIdentifier:identifier locale:localeIdentifier doMultipleRecognitions:continuous reportInterimResults:interimResults maxAlternatives:alternatives delegateCallback:[this, weakThis = makeWeakPtr(this)](const WebCore::SpeechRecognitionUpdate& update) {
-        if (!weakThis)
-            return;
-
-        m_delegateCallback(update);
+    m_task = adoptNS([[taskClass alloc] initWithIdentifier:identifier locale:localeIdentifier doMultipleRecognitions:continuous reportInterimResults:interimResults maxAlternatives:alternatives delegateCallback:[weakThis = makeWeakPtr(this)](const WebCore::SpeechRecognitionUpdate& update) {
+        if (weakThis)
+            weakThis->m_delegateCallback(update);
     }]);
 
     return !!m_task;
@@ -67,15 +65,6 @@
     [m_task abort];
 }
 
-void SpeechRecognizer::resetRecognition()
-{
-    if (!m_task)
-        return;
-
-    auto task = std::exchange(m_task, nullptr);
-    [task abort];
-}
-
 } // namespace WebCore
 
 #endif // HAVE(SPEECHRECOGNIZER)

Modified: trunk/Source/WebKit/ChangeLog (272336 => 272337)


--- trunk/Source/WebKit/ChangeLog	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebKit/ChangeLog	2021-02-03 20:40:15 UTC (rev 272337)
@@ -1,3 +1,23 @@
+2021-02-03  Sihui Liu  <[email protected]>
+
+        Create a SpeechRecognizer for each SpeechRecognitionRequest
+        https://bugs.webkit.org/show_bug.cgi?id=219699
+        <rdar://problem/72392097>
+
+        Reviewed by Youenn Fablet.
+
+        We used the same SpeechRecognizer for multiple requests, so we had to reset SpeechRecognizer state between
+        requests, which makes things complicated and hard to debug (like nested reset calls). Now we have one dedicated
+        SpeechRecognizer for each request.
+
+        * UIProcess/SpeechRecognitionServer.cpp:
+        (WebKit::SpeechRecognitionServer::requestPermissionForRequest):
+        (WebKit::SpeechRecognitionServer::handleRequest):
+        (WebKit::SpeechRecognitionServer::stop):
+        (WebKit::SpeechRecognitionServer::abort):
+        (WebKit::SpeechRecognitionServer::invalidate):
+        * UIProcess/SpeechRecognitionServer.h:
+
 2021-02-03  Tim Horton  <[email protected]>
 
         REGRESSION (r270392): Launch time regression under IOSurface::maximumSize()

Modified: trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.cpp (272336 => 272337)


--- trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.cpp	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.cpp	2021-02-03 20:40:15 UTC (rev 272337)
@@ -71,64 +71,46 @@
             return;
 
         auto identifier = weakRequest->clientIdentifier();
+        auto request = m_requests.take(identifier);
         if (error) {
-            m_requests.remove(identifier);
             sendUpdate(identifier, WebCore::SpeechRecognitionUpdateType::Error, WTFMove(error));
             return;
         }
 
-        handleRequest(*weakRequest);
+        ASSERT(request);
+        handleRequest(makeUniqueRefFromNonNullUniquePtr(WTFMove(request)));
     });
 }
 
-void SpeechRecognitionServer::handleRequest(WebCore::SpeechRecognitionRequest& request)
+void SpeechRecognitionServer::handleRequest(UniqueRef<WebCore::SpeechRecognitionRequest>&& request)
 {
-    if (!m_recognizer) {
-        m_recognizer = makeUnique<WebCore::SpeechRecognizer>([this, weakThis = makeWeakPtr(this)](auto& update) {
-            if (!weakThis)
-                return;
+    if (m_recognizer)
+        m_recognizer->abort(WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::Aborted, "Another request is started"_s });
 
-            auto clientIdentifier = update.clientIdentifier();
-            if (!m_requests.contains(clientIdentifier))
-                return;
+    auto clientIdentifier = request->clientIdentifier();
+    m_recognizer = makeUnique<WebCore::SpeechRecognizer>([this, weakThis = makeWeakPtr(this)](auto& update) {
+        if (!weakThis)
+            return;
 
-            sendUpdate(update);
+        sendUpdate(update);
 
-            auto type = update.type();
-            if (type != WebCore::SpeechRecognitionUpdateType::Error && type != WebCore::SpeechRecognitionUpdateType::End)
-                return;
+        if (update.type() == WebCore::SpeechRecognitionUpdateType::Error)
+            m_recognizer->abort();
+        else if (update.type() == WebCore::SpeechRecognitionUpdateType::End)
+            m_recognizer->setInactive();
+    }, WTFMove(request));
 
-            if (m_isResetting)
-                return;
-            m_isResetting = true;
-
-            m_recognizer->reset();
-            m_requests.remove(clientIdentifier);
-            m_isResetting = false;
-        });
-    }
-
-    if (auto currentClientIdentifier = m_recognizer->currentClientIdentifier()) {
-        auto error = WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::Aborted, "Another request is started"_s };
-        sendUpdate(*currentClientIdentifier, WebCore::SpeechRecognitionUpdateType::Error, error);
-        m_recognizer->reset();
-    }
-
-    auto clientIdentifier = request.clientIdentifier();
 #if ENABLE(MEDIA_STREAM)
     auto sourceOrError = m_realtimeMediaSourceCreateFunction();
     if (!sourceOrError) {
-        m_requests.remove(clientIdentifier);
         sendUpdate(WebCore::SpeechRecognitionUpdate::createError(clientIdentifier, WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::AudioCapture, sourceOrError.errorMessage }));
         return;
     }
 
     WebProcessProxy::muteCaptureInPagesExcept(m_identifier);
-
     bool mockDeviceCapturesEnabled = m_checkIfMockSpeechRecognitionEnabled();
-    m_recognizer->start(clientIdentifier, sourceOrError.source(), mockDeviceCapturesEnabled, request.lang(), request.continuous(), request.interimResults(), request.maxAlternatives());
+    m_recognizer->start(sourceOrError.source(), mockDeviceCapturesEnabled);
 #else
-    m_requests.remove(clientIdentifier);
     sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::Error, WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::AudioCapture, "Audio capture is not implemented"_s });
 #endif
 }
@@ -136,34 +118,33 @@
 void SpeechRecognitionServer::stop(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier)
 {
     MESSAGE_CHECK(clientIdentifier);
-    if (m_recognizer && m_recognizer->currentClientIdentifier() == clientIdentifier) {
-        m_recognizer->stop();
+
+    if (m_requests.remove(clientIdentifier)) {
+        sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::End);
         return;
     }
 
-    if (m_requests.remove(clientIdentifier))
-        sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::End);
+    if (m_recognizer && m_recognizer->clientIdentifier() == clientIdentifier)
+        m_recognizer->stop();
 }
 
 void SpeechRecognitionServer::abort(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier)
 {
     MESSAGE_CHECK(clientIdentifier);
-    if (m_recognizer && m_recognizer->currentClientIdentifier() == clientIdentifier) {
-        m_recognizer->abort();
+    if (m_requests.remove(clientIdentifier)) {
+        sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::End);
         return;
     }
 
-    if (m_requests.remove(clientIdentifier))
-        sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::End);
+    if (m_recognizer && m_recognizer->clientIdentifier() == clientIdentifier)
+        m_recognizer->abort();
 }
 
 void SpeechRecognitionServer::invalidate(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier)
 {
     MESSAGE_CHECK(clientIdentifier);
-    if (m_requests.remove(clientIdentifier)) {
-        if (m_recognizer && m_recognizer->currentClientIdentifier() == clientIdentifier)
-            m_recognizer->abort();
-    }
+    if (m_recognizer && m_recognizer->clientIdentifier() == clientIdentifier)
+        m_recognizer->abort();
 }
 
 void SpeechRecognitionServer::sendUpdate(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier, WebCore::SpeechRecognitionUpdateType type, Optional<WebCore::SpeechRecognitionError> error, Optional<Vector<WebCore::SpeechRecognitionResultData>> result)
@@ -178,6 +159,7 @@
 
 void SpeechRecognitionServer::sendUpdate(const WebCore::SpeechRecognitionUpdate& update)
 {
+    WTFLogAlways("[%p]SpeechRecognitionServer::sendUpdate update.type[%d], update.clientIdentifier[%llu]", this, update.type(), update.clientIdentifier().toUInt64());
     send(Messages::WebSpeechRecognitionConnection::DidReceiveUpdate(update));
 }
 

Modified: trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.h (272336 => 272337)


--- trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.h	2021-02-03 20:36:28 UTC (rev 272336)
+++ trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.h	2021-02-03 20:40:15 UTC (rev 272337)
@@ -67,7 +67,7 @@
 
 private:
     void requestPermissionForRequest(WebCore::SpeechRecognitionRequest&);
-    void handleRequest(WebCore::SpeechRecognitionRequest&);
+    void handleRequest(UniqueRef<WebCore::SpeechRecognitionRequest>&&);
     void sendUpdate(WebCore::SpeechRecognitionConnectionClientIdentifier, WebCore::SpeechRecognitionUpdateType, Optional<WebCore::SpeechRecognitionError> = WTF::nullopt, Optional<Vector<WebCore::SpeechRecognitionResultData>> = WTF::nullopt);
     void sendUpdate(const WebCore::SpeechRecognitionUpdate&);
 
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to