Changeset 270772 in webkit
- Timestamp: Dec 14, 2020 9:45:54 AM
- Location: trunk
- Files: 8 added, 12 edited, 5 copied

- LayoutTests/ChangeLog (modified) (1 diff)
- LayoutTests/fast/speechrecognition/ios/restart-recognition-after-stop-expected.txt (added)
- LayoutTests/fast/speechrecognition/ios/restart-recognition-after-stop.html (added)
- LayoutTests/fast/speechrecognition/ios/start-recognition-then-stop-expected.txt (added)
- LayoutTests/fast/speechrecognition/ios/start-recognition-then-stop.html (added)
- Source/WebCore/ChangeLog (modified) (1 diff)
- Source/WebCore/Modules/speech/SpeechRecognizer.cpp (modified) (5 diffs)
- Source/WebCore/Modules/speech/SpeechRecognizer.h (modified) (3 diffs)
- Source/WebCore/Modules/speech/cocoa (added)
- Source/WebCore/Modules/speech/cocoa/SpeechRecognizerCocoa.mm (added)
- Source/WebCore/Modules/speech/cocoa/WebSpeechRecognizerTask.h (copied from trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h) (1 diff)
- Source/WebCore/Modules/speech/cocoa/WebSpeechRecognizerTask.mm (added)
- Source/WebCore/Modules/speech/cocoa/WebSpeechRecognizerTaskMock.h (copied from trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h) (1 diff)
- Source/WebCore/Modules/speech/cocoa/WebSpeechRecognizerTaskMock.mm (added)
- Source/WebCore/PAL/ChangeLog (modified) (1 diff)
- Source/WebCore/PAL/PAL.xcodeproj/project.pbxproj (modified) (6 diffs)
- Source/WebCore/PAL/pal/cocoa/SpeechSoftLink.h (copied from trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h) (1 diff)
- Source/WebCore/PAL/pal/cocoa/SpeechSoftLink.mm (copied from trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h) (1 diff)
- Source/WebCore/PAL/pal/spi/cocoa/SpeechSPI.h (copied from trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h) (1 diff)
- Source/WebCore/SourcesCocoa.txt (modified) (1 diff)
- Source/WebCore/WebCore.xcodeproj/project.pbxproj (modified) (5 diffs)
- Source/WebKit/ChangeLog (modified) (1 diff)
- Source/WebKit/UIProcess/SpeechRecognitionServer.cpp (modified) (6 diffs)
- Source/WebKit/UIProcess/SpeechRecognitionServer.h (modified) (4 diffs)
- Source/WebKit/UIProcess/WebProcessProxy.cpp (modified) (2 diffs)
trunk/LayoutTests/ChangeLog
(r270771 → r270772)

+2020-12-14  Sihui Liu  <sihui_liu@apple.com>
+
+        Implement recognizer for SpeechRecognition
+        https://bugs.webkit.org/show_bug.cgi?id=219459
+        <rdar://problem/71914465>
+
+        Reviewed by Youenn Fablet.
+
+        * fast/speechrecognition/ios/restart-recognition-after-stop-expected.txt: Added.
+        * fast/speechrecognition/ios/restart-recognition-after-stop.html: Added.
+        * fast/speechrecognition/ios/start-recognition-then-stop-expected.txt: Added.
+        * fast/speechrecognition/ios/start-recognition-then-stop.html: Added.
+
 2020-12-14  Andy Estes  <aestes@apple.com>
trunk/Source/WebCore/ChangeLog
(r270768 → r270772)

+2020-12-14  Sihui Liu  <sihui_liu@apple.com>
+
+        Implement recognizer for SpeechRecognition
+        https://bugs.webkit.org/show_bug.cgi?id=219459
+        <rdar://problem/71914465>
+
+        Reviewed by Youenn Fablet.
+
+        Add WebSpeechRecognizerTask, which connects to speech recognition service using Speech framework APIs.
+
+        Tests: fast/speechrecognition/ios/restart-recognition-after-stop.html
+               fast/speechrecognition/ios/start-recognition-then-stop.html
+
+        * Modules/speech/SpeechRecognizer.cpp:
+        (WebCore::SpeechRecognizer::reset):
+        (WebCore::SpeechRecognizer::abort):
+        (WebCore::SpeechRecognizer::stop):
+        (WebCore::SpeechRecognizer::start):
+        (WebCore::SpeechRecognizer::startCapture):
+        (WebCore::SpeechRecognizer::stopCapture):
+        (WebCore::SpeechRecognizer::dataCaptured):
+        (WebCore::SpeechRecognizer::startRecognition):
+        (WebCore::SpeechRecognizer::abortRecognition):
+        (WebCore::SpeechRecognizer::stopRecognition):
+        (WebCore::SpeechRecognizer::resetRecognition):
+        (WebCore::SpeechRecognizer::setSource): Deleted.
+        (WebCore::SpeechRecognizer::stopInternal): Deleted.
+        * Modules/speech/SpeechRecognizer.h:
+        * Modules/speech/cocoa/SpeechRecognizerCocoa.mm: Added.
+        (WebCore::SpeechRecognizer::dataCaptured):
+        (WebCore::SpeechRecognizer::startRecognition):
+        (WebCore::SpeechRecognizer::stopRecognition):
+        (WebCore::SpeechRecognizer::abortRecognition):
+        (WebCore::SpeechRecognizer::resetRecognition):
+        * Modules/speech/cocoa/WebSpeechRecognizerTask.h: Added.
+        * Modules/speech/cocoa/WebSpeechRecognizerTask.mm: Added.
+        (-[WebSpeechRecognizerTaskImpl initWithIdentifier:locale:doMultipleRecognitions:reportInterimResults:maxAlternatives:delegateCallback:]):
+        (-[WebSpeechRecognizerTaskImpl callbackWithResult:isFinal:]):
+        (-[WebSpeechRecognizerTaskImpl audioSamplesAvailable:]):
+        (-[WebSpeechRecognizerTaskImpl abort]):
+        (-[WebSpeechRecognizerTaskImpl stop]):
+        (-[WebSpeechRecognizerTaskImpl sendSpeechStartIfNeeded]):
+        (-[WebSpeechRecognizerTaskImpl sendSpeechEndIfNeeded]):
+        (-[WebSpeechRecognizerTaskImpl sendEndIfNeeded]):
+        (-[WebSpeechRecognizerTaskImpl speechRecognizer:availabilityDidChange:]):
+        (-[WebSpeechRecognizerTaskImpl speechRecognitionTask:didHypothesizeTranscription:]):
+        (-[WebSpeechRecognizerTaskImpl speechRecognitionTask:didFinishRecognition:]):
+        (-[WebSpeechRecognizerTaskImpl speechRecognitionTaskWasCancelled:]):
+        (-[WebSpeechRecognizerTaskImpl speechRecognitionTask:didFinishSuccessfully:]):
+        (-[WebSpeechRecognizerTask initWithIdentifier:locale:doMultipleRecognitions:reportInterimResults:maxAlternatives:delegateCallback:]):
+        (-[WebSpeechRecognizerTask audioSamplesAvailable:]):
+        (-[WebSpeechRecognizerTask abort]):
+        (-[WebSpeechRecognizerTask stop]):
+        * Modules/speech/cocoa/WebSpeechRecognizerTaskMock.h: Added.
+        * Modules/speech/cocoa/WebSpeechRecognizerTaskMock.mm: Added.
+        (-[WebSpeechRecognizerTaskMock initWithIdentifier:locale:doMultipleRecognitions:reportInterimResults:maxAlternatives:delegateCallback:]):
+        (-[WebSpeechRecognizerTaskMock audioSamplesAvailable:]):
+        (-[WebSpeechRecognizerTaskMock abort]):
+        (-[WebSpeechRecognizerTaskMock stop]):
+        * SourcesCocoa.txt:
+        * WebCore.xcodeproj/project.pbxproj:
+
 2020-12-14  Chris Dumez  <cdumez@apple.com>
trunk/Source/WebCore/Modules/speech/SpeechRecognizer.cpp
(r270574 → r270772)

 #include "SpeechRecognitionUpdate.h"
+#include <wtf/MediaTime.h>
 
 #if PLATFORM(COCOA)
 #include "MediaUtilities.h"
-#include <pal/avfoundation/MediaTimeAVFoundation.h>
 #endif
…
         return;
 
-    if (m_source)
-        m_source = nullptr;
+    stopCapture();
+    resetRecognition();
+    m_clientIdentifier = WTF::nullopt;
+}
 
-    auto error = SpeechRecognitionError { SpeechRecognitionErrorType::Aborted, "Another request is started" };
-    m_delegateCallback(SpeechRecognitionUpdate::createError(*m_clientIdentifier, error));
+void SpeechRecognizer::abort()
+{
+    ASSERT(m_clientIdentifier);
+    stopCapture();
+    abortRecognition();
+}
 
-    m_clientIdentifier = WTF::nullopt;
+void SpeechRecognizer::stop()
+{
+    ASSERT(m_clientIdentifier);
+    stopCapture();
+    stopRecognition();
 }
 
 #if ENABLE(MEDIA_STREAM)
 
-void SpeechRecognizer::start(SpeechRecognitionConnectionClientIdentifier identifier, Ref<RealtimeMediaSource>&& source)
+void SpeechRecognizer::start(SpeechRecognitionConnectionClientIdentifier clientIdentifier, Ref<RealtimeMediaSource>&& source, bool mockSpeechRecognitionEnabled, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t maxAlternatives)
 {
-    ASSERT(!m_source);
-
-    m_clientIdentifier = identifier;
+    ASSERT(!m_clientIdentifier);
+    m_clientIdentifier = clientIdentifier;
     m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::Start));
 
-    setSource(WTFMove(source));
+    if (!startRecognition(mockSpeechRecognitionEnabled, clientIdentifier, localeIdentifier, continuous, interimResults, maxAlternatives)) {
+        auto error = WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::ServiceNotAllowed, "Failed to start recognition"_s };
+        m_delegateCallback(WebCore::SpeechRecognitionUpdate::createError(clientIdentifier, WTFMove(error)));
+        return;
+    }
+
+    startCapture(WTFMove(source));
 }
 
-void SpeechRecognizer::setSource(Ref<RealtimeMediaSource>&& source)
+void SpeechRecognizer::startCapture(Ref<RealtimeMediaSource>&& source)
 {
     auto dataCallback = [weakThis = makeWeakPtr(this)](const auto& time, const auto& data, const auto& description, auto sampleCount) {
-        if (!weakThis)
-            return;
-
-#if PLATFORM(COCOA)
-        auto buffer = createAudioSampleBuffer(data, description, PAL::toCMTime(time), sampleCount);
-        UNUSED_PARAM(buffer);
-#else
-        UNUSED_PARAM(time);
-        UNUSED_PARAM(data);
-        UNUSED_PARAM(description);
-        UNUSED_PARAM(sampleCount);
-#endif
+        if (weakThis)
+            weakThis->dataCaptured(time, data, description, sampleCount);
     };
…
         ASSERT(m_clientIdentifier && m_clientIdentifier.value() == update.clientIdentifier());
         m_delegateCallback(update);
-
-        if (update.type() == SpeechRecognitionUpdateType::Error)
-            m_source = nullptr;
     };
…
 #endif
 
-void SpeechRecognizer::stop(ShouldGenerateFinalResult shouldGenerateFinalResult)
-{
-    if (!m_clientIdentifier)
-        return;
-
-    stopInternal();
-
-    if (shouldGenerateFinalResult == ShouldGenerateFinalResult::Yes) {
-        // TODO: generate real result when speech recognition backend is implemented.
-        Vector<SpeechRecognitionResultData> resultDatas;
-        m_delegateCallback(SpeechRecognitionUpdate::createResult(*m_clientIdentifier, resultDatas));
-    }
-
-    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::End));
-    m_clientIdentifier = WTF::nullopt;
-}
-
-void SpeechRecognizer::stopInternal()
+void SpeechRecognizer::stopCapture()
 {
     if (!m_source)
…
 }
 
+#if !HAVE(SPEECHRECOGNIZER)
+
+void SpeechRecognizer::dataCaptured(const MediaTime&, const PlatformAudioData&, const AudioStreamDescription&, size_t)
+{
+}
+
+bool SpeechRecognizer::startRecognition(bool, SpeechRecognitionConnectionClientIdentifier, const String&, bool, bool, uint64_t)
+{
+    return true;
+}
+
+void SpeechRecognizer::abortRecognition()
+{
+    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::End));
+}
+
+void SpeechRecognizer::stopRecognition()
+{
+    m_delegateCallback(SpeechRecognitionUpdate::create(*m_clientIdentifier, SpeechRecognitionUpdateType::End));
+}
+
+void SpeechRecognizer::resetRecognition()
+{
+}
+
+#endif
+
 } // namespace WebCore
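After this refactoring, SpeechRecognizer composes two independent halves: audio capture (startCapture/stopCapture) and platform recognition (startRecognition/abortRecognition/stopRecognition/resetRecognition), with a graceful stop() versus an immediate abort() replacing the old ShouldGenerateFinalResult flag. A minimal standalone sketch of that shape, with hypothetical Recognizer/Update names and stub bodies, not WebKit code:

```cpp
#include <functional>
#include <iostream>
#include <optional>
#include <string>

struct Update { int client; std::string type; };

class Recognizer {
public:
    explicit Recognizer(std::function<void(const Update&)> callback)
        : m_callback(std::move(callback)) { }

    void start(int client)
    {
        m_client = client;
        m_callback({ client, "start" });
        if (!startRecognition()) {            // platform backend (Speech framework, or mock)
            m_callback({ client, "error" });
            return;
        }
        startCapture();                       // microphone half, independent of the backend
    }

    void stop()  { stopCapture(); stopRecognition(); }   // graceful: pending results still flush
    void abort() { stopCapture(); abortRecognition(); }  // immediate: no final results
    void reset() { stopCapture(); resetRecognition(); m_client.reset(); }

private:
    bool startRecognition() { return true; } // stub standing in for the platform task
    void startCapture() { }
    void stopCapture() { }
    void stopRecognition()  { m_callback({ *m_client, "end" }); }
    void abortRecognition() { m_callback({ *m_client, "end" }); }
    void resetRecognition() { }

    std::optional<int> m_client;
    std::function<void(const Update&)> m_callback;
};

int main()
{
    Recognizer recognizer([](const Update& update) {
        std::cout << update.type << " for client " << update.client << '\n';
    });
    recognizer.start(1);
    recognizer.stop(); // prints "start for client 1" then "end for client 1"
}
```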
trunk/Source/WebCore/Modules/speech/SpeechRecognizer.h
(r270574 → r270772)

 #include "SpeechRecognitionConnectionClientIdentifier.h"
 
+#if HAVE(SPEECHRECOGNIZER)
+#include <wtf/RetainPtr.h>
+OBJC_CLASS WebSpeechRecognizerTask;
+#endif
+
 namespace WebCore {
…
 #if ENABLE(MEDIA_STREAM)
-    WEBCORE_EXPORT void start(SpeechRecognitionConnectionClientIdentifier, Ref<RealtimeMediaSource>&&);
+    WEBCORE_EXPORT void start(SpeechRecognitionConnectionClientIdentifier, Ref<RealtimeMediaSource>&&, bool mockSpeechRecognitionEnabled, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t maxAlternatives);
 #endif
     WEBCORE_EXPORT void reset();
-    enum class ShouldGenerateFinalResult { No, Yes };
-    WEBCORE_EXPORT void stop(ShouldGenerateFinalResult = ShouldGenerateFinalResult::Yes);
+    WEBCORE_EXPORT void abort();
+    WEBCORE_EXPORT void stop();
 
     Optional<SpeechRecognitionConnectionClientIdentifier> currentClientIdentifier() const { return m_clientIdentifier; }
…
 #if ENABLE(MEDIA_STREAM)
-    void setSource(Ref<RealtimeMediaSource>&&);
+    void startCapture(Ref<RealtimeMediaSource>&&);
 #endif
+    void stopCapture();
+    void dataCaptured(const WTF::MediaTime&, const PlatformAudioData&, const AudioStreamDescription&, size_t sampleCount);
+    bool startRecognition(bool mockSpeechRecognitionEnabled, SpeechRecognitionConnectionClientIdentifier, const String& localeIdentifier, bool continuous, bool interimResults, uint64_t alternatives);
+    void abortRecognition();
+    void stopRecognition();
+    void resetRecognition();
 
     Optional<SpeechRecognitionConnectionClientIdentifier> m_clientIdentifier;
     DelegateCallback m_delegateCallback;
     std::unique_ptr<SpeechRecognitionCaptureSource> m_source;
+
+#if HAVE(SPEECHRECOGNIZER)
+    RetainPtr<WebSpeechRecognizerTask> m_task;
+#endif
 };
trunk/Source/WebCore/Modules/speech/cocoa/WebSpeechRecognizerTask.h
(r270771 → r270772; the body copied from SpeechRecognizer.h is removed, replaced by the new Objective-C interface)

+#if HAVE(SPEECHRECOGNIZER)
+
+#import "SpeechRecognitionConnectionClientIdentifier.h"
+#import "SpeechRecognitionUpdate.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+typedef struct opaqueCMSampleBuffer *CMSampleBufferRef;
+
+@class WebSpeechRecognizerTaskImpl;
+
+@interface WebSpeechRecognizerTask : NSObject {
+@private
+    RetainPtr<WebSpeechRecognizerTaskImpl> _impl;
+}
+
+- (instancetype)initWithIdentifier:(WebCore::SpeechRecognitionConnectionClientIdentifier)identifier locale:(NSString*)localeIdentifier doMultipleRecognitions:(BOOL)continuous reportInterimResults:(BOOL)interimResults maxAlternatives:(unsigned long)alternatives delegateCallback:(void(^)(const WebCore::SpeechRecognitionUpdate&))callback;
+- (void)audioSamplesAvailable:(CMSampleBufferRef)sampleBuffer;
+- (void)abort;
+- (void)stop;
+
+@end
+
+NS_ASSUME_NONNULL_END
+
+#endif
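For orientation, here is a hedged sketch of how a client such as SpeechRecognizerCocoa.mm might drive this interface. The wrapper function and the clientIdentifier/sampleBuffer parameters are assumptions for illustration, not code from this changeset:

```objc
// Illustrative only: an assumed call site for WebSpeechRecognizerTask.
#import "WebSpeechRecognizerTask.h"
#import <wtf/RetainPtr.h>

static RetainPtr<WebSpeechRecognizerTask> startTaskSketch(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier, CMSampleBufferRef sampleBuffer)
{
    auto task = adoptNS([[WebSpeechRecognizerTask alloc] initWithIdentifier:clientIdentifier
        locale:@"en-US"
        doMultipleRecognitions:NO
        reportInterimResults:YES
        maxAlternatives:1
        delegateCallback:^(const WebCore::SpeechRecognitionUpdate& update) {
            // Start/SpeechStart/Result/End updates funnel back toward the
            // SpeechRecognizer delegate callback from here.
        }]);

    // Captured audio arrives as CMSampleBuffers (see SpeechRecognizer::dataCaptured).
    [task audioSamplesAvailable:sampleBuffer];

    // -stop lets pending results flush before ending; -abort tears down immediately.
    return task;
}
```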
trunk/Source/WebCore/Modules/speech/cocoa/WebSpeechRecognizerTaskMock.h
(r270771 → r270772; the body copied from SpeechRecognizer.h is removed, replaced by the mock's interface)

+#if HAVE(SPEECHRECOGNIZER)
+
+#import "WebSpeechRecognizerTask.h"
+#import <wtf/BlockPtr.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class WebSpeechRecognizerTaskMock;
+
+@interface WebSpeechRecognizerTaskMock : WebSpeechRecognizerTask {
+@private
+    WebCore::SpeechRecognitionConnectionClientIdentifier _identifier;
+    BlockPtr<void(const WebCore::SpeechRecognitionUpdate&)> _delegateCallback;
+    bool _doMultipleRecognitions;
+    bool _hasSentSpeechStart;
+    bool _hasSentSpeechEnd;
+    bool _completed;
+}
+
+- (instancetype)initWithIdentifier:(WebCore::SpeechRecognitionConnectionClientIdentifier)identifier locale:(NSString*)localeIdentifier doMultipleRecognitions:(BOOL)continuous reportInterimResults:(BOOL)interimResults maxAlternatives:(unsigned long)alternatives delegateCallback:(void(^)(const WebCore::SpeechRecognitionUpdate&))callback;
+- (void)audioSamplesAvailable:(CMSampleBufferRef)sampleBuffer;
+- (void)abort;
+- (void)stop;
+
+@end
+
+NS_ASSUME_NONNULL_END
+
+#endif
trunk/Source/WebCore/PAL/ChangeLog
(r270758 → r270772)

+2020-12-14  Sihui Liu  <sihui_liu@apple.com>
+
+        Implement recognizer for SpeechRecognition
+        https://bugs.webkit.org/show_bug.cgi?id=219459
+        <rdar://problem/71914465>
+
+        Reviewed by Youenn Fablet.
+
+        Add soft linking to Speech framework and SPI.
+
+        * PAL.xcodeproj/project.pbxproj:
+        * pal/cocoa/SpeechSoftLink.h: Added.
+        * pal/cocoa/SpeechSoftLink.mm: Added.
+        * pal/spi/cocoa/SpeechSPI.h: Added.
+
 2020-12-13  Andy Estes  <aestes@apple.com>
trunk/Source/WebCore/PAL/PAL.xcodeproj/project.pbxproj
(r269893 → r270772; additions only — build-file entries, file references, group members, and build-phase entries for the new Speech soft-link files)

+       93B38EBE25821CB600198E63 /* SpeechSoftLink.h in Headers */ = {isa = PBXBuildFile; fileRef = 93B38EBD25821CB600198E63 /* SpeechSoftLink.h */; };
+       93B38EC025821CD800198E63 /* SpeechSoftLink.mm in Sources */ = {isa = PBXBuildFile; fileRef = 93B38EBF25821CD700198E63 /* SpeechSoftLink.mm */; };
+       93B38EC225821D2200198E63 /* SpeechSPI.h in Headers */ = {isa = PBXBuildFile; fileRef = 93B38EC125821D2200198E63 /* SpeechSPI.h */; };
…
+       93B38EBD25821CB600198E63 /* SpeechSoftLink.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SpeechSoftLink.h; sourceTree = "<group>"; };
+       93B38EBF25821CD700198E63 /* SpeechSoftLink.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = SpeechSoftLink.mm; sourceTree = "<group>"; };
+       93B38EC125821D2200198E63 /* SpeechSPI.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SpeechSPI.h; sourceTree = "<group>"; };
…
+                       93B38EC125821D2200198E63 /* SpeechSPI.h */,
…
+                       93B38EBD25821CB600198E63 /* SpeechSoftLink.h */,
+                       93B38EBF25821CD700198E63 /* SpeechSoftLink.mm */,
…
+                       93B38EBE25821CB600198E63 /* SpeechSoftLink.h in Headers */,
+                       93B38EC225821D2200198E63 /* SpeechSPI.h in Headers */,
…
+                       93B38EC025821CD800198E63 /* SpeechSoftLink.mm in Sources */,
trunk/Source/WebCore/PAL/pal/cocoa/SpeechSoftLink.h
(r270771 → r270772; the body copied from SpeechRecognizer.h is removed, replaced by the soft-link declarations)

 #pragma once
 
+#if HAVE(SPEECHRECOGNIZER)
+
+#import <Speech/Speech.h>
+#import <wtf/SoftLinking.h>
+
+SOFT_LINK_FRAMEWORK_FOR_HEADER(PAL, Speech);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFSpeechRecognitionResult);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFSpeechRecognitionRequest);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFSpeechAudioBufferRecognitionRequest);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFSpeechRecognitionTask);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFSpeechRecognizer);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFTranscriptionSegment);
+SOFT_LINK_CLASS_FOR_HEADER(PAL, SFTranscription);
+
+#endif
trunk/Source/WebCore/PAL/pal/cocoa/SpeechSoftLink.mm
(r270771 → r270772; the body copied from SpeechRecognizer.h is removed, replaced by the soft-link definitions)

-#pragma once
+#import "config.h"
 
+#if HAVE(SPEECHRECOGNIZER)
+
+#import <Speech/Speech.h>
+#import <wtf/SoftLinking.h>
+
+SOFT_LINK_FRAMEWORK_FOR_SOURCE(PAL, Speech)
+
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFSpeechRecognitionResult, PAL_EXPORT);
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFSpeechRecognitionRequest, PAL_EXPORT);
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFSpeechAudioBufferRecognitionRequest, PAL_EXPORT);
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFSpeechRecognitionTask, PAL_EXPORT);
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFSpeechRecognizer, PAL_EXPORT);
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFTranscriptionSegment, PAL_EXPORT);
+SOFT_LINK_CLASS_FOR_SOURCE_WITH_EXPORT(PAL, Speech, SFTranscription, PAL_EXPORT);
+
+#endif
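With these definitions, call sites reach Speech classes through PAL's lazily-resolved accessors instead of hard-linking the framework. A hedged sketch of what such a call site could look like; the helper below is hypothetical, and it assumes (per my reading of WTF's SoftLinking macros) that SOFT_LINK_CLASS_FOR_HEADER/SOURCE generate get<Class>Class()-style accessors:

```objc
// Hypothetical call site; not part of this changeset.
#import <pal/cocoa/SpeechSoftLink.h>
#import <wtf/RetainPtr.h>

static RetainPtr<SFSpeechRecognizer> makeRecognizerSketch(NSString *localeIdentifier)
{
    // Speech.framework is loaded on first use of the soft-link accessor,
    // keeping it out of WebKit's hard link-time dependencies.
    auto locale = adoptNS([[NSLocale alloc] initWithLocaleIdentifier:localeIdentifier]);
    return adoptNS([[PAL::getSFSpeechRecognizerClass() alloc] initWithLocale:locale.get()]);
}
```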
trunk/Source/WebCore/PAL/pal/spi/cocoa/SpeechSPI.h
(r270771 → r270772; the body copied from SpeechRecognizer.h is removed, replaced by the SPI declarations)

-#pragma once
+#if HAVE(SPEECHRECOGNIZER)
 
+#if USE(APPLE_INTERNAL_SDK)
+
+#import <Speech/SFSpeechRecognitionRequest_Private.h>
+
+#else
+
+NS_ASSUME_NONNULL_BEGIN
+
+@interface SFSpeechRecognitionRequest ()
+@property (nonatomic) BOOL detectMultipleUtterances;
+@property (nonatomic, getter=_maximumRecognitionDuration, setter=_setMaximumRecognitionDuration:) NSTimeInterval _maximumRecognitionDuration;
+
+@end
+
+NS_ASSUME_NONNULL_END
+
+#endif // USE(APPLE_INTERNAL_SDK)
+
+#endif // HAVE(SPEECHRECOGNIZER)
trunk/Source/WebCore/SourcesCocoa.txt
(r270662 → r270772)

 Modules/plugins/QuickTimePluginReplacement.mm
 Modules/plugins/YouTubePluginReplacement.cpp
+Modules/speech/cocoa/SpeechRecognizerCocoa.mm
+Modules/speech/cocoa/WebSpeechRecognizerTask.mm
+Modules/speech/cocoa/WebSpeechRecognizerTaskMock.mm
 Modules/webdatabase/cocoa/DatabaseManagerCocoa.mm
 accessibility/ios/AXObjectCacheIOS.mm
trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj
(r270748 → r270772; additions only — build-file entries, file references, and a new Modules/speech/cocoa group)

+       93B38EC325821DB400198E63 /* WebSpeechRecognizerTask.h in Headers */ = {isa = PBXBuildFile; fileRef = 93B38EBA2582193B00198E63 /* WebSpeechRecognizerTask.h */; };
+       93B38EC425821DB700198E63 /* WebSpeechRecognizerTaskMock.h in Headers */ = {isa = PBXBuildFile; fileRef = 93B38EBB2582193D00198E63 /* WebSpeechRecognizerTaskMock.h */; };
…
+       93B38EB82582189900198E63 /* SpeechRecognizerCocoa.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = SpeechRecognizerCocoa.mm; sourceTree = "<group>"; };
+       93B38EB92582193A00198E63 /* WebSpeechRecognizerTaskMock.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = WebSpeechRecognizerTaskMock.mm; sourceTree = "<group>"; };
+       93B38EBA2582193B00198E63 /* WebSpeechRecognizerTask.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WebSpeechRecognizerTask.h; sourceTree = "<group>"; };
+       93B38EBB2582193D00198E63 /* WebSpeechRecognizerTaskMock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WebSpeechRecognizerTaskMock.h; sourceTree = "<group>"; };
+       93B38EBC2582193E00198E63 /* WebSpeechRecognizerTask.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = WebSpeechRecognizerTask.mm; sourceTree = "<group>"; };
…
+       93B38EB62582183300198E63 /* cocoa */ = {
+               isa = PBXGroup;
+               children = (
+                       93B38EB82582189900198E63 /* SpeechRecognizerCocoa.mm */,
+                       93B38EBA2582193B00198E63 /* WebSpeechRecognizerTask.h */,
+                       93B38EBC2582193E00198E63 /* WebSpeechRecognizerTask.mm */,
+                       93B38EBB2582193D00198E63 /* WebSpeechRecognizerTaskMock.h */,
+                       93B38EB92582193A00198E63 /* WebSpeechRecognizerTaskMock.mm */,
+               );
+               path = cocoa;
+               sourceTree = "<group>";
+       };
…
+                       93B38EB62582183300198E63 /* cocoa */,
…
+                       93B38EC325821DB400198E63 /* WebSpeechRecognizerTask.h in Headers */,
+                       93B38EC425821DB700198E63 /* WebSpeechRecognizerTaskMock.h in Headers */,
trunk/Source/WebKit/ChangeLog
(r270768 → r270772)

+2020-12-14  Sihui Liu  <sihui_liu@apple.com>
+
+        Implement recognizer for SpeechRecognition
+        https://bugs.webkit.org/show_bug.cgi?id=219459
+        <rdar://problem/71914465>
+
+        Reviewed by Youenn Fablet.
+
+        * UIProcess/SpeechRecognitionServer.cpp:
+        (WebKit::SpeechRecognitionServer::SpeechRecognitionServer):
+        (WebKit::SpeechRecognitionServer::requestPermissionForRequest):
+        (WebKit::SpeechRecognitionServer::handleRequest):
+        (WebKit::SpeechRecognitionServer::abort):
+        (WebKit::SpeechRecognitionServer::invalidate):
+        * UIProcess/SpeechRecognitionServer.h:
+        * UIProcess/WebProcessProxy.cpp:
+        (WebKit::WebProcessProxy::createSpeechRecognitionServer):
+
 2020-12-14  Chris Dumez  <cdumez@apple.com>
trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.cpp
(r270574 → r270772)

 namespace WebKit {
 
+SpeechRecognitionServer::SpeechRecognitionServer(Ref<IPC::Connection>&& connection, SpeechRecognitionServerIdentifier identifier, SpeechRecognitionPermissionChecker&& permissionChecker, SpeechRecognitionCheckIfmockSpeechRecognitionEnabled&& checkIfEnabled
 #if ENABLE(MEDIA_STREAM)
-SpeechRecognitionServer::SpeechRecognitionServer(Ref<IPC::Connection>&& connection, SpeechRecognitionServerIdentifier identifier, SpeechRecognitionPermissionChecker&& permissionChecker, RealtimeMediaSourceCreateFunction&& function)
+    , RealtimeMediaSourceCreateFunction&& function
+#endif
+    )
     : m_connection(WTFMove(connection))
     , m_identifier(identifier)
     , m_permissionChecker(WTFMove(permissionChecker))
+    , m_checkIfmockSpeechRecognitionEnabled(WTFMove(checkIfEnabled))
+#if ENABLE(MEDIA_STREAM)
     , m_realtimeMediaSourceCreateFunction(WTFMove(function))
+#endif
 {
 }
-#else
-SpeechRecognitionServer::SpeechRecognitionServer(Ref<IPC::Connection>&& connection, SpeechRecognitionServerIdentifier identifier, SpeechRecognitionPermissionChecker&& permissionChecker)
-    : m_connection(WTFMove(connection))
-    , m_identifier(identifier)
-    , m_permissionChecker(WTFMove(permissionChecker))
-{
-}
-#endif
 
 void SpeechRecognitionServer::start(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier, String&& lang, bool continuous, bool interimResults, uint64_t maxAlternatives, WebCore::ClientOrigin&& origin)
…
         }
 
-        handleRequest(identifier);
+        handleRequest(*weakRequest);
     });
 }
 
-void SpeechRecognitionServer::handleRequest(WebCore::SpeechRecognitionConnectionClientIdentifier clientIdentifier)
+void SpeechRecognitionServer::handleRequest(WebCore::SpeechRecognitionRequest& request)
 {
     if (!m_recognizer) {
…
             return;
 
+            sendUpdate(update);
+
             auto type = update.type();
-            if (type == WebCore::SpeechRecognitionUpdateType::Error || type == WebCore::SpeechRecognitionUpdateType::End)
-                m_requests.remove(clientIdentifier);
+            if (type != WebCore::SpeechRecognitionUpdateType::Error && type != WebCore::SpeechRecognitionUpdateType::End)
+                return;
 
-            sendUpdate(update);
+            if (m_isResetting)
+                return;
+            m_isResetting = true;
+
+            m_recognizer->reset();
+            m_requests.remove(clientIdentifier);
+            m_isResetting = false;
         });
     }
 
-    m_recognizer->reset();
+    if (auto currentClientIdentifier = m_recognizer->currentClientIdentifier()) {
+        auto error = WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::Aborted, "Another request is started"_s };
+        sendUpdate(*currentClientIdentifier, WebCore::SpeechRecognitionUpdateType::Error, error);
+        m_recognizer->reset();
+    }
 
+    auto clientIdentifier = request.clientIdentifier();
 #if ENABLE(MEDIA_STREAM)
     auto sourceOrError = m_realtimeMediaSourceCreateFunction();
…
         return;
     }
-    m_recognizer->start(clientIdentifier, sourceOrError.source());
+
+    bool mockDeviceCapturesEnabled = m_checkIfmockSpeechRecognitionEnabled();
+    m_recognizer->start(clientIdentifier, sourceOrError.source(), mockDeviceCapturesEnabled, request.lang(), request.continuous(), request.interimResults(), request.maxAlternatives());
 #else
     m_requests.remove(clientIdentifier);
-    sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::Error, WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::AudioCapture, "Audio capture is not implemented" });
+    sendUpdate(clientIdentifier, WebCore::SpeechRecognitionUpdateType::Error, WebCore::SpeechRecognitionError { WebCore::SpeechRecognitionErrorType::AudioCapture, "Audio capture is not implemented"_s });
 #endif
 }
…
     MESSAGE_CHECK(clientIdentifier);
     if (m_recognizer && m_recognizer->currentClientIdentifier() == clientIdentifier) {
-        m_recognizer->stop(WebCore::SpeechRecognizer::ShouldGenerateFinalResult::No);
+        m_recognizer->abort();
         return;
     }
…
     if (m_requests.remove(clientIdentifier)) {
         if (m_recognizer && m_recognizer->currentClientIdentifier() == clientIdentifier)
-            m_recognizer->stop();
+            m_recognizer->abort();
     }
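The new m_isResetting flag guards against re-entrancy: m_recognizer->reset() can synchronously deliver an End update, and without the flag the update handler would call back into the same teardown path. A standalone sketch of the pattern, with hypothetical names rather than WebKit code:

```cpp
#include <functional>
#include <iostream>

struct Server {
    std::function<void()> onEnd;
    bool isResetting { false };

    void handleEnd()
    {
        if (isResetting)
            return;          // already tearing down; ignore the re-entrant update
        isResetting = true;
        resetRecognizer();   // may fire another End update synchronously
        isResetting = false;
    }

    void resetRecognizer()
    {
        if (onEnd)
            onEnd();         // models reset() delivering an update to its own handler
    }
};

int main()
{
    Server server;
    server.onEnd = [&] { server.handleEnd(); };
    server.handleEnd(); // terminates thanks to the isResetting guard
    std::cout << "no recursion\n";
}
```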
trunk/Source/WebKit/UIProcess/SpeechRecognitionServer.h
(r270574 → r270772)

 using SpeechRecognitionServerIdentifier = WebCore::PageIdentifier;
 using SpeechRecognitionPermissionChecker = Function<void(const WebCore::ClientOrigin&, CompletionHandler<void(SpeechRecognitionPermissionDecision)>&&)>;
+using SpeechRecognitionCheckIfmockSpeechRecognitionEnabled = Function<bool()>;
 
 class SpeechRecognitionServer : public CanMakeWeakPtr<SpeechRecognitionServer>, public IPC::MessageReceiver, private IPC::MessageSender {
…
 #if ENABLE(MEDIA_STREAM)
     using RealtimeMediaSourceCreateFunction = Function<WebCore::CaptureSourceOrError()>;
-    SpeechRecognitionServer(Ref<IPC::Connection>&&, SpeechRecognitionServerIdentifier, SpeechRecognitionPermissionChecker&&, RealtimeMediaSourceCreateFunction&&);
+    SpeechRecognitionServer(Ref<IPC::Connection>&&, SpeechRecognitionServerIdentifier, SpeechRecognitionPermissionChecker&&, SpeechRecognitionCheckIfmockSpeechRecognitionEnabled&&, RealtimeMediaSourceCreateFunction&&);
 #else
-    SpeechRecognitionServer(Ref<IPC::Connection>&&, SpeechRecognitionServerIdentifier, SpeechRecognitionPermissionChecker&&);
+    SpeechRecognitionServer(Ref<IPC::Connection>&&, SpeechRecognitionServerIdentifier, SpeechRecognitionPermissionChecker&&, SpeechRecognitionCheckIfmockSpeechRecognitionEnabled&&);
 #endif
…
 private:
     void requestPermissionForRequest(WebCore::SpeechRecognitionRequest&);
-    void handleRequest(WebCore::SpeechRecognitionConnectionClientIdentifier);
+    void handleRequest(WebCore::SpeechRecognitionRequest&);
     void sendUpdate(WebCore::SpeechRecognitionConnectionClientIdentifier, WebCore::SpeechRecognitionUpdateType, Optional<WebCore::SpeechRecognitionError> = WTF::nullopt, Optional<Vector<WebCore::SpeechRecognitionResultData>> = WTF::nullopt);
     void sendUpdate(const WebCore::SpeechRecognitionUpdate&);
…
     SpeechRecognitionPermissionChecker m_permissionChecker;
     std::unique_ptr<WebCore::SpeechRecognizer> m_recognizer;
+    SpeechRecognitionCheckIfmockSpeechRecognitionEnabled m_checkIfmockSpeechRecognitionEnabled;
+    bool m_isResetting { false };
 
 #if ENABLE(MEDIA_STREAM)
trunk/Source/WebKit/UIProcess/WebProcessProxy.cpp
(r270646 → r270772)

     ASSERT(!m_speechRecognitionServerMap.contains(identifier));
     auto& speechRecognitionServer = m_speechRecognitionServerMap.add(identifier, nullptr).iterator->value;
-    speechRecognitionServer = makeUnique<SpeechRecognitionServer>(makeRef(*connection()), identifier, [weakPage = makeWeakPtr(targetPage)](auto& origin, auto&& completionHandler) mutable {
+    auto permissionChecker = [weakPage = makeWeakPtr(targetPage)](auto& origin, auto&& completionHandler) mutable {
         if (!weakPage) {
             completionHandler(SpeechRecognitionPermissionDecision::Deny);
…
         weakPage->requestSpeechRecognitionPermission(origin, WTFMove(completionHandler));
-    }
+    };
+    auto checkIfMockCaptureDevicesEnabled = [weakPage = makeWeakPtr(targetPage)]() {
+        return weakPage && weakPage->preferences().mockCaptureDevicesEnabled();
+    };
+
 #if ENABLE(MEDIA_STREAM)
-    , [weakPage = makeWeakPtr(targetPage)]() {
+    auto createRealtimeMediaSource = [weakPage = makeWeakPtr(targetPage)]() {
         return weakPage ? weakPage->createRealtimeMediaSourceForSpeechRecognition() : CaptureSourceOrError { "Page is invalid" };
-    }
-#endif
-    );
+    };
+    speechRecognitionServer = makeUnique<SpeechRecognitionServer>(makeRef(*connection()), identifier, WTFMove(permissionChecker), WTFMove(checkIfMockCaptureDevicesEnabled), WTFMove(createRealtimeMediaSource));
+#else
+    speechRecognitionServer = makeUnique<SpeechRecognitionServer>(makeRef(*connection()), identifier, WTFMove(permissionChecker), WTFMove(checkIfMockCaptureDevicesEnabled));
+#endif
+
     addMessageReceiver(Messages::SpeechRecognitionServer::messageReceiverName(), identifier, *speechRecognitionServer);
 }