Changeset 268565 in webkit
- Timestamp:
- Oct 15, 2020 5:34:30 PM (4 years ago)
- Location:
- trunk/Source/WebCore
- Files:
-
- 8 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/WebCore/ChangeLog
r268564 r268565 1 2020-10-15 Chris Dumez <cdumez@apple.com> 2 3 Move AudioContext-specific logic out of BaseAudioContext 4 https://bugs.webkit.org/show_bug.cgi?id=217794 5 6 Reviewed by Geoffrey Garen. 7 8 Move AudioContext-specific logic out of BaseAudioContext and into the 9 AudioContext class. This required having WebKitAudioContext subclass 10 AudioContext instead of BaseAudioContext. 11 12 No new tests, no Web-facing behavior change. 13 14 * Modules/webaudio/AudioContext.cpp: 15 (WebCore::AudioContext::AudioContext): 16 (WebCore::AudioContext::suspendRendering): 17 (WebCore::AudioContext::resumeRendering): 18 (WebCore::AudioContext::nodeWillBeginPlayback): 19 (WebCore::AudioContext::startRendering): 20 (WebCore::AudioContext::lazyInitialize): 21 (WebCore::AudioContext::willPausePlayback): 22 * Modules/webaudio/AudioContext.h: 23 (WebCore::AudioContext::AudioContext): 24 * Modules/webaudio/BaseAudioContext.cpp: 25 (WebCore::BaseAudioContext::lazyInitialize): 26 * Modules/webaudio/BaseAudioContext.h: 27 (WebCore::BaseAudioContext::nodeWillBeginPlayback): 28 (WebCore::BaseAudioContext::mediaSession const): 29 * Modules/webaudio/DefaultAudioDestinationNode.h: 30 * Modules/webaudio/WebKitAudioContext.cpp: 31 (WebCore::WebKitAudioContext::WebKitAudioContext): 32 * Modules/webaudio/WebKitAudioContext.h: 33 (WebCore::WebKitAudioContext::listener): 34 1 35 2020-10-15 Sam Weinig <weinig@apple.com> 2 36 -
trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp
r267147 r268565 31 31 #include "AudioTimestamp.h" 32 32 #include "DOMWindow.h" 33 #include "DefaultAudioDestinationNode.h"34 33 #include "JSDOMPromiseDeferred.h" 34 #include "Page.h" 35 35 #include "Performance.h" 36 36 #include <wtf/IsoMallocInlines.h> … … 100 100 } 101 101 102 // Only needed for WebKitOfflineAudioContext. 103 AudioContext::AudioContext(Document& document, unsigned numberOfChannels, RefPtr<AudioBuffer>&& renderTarget) 104 : BaseAudioContext(document, numberOfChannels, WTFMove(renderTarget)) 105 { 106 } 107 102 108 double AudioContext::baseLatency() 103 109 { … … 154 160 } 155 161 162 void AudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise) 163 { 164 if (isOfflineContext() || isStopped()) { 165 promise.reject(InvalidStateError); 166 return; 167 } 168 169 if (state() == State::Closed || state() == State::Interrupted || !destinationNode()) { 170 promise.reject(); 171 return; 172 } 173 174 addReaction(State::Suspended, WTFMove(promise)); 175 m_wasSuspendedByScript = true; 176 177 if (!willPausePlayback()) 178 return; 179 180 lazyInitialize(); 181 182 destinationNode()->suspend([this, protectedThis = makeRef(*this)] { 183 setState(State::Suspended); 184 }); 185 } 186 187 void AudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise) 188 { 189 if (isOfflineContext() || isStopped()) { 190 promise.reject(InvalidStateError); 191 return; 192 } 193 194 if (state() == State::Closed || !destinationNode()) { 195 promise.reject(); 196 return; 197 } 198 199 addReaction(State::Running, WTFMove(promise)); 200 m_wasSuspendedByScript = false; 201 202 if (!willBeginPlayback()) 203 return; 204 205 lazyInitialize(); 206 207 destinationNode()->resume([this, protectedThis = makeRef(*this)] { 208 setState(State::Running); 209 }); 210 } 211 212 void AudioContext::nodeWillBeginPlayback() 213 { 214 // Called by scheduled AudioNodes when clients schedule their start times. 
215 // Prior to the introduction of suspend(), resume(), and stop(), starting 216 // a scheduled AudioNode would remove the user-gesture restriction, if present, 217 // and would thus unmute the context. Now that AudioContext stays in the 218 // "suspended" state if a user-gesture restriction is present, starting a 219 // scheduled AudioNode should set the state to "running", but only if the 220 // user-gesture restriction is set. 221 if (userGestureRequiredForAudioStart()) 222 startRendering(); 223 } 224 225 void AudioContext::startRendering() 226 { 227 ALWAYS_LOG(LOGIDENTIFIER); 228 if (isStopped() || !willBeginPlayback() || m_wasSuspendedByScript) 229 return; 230 231 makePendingActivity(); 232 233 setState(State::Running); 234 235 lazyInitialize(); 236 destination()->startRendering(); 237 } 238 239 void AudioContext::lazyInitialize() 240 { 241 if (isInitialized()) 242 return; 243 244 BaseAudioContext::lazyInitialize(); 245 if (isInitialized()) { 246 if (destinationNode() && state() != State::Running) { 247 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio. 248 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum". 249 // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript. 250 // We may want to consider requiring it for symmetry with OfflineAudioContext. 
251 startRendering(); 252 ++s_hardwareContextCount; 253 } 254 } 255 } 256 257 bool AudioContext::willPausePlayback() 258 { 259 auto* document = this->document(); 260 if (!document) 261 return false; 262 263 if (userGestureRequiredForAudioStart()) { 264 if (!document->processingUserGestureForMedia()) 265 return false; 266 removeBehaviorRestriction(BaseAudioContext::RequireUserGestureForAudioStartRestriction); 267 } 268 269 if (pageConsentRequiredForAudioStart()) { 270 auto* page = document->page(); 271 if (page && !page->canStartMedia()) { 272 document->addMediaCanStartListener(*this); 273 return false; 274 } 275 removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction); 276 } 277 278 return mediaSession()->clientWillPausePlayback(); 279 } 280 156 281 #if ENABLE(VIDEO) 157 282 -
trunk/Source/WebCore/Modules/webaudio/AudioContext.h
r267147 r268565 28 28 #include "AudioContextOptions.h" 29 29 #include "BaseAudioContext.h" 30 #include "DefaultAudioDestinationNode.h" 30 31 31 32 namespace WebCore { 32 33 33 34 class DOMWindow; 34 class DefaultAudioDestinationNode;35 35 36 36 struct AudioTimestamp; … … 58 58 #endif 59 59 60 void suspendRendering(DOMPromiseDeferred<void>&&); 61 void resumeRendering(DOMPromiseDeferred<void>&&); 62 63 void nodeWillBeginPlayback() final; 64 void lazyInitialize() final; 65 66 void startRendering(); 67 68 protected: 69 explicit AudioContext(Document&, const AudioContextOptions& = { }); 70 AudioContext(Document&, unsigned numberOfChannels, RefPtr<AudioBuffer>&& renderTarget); 71 60 72 private: 61 AudioContext(Document&, const AudioContextOptions&); 73 bool willPausePlayback(); 74 75 // [[suspended by user]] flag in the specification: 76 // https://www.w3.org/TR/webaudio/#dom-audiocontext-suspended-by-user-slot 77 bool m_wasSuspendedByScript { false }; 62 78 }; 63 79 -
trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp
r268368 r268565 224 224 return; 225 225 226 if (m_destinationNode) {226 if (m_destinationNode) 227 227 m_destinationNode->initialize(); 228 229 if (!isOfflineContext() && state() != State::Running) {230 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.231 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".232 // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.233 // We may want to consider requiring it for symmetry with OfflineAudioContext.234 startRendering();235 ++s_hardwareContextCount;236 }237 }238 228 239 229 m_isInitialized = true; … … 1004 994 } 1005 995 1006 void BaseAudioContext::nodeWillBeginPlayback()1007 {1008 // Called by scheduled AudioNodes when clients schedule their start times.1009 // Prior to the introduction of suspend(), resume(), and stop(), starting1010 // a scheduled AudioNode would remove the user-gesture restriction, if present,1011 // and would thus unmute the context. 
Now that AudioContext stays in the1012 "suspended" state if a user-gesture restriction is present, starting a1013 // scheduled AudioNode should set the state to "running", but only if the1014 // user-gesture restriction is set.1015 if (userGestureRequiredForAudioStart())1016 startRendering();1017 }1018 1019 996 static bool shouldDocumentAllowWebAudioToAutoPlay(const Document& document) 1020 997 { … … 1054 1031 } 1055 1032 1056 bool BaseAudioContext::willPausePlayback()1057 {1058 auto* document = this->document();1059 if (!document)1060 return false;1061 1062 if (userGestureRequiredForAudioStart()) {1063 if (!document->processingUserGestureForMedia())1064 return false;1065 removeBehaviorRestriction(BaseAudioContext::RequireUserGestureForAudioStartRestriction);1066 }1067 1068 if (pageConsentRequiredForAudioStart()) {1069 auto* page = document->page();1070 if (page && !page->canStartMedia()) {1071 document->addMediaCanStartListener(*this);1072 return false;1073 }1074 removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction);1075 }1076 1077 return m_mediaSession->clientWillPausePlayback();1078 }1079 1080 void BaseAudioContext::startRendering()1081 {1082 ALWAYS_LOG(LOGIDENTIFIER);1083 if (m_isStopScheduled || !willBeginPlayback() || m_wasSuspendedByScript)1084 return;1085 1086 makePendingActivity();1087 1088 setState(State::Running);1089 1090 lazyInitialize();1091 destination()->startRendering();1092 }1093 1094 1033 void BaseAudioContext::mediaCanStart(Document& document) 1095 1034 { … … 1123 1062 } 1124 1063 1064 // FIXME: Move to OfflineAudioContext once WebKitOfflineAudioContext gets removed. 
1125 1065 void BaseAudioContext::finishedRendering(bool didRendering) 1126 1066 { … … 1179 1119 } 1180 1120 1181 void BaseAudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)1182 {1183 if (isOfflineContext() || m_isStopScheduled) {1184 promise.reject(InvalidStateError);1185 return;1186 }1187 1188 if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {1189 promise.reject();1190 return;1191 }1192 1193 addReaction(State::Suspended, WTFMove(promise));1194 m_wasSuspendedByScript = true;1195 1196 if (!willPausePlayback())1197 return;1198 1199 lazyInitialize();1200 1201 m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {1202 setState(State::Suspended);1203 });1204 }1205 1206 1121 void BaseAudioContext::didSuspendRendering(size_t) 1207 1122 { 1208 1123 setState(State::Suspended); 1209 }1210 1211 void BaseAudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise)1212 {1213 if (isOfflineContext() || m_isStopScheduled) {1214 promise.reject(InvalidStateError);1215 return;1216 }1217 1218 if (m_state == State::Closed || !m_destinationNode) {1219 promise.reject();1220 return;1221 }1222 1223 addReaction(State::Running, WTFMove(promise));1224 m_wasSuspendedByScript = false;1225 1226 if (!willBeginPlayback())1227 return;1228 1229 lazyInitialize();1230 1231 m_destinationNode->resume([this, protectedThis = makeRef(*this)] {1232 setState(State::Running);1233 });1234 1124 } 1235 1125 -
trunk/Source/WebCore/Modules/webaudio/BaseAudioContext.h
r268103 r268565 145 145 AudioListener& listener(); 146 146 147 void suspendRendering(DOMPromiseDeferred<void>&&);148 void resumeRendering(DOMPromiseDeferred<void>&&);149 150 147 virtual void didSuspendRendering(size_t frame); 151 148 … … 259 256 void derefEventTarget() override { deref(); } 260 257 261 void startRendering();262 258 void finishedRendering(bool didRendering); 263 259 … … 277 273 void isPlayingAudioDidChange(); 278 274 279 v oid nodeWillBeginPlayback();275 virtual void nodeWillBeginPlayback() { } 280 276 281 277 #if !RELEASE_LOG_DISABLED … … 318 314 void derefNode(AudioNode&); 319 315 320 v oid lazyInitialize();316 virtual void lazyInitialize(); 321 317 322 318 static bool isSupportedSampleRate(float sampleRate); … … 351 347 virtual void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) { } 352 348 349 bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; } 350 bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; } 351 352 PlatformMediaSession* mediaSession() const { return m_mediaSession.get(); } 353 353 private: 354 354 void constructCommon(); 355 356 bool willPausePlayback();357 358 bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }359 bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }360 355 361 356 void clear(); … … 477 472 HashMap<String, Vector<AudioParamDescriptor>> m_parameterDescriptorMap; 478 473 479 // [[suspended by user]] flag in the specification:480 // https://www.w3.org/TR/webaudio/#dom-audiocontext-suspended-by-user-slot481 bool m_wasSuspendedByScript { false };482 483 474 // These are cached per audio context for performance reasons. 
They cannot be 484 475 // static because they rely on the sample rate. -
trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
r267859 r268565 44 44 float sampleRate() const final { return m_sampleRate; } 45 45 46 ExceptionOr<void> startRendering() final; 47 46 48 private: 47 49 explicit DefaultAudioDestinationNode(BaseAudioContext&, Optional<float>); … … 57 59 58 60 void enableInput(const String& inputDeviceId) final; 59 ExceptionOr<void> startRendering() final;60 61 void resume(Function<void ()>&&) final; 61 62 void suspend(Function<void ()>&&) final; -
trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.cpp
r267544 r268565 80 80 // Constructor for rendering to the audio hardware. 81 81 WebKitAudioContext::WebKitAudioContext(Document& document) 82 : BaseAudioContext(document)82 : AudioContext(document) 83 83 { 84 84 } … … 86 86 // Constructor for offline (non-realtime) rendering. 87 87 WebKitAudioContext::WebKitAudioContext(Document& document, Ref<AudioBuffer>&& renderTarget) 88 : BaseAudioContext(document, renderTarget->numberOfChannels(), WTFMove(renderTarget))88 : AudioContext(document, renderTarget->numberOfChannels(), WTFMove(renderTarget)) 89 89 { 90 90 } -
trunk/Source/WebCore/Modules/webaudio/WebKitAudioContext.h
r267147 r268565 26 26 #pragma once 27 27 28 #include " BaseAudioContext.h"28 #include "AudioContext.h" 29 29 #include "WebKitAudioListener.h" 30 30 … … 47 47 // For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism. 48 48 49 class WebKitAudioContext 50 : public BaseAudioContext 49 class WebKitAudioContext : public AudioContext 51 50 { 52 51 WTF_MAKE_ISO_ALLOCATED(WebKitAudioContext); … … 57 56 void close(DOMPromiseDeferred<void>&&); 58 57 59 WebKitAudioListener& listener() { return downcast<WebKitAudioListener>( BaseAudioContext::listener()); }58 WebKitAudioListener& listener() { return downcast<WebKitAudioListener>(AudioContext::listener()); } 60 59 61 60 // The AudioNode create methods are called on the main thread (from JavaScript).
Note: See TracChangeset for help on using the changeset viewer.