Changeset 87013 in webkit
- Timestamp:
- May 21, 2011 2:42:08 AM (13 years ago)
- Location:
- trunk/Source/WebCore
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/WebCore/ChangeLog
r87011 r87013 1 2011-05-21 Chris Rogers <crogers@google.com> 2 3 Reviewed by Kenneth Russell. 4 5 AudioBufferSourceNode noteOff() method must respect scheduling time 6 https://bugs.webkit.org/show_bug.cgi?id=61226 7 8 No new tests since audio API is not yet implemented. 9 10 * webaudio/AudioBufferSourceNode.cpp: 11 (WebCore::AudioBufferSourceNode::AudioBufferSourceNode): 12 (WebCore::AudioBufferSourceNode::process): 13 (WebCore::AudioBufferSourceNode::provideInput): 14 (WebCore::AudioBufferSourceNode::finish): 15 (WebCore::AudioBufferSourceNode::noteGrainOn): 16 (WebCore::AudioBufferSourceNode::noteOff): 17 * webaudio/AudioBufferSourceNode.h: 18 1 19 2011-05-20 Dirk Schulze <krit@webkit.org> 2 20 -
trunk/Source/WebCore/webaudio/AudioBufferSourceNode.cpp
r73458 r87013 39 39 40 40 const double DefaultGrainDuration = 0.020; // 20ms 41 const double UnknownTime = -1; 41 42 42 43 PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate) … … 52 53 , m_hasFinished(false) 53 54 , m_startTime(0.0) 55 , m_endTime(UnknownTime) 54 56 , m_schedulingFrameDelay(0) 55 57 , m_readIndex(0) … … 95 97 double quantumEndTime = quantumStartTime + framesToProcess / sampleRate; 96 98 99 // If we know the end time and it's already passed, then don't bother doing any more rendering this cycle. 100 if (m_endTime != UnknownTime && m_endTime <= quantumStartTime) { 101 m_isPlaying = false; 102 m_readIndex = 0; 103 finish(); 104 } 105 97 106 if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime >= quantumEndTime) { 98 107 // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence. … … 127 136 double totalGain = gain()->value() * m_buffer->gain(); 128 137 outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain); 138 139 // If the end time is somewhere in the middle of this time quantum, then simply zero out the 140 // frames starting at the end time. 141 if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) { 142 unsigned zeroStartFrame = (m_endTime - quantumStartTime) * sampleRate; 143 unsigned framesToZero = framesToProcess - zeroStartFrame; 144 145 bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess; 146 ASSERT(isSafe); 147 148 if (isSafe) { 149 for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i) 150 memset(outputBus->channel(i)->data() + zeroStartFrame, 0, sizeof(float) * framesToZero); 151 } 152 153 m_isPlaying = false; 154 m_readIndex = 0; 155 finish(); 156 } 129 157 130 158 m_processLock.unlock(); … … 250 278 } 251 279 252 if (!m_hasFinished) { 253 // Let the context dereference this AudioNode. 
254 context()->notifyNodeFinishedProcessing(this); 255 m_hasFinished = true; 256 } 280 finish(); 257 281 return; 258 282 } … … 343 367 } 344 368 369 void AudioBufferSourceNode::finish() 370 { 371 if (!m_hasFinished) { 372 // Let the context dereference this AudioNode. 373 context()->notifyNodeFinishedProcessing(this); 374 m_hasFinished = true; 375 } 376 } 377 345 378 void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer) 346 379 { … … 400 433 401 434 grainOffset = max(0.0, grainOffset); 402 grainOffset = min(maxGrainOffset, grainOffset); 435 grainOffset = min(maxGrainOffset, grainOffset); 403 436 m_grainOffset = grainOffset; 404 437 … … 412 445 } 413 446 414 void AudioBufferSourceNode::noteOff(double )447 void AudioBufferSourceNode::noteOff(double when) 415 448 { 416 449 ASSERT(isMainThread()); 417 450 if (!m_isPlaying) 418 451 return; 419 420 // FIXME: the "when" argument to this method is ignored. 421 m_isPlaying = false; 422 m_readIndex = 0; 452 453 when = max(0.0, when); 454 m_endTime = when; 423 455 } 424 456 -
trunk/Source/WebCore/webaudio/AudioBufferSourceNode.h
r71205 r87013 105 105 double m_startTime; // in seconds 106 106 107 // m_endTime is the time to stop playing based on the context's timeline (0.0 or a time less than the context's current time means "now"). 108 // If it hasn't been set explicitly, then the sound will not stop playing (if looping) or will stop when the end of the AudioBuffer 109 // has been reached. 110 double m_endTime; // in seconds 111 107 112 // m_schedulingFrameDelay is the sample-accurate scheduling offset. 108 113 // It's used so that we start rendering audio samples at a very precise point in time. … … 141 146 // readFromBufferWithGrainEnvelope() is a low-level blitter which reads from the AudioBuffer and applies a grain envelope. 142 147 void readFromBufferWithGrainEnvelope(float* sourceL, float* sourceR, float* destinationL, float* destinationR, size_t framesToProcess); 148 149 // Handles the time when we reach the end of sample data (non-looping) or the noteOff() time has been reached. 150 void finish(); 143 151 }; 144 152
Note: See TracChangeset for help on using the changeset viewer.