Changeset 90042 in webkit
- Timestamp: Jun 29, 2011 2:20:08 PM (13 years ago)
- Location: trunk/Source/WebCore
- Files: 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/Source/WebCore/ChangeLog
r90041 r90042 1 2011-06-29 Chris Rogers <crogers@google.com> 2 3 Reviewed by Kenneth Russell. 4 5 Simplify AudioBufferSourceNode rendering 6 https://bugs.webkit.org/show_bug.cgi?id=63586 7 8 No new tests since this doesn't change API. 9 10 * webaudio/AudioBufferSourceNode.cpp: 11 (WebCore::AudioBufferSourceNode::AudioBufferSourceNode): 12 (WebCore::AudioBufferSourceNode::process): 13 (WebCore::AudioBufferSourceNode::renderFromBuffer): 14 (WebCore::AudioBufferSourceNode::reset): 15 (WebCore::AudioBufferSourceNode::setBuffer): 16 (WebCore::AudioBufferSourceNode::noteOn): 17 (WebCore::AudioBufferSourceNode::noteGrainOn): 18 (WebCore::AudioBufferSourceNode::totalPitchRate): 19 * webaudio/AudioBufferSourceNode.h: 20 1 21 2011-06-29 Martin Robinson <mrobinson@igalia.com> 2 22 -
trunk/Source/WebCore/webaudio/AudioBufferSourceNode.cpp
r89471 r90042 41 41 const double UnknownTime = -1; 42 42 43 // Arbitrary upper limit on playback rate. 44 // Higher than expected rates can be useful when playing back oversampled buffers 45 // to minimize linear interpolation aliasing. 46 const double MaxRate = 1024; 47 43 48 PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate) 44 49 { … … 54 59 , m_startTime(0.0) 55 60 , m_endTime(UnknownTime) 56 , m_schedulingFrameDelay(0) 57 , m_readIndex(0) 61 , m_virtualReadIndex(0) 58 62 , m_isGrain(false) 59 63 , m_grainOffset(0.0) 60 64 , m_grainDuration(DefaultGrainDuration) 61 , m_grainFrameCount(0)62 65 , m_lastGain(1.0) 63 66 , m_pannerNode(0) … … 66 69 67 70 m_gain = AudioGain::create("gain", 1.0, 0.0, 1.0); 68 m_playbackRate = AudioParam::create("playbackRate", 1.0, 0.0, AudioResampler::MaxRate);71 m_playbackRate = AudioParam::create("playbackRate", 1.0, 0.0, MaxRate); 69 72 70 73 m_gain->setContext(context); … … 96 99 // Check if it's time to start playing. 97 100 double sampleRate = this->sampleRate(); 98 double pitchRate = totalPitchRate();99 101 double quantumStartTime = context()->currentTime(); 100 102 double quantumEndTime = quantumStartTime + framesToProcess / sampleRate; … … 103 105 if (m_endTime != UnknownTime && m_endTime <= quantumStartTime) { 104 106 m_isPlaying = false; 105 m_ readIndex = 0;107 m_virtualReadIndex = 0; 106 108 finish(); 107 109 } … … 114 116 } 115 117 116 // Handle sample-accurate scheduling so that buffer playback will happen at a very precise time. 
117 m_schedulingFrameDelay = 0; 118 if (m_startTime >= quantumStartTime) { 119 // m_schedulingFrameDelay is set here only the very first render quantum (because of above check: m_startTime >= quantumEndTime) 120 // So: quantumStartTime <= m_startTime < quantumEndTime 121 ASSERT(m_startTime < quantumEndTime); 122 123 double startTimeInQuantum = m_startTime - quantumStartTime; 124 double startFrameInQuantum = startTimeInQuantum * sampleRate; 125 126 // m_schedulingFrameDelay is used in provideInput(), so factor in the current playback pitch rate. 127 m_schedulingFrameDelay = static_cast<int>(pitchRate * startFrameInQuantum); 128 } 129 130 // FIXME: optimization opportunity: 131 // With a bit of work, it should be possible to avoid going through the resampler completely when the pitchRate == 1, 132 // especially if the pitchRate has never deviated from 1 in the past. 133 134 // Read the samples through the pitch resampler. Our provideInput() method will be called by the resampler. 135 m_resampler.setRate(pitchRate); 136 m_resampler.process(this, outputBus, framesToProcess); 118 double quantumTimeOffset = m_startTime > quantumStartTime ? m_startTime - quantumStartTime : 0; 119 size_t quantumFrameOffset = static_cast<unsigned>(quantumTimeOffset * sampleRate); 120 quantumFrameOffset = min(quantumFrameOffset, framesToProcess); // clamp to valid range 121 size_t bufferFramesToProcess = framesToProcess - quantumFrameOffset; 122 123 // Render by reading directly from the buffer. 124 renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess); 137 125 138 126 // Apply the gain (in-place) to the output bus. … … 155 143 156 144 m_isPlaying = false; 157 m_ readIndex = 0;145 m_virtualReadIndex = 0; 158 146 finish(); 159 147 } … … 166 154 } 167 155 168 // The resampler calls us back here to get the input samples from our buffer. 
169 void AudioBufferSourceNode::provideInput(AudioBus* bus, size_t numberOfFrames) 156 void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames) 170 157 { 171 158 ASSERT(context()->isAudioThread()); … … 192 179 return; 193 180 float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->data(); 181 182 bool isStereo = destinationR; 183 184 // Sanity check destinationFrameOffset, numberOfFrames. 185 size_t destinationLength = bus->length(); 186 bool isOffsetGood = destinationFrameOffset <= destinationLength && destinationFrameOffset + numberOfFrames <= destinationLength; 187 ASSERT(isOffsetGood); 188 if (!isOffsetGood) 189 return; 190 191 // Potentially zero out initial frames leading up to the offset. 192 if (destinationFrameOffset) { 193 memset(destinationL, 0, sizeof(float) * destinationFrameOffset); 194 if (destinationR) 195 memset(destinationR, 0, sizeof(float) * destinationFrameOffset); 196 } 197 198 // Offset the pointers to the correct offset frame. 199 destinationL += destinationFrameOffset; 200 if (destinationR) 201 destinationR += destinationFrameOffset; 194 202 195 203 size_t bufferLength = buffer()->length(); … … 200 208 unsigned startFrame = m_isGrain ? static_cast<unsigned>(m_grainOffset * bufferSampleRate) : 0; 201 209 unsigned endFrame = m_isGrain ? static_cast<unsigned>(startFrame + m_grainDuration * bufferSampleRate) : bufferLength; 202 210 211 ASSERT(endFrame >= startFrame); 212 if (endFrame < startFrame) 213 return; 214 215 unsigned deltaFrames = endFrame - startFrame; 216 203 217 // This is a HACK to allow for HRTF tail-time - avoids glitch at end. 204 218 // FIXME: implement tailTime for each AudioNode for a more general solution to this problem. 
… … 211 225 if (endFrame > bufferLength) 212 226 endFrame = bufferLength; 213 if (m_readIndex >= endFrame) 214 m_readIndex = startFrame; // reset to start 215 227 if (m_virtualReadIndex >= endFrame) 228 m_virtualReadIndex = startFrame; // reset to start 229 230 // Get pointers to the start of the sample buffer. 231 float* sourceL = m_buffer->getChannelData(0)->data(); 232 float* sourceR = m_buffer->numberOfChannels() == 2 ? m_buffer->getChannelData(1)->data() : 0; 233 234 double pitchRate = totalPitchRate(); 235 236 // Get local copy. 237 double virtualReadIndex = m_virtualReadIndex; 238 239 // Render loop - reading from the source buffer to the destination using linear interpolation. 240 // FIXME: optimize for the very common case of playing back with pitchRate == 1. 241 // We can avoid the linear interpolation. 216 242 int framesToProcess = numberOfFrames; 217 218 // Handle sample-accurate scheduling so that we play the buffer at a very precise time. 219 // m_schedulingFrameDelay will only be non-zero the very first time that provideInput() is called, which corresponds 220 // with the very start of the buffer playback. 221 if (m_schedulingFrameDelay > 0) { 222 ASSERT(m_schedulingFrameDelay <= framesToProcess); 223 if (m_schedulingFrameDelay <= framesToProcess) { 224 // Generate silence for the initial portion of the destination. 225 memset(destinationL, 0, sizeof(float) * m_schedulingFrameDelay); 226 destinationL += m_schedulingFrameDelay; 227 if (destinationR) { 228 memset(destinationR, 0, sizeof(float) * m_schedulingFrameDelay); 229 destinationR += m_schedulingFrameDelay; 230 } 231 232 // Since we just generated silence for the initial portion, we have fewer frames to provide. 
233 framesToProcess -= m_schedulingFrameDelay; 234 } 235 } 236 237 // We have to generate a certain number of output sample-frames, but we need to handle the case where we wrap around 238 // from the end of the buffer to the start if playing back with looping and also the case where we simply reach the 239 // end of the sample data, but haven't yet rendered numberOfFrames worth of output. 240 while (framesToProcess > 0) { 241 ASSERT(m_readIndex <= endFrame); 242 if (m_readIndex > endFrame) 243 return; 244 245 // Figure out how many frames we can process this time. 246 int framesAvailable = endFrame - m_readIndex; 247 int framesThisTime = min(framesToProcess, framesAvailable); 243 while (framesToProcess--) { 244 unsigned readIndex = static_cast<unsigned>(virtualReadIndex); 245 double interpolationFactor = virtualReadIndex - readIndex; 248 246 249 // Create the destination bus for the part of the destination we're processing this time. 250 AudioBus currentDestinationBus(busNumberOfChannels, framesThisTime, false); 251 currentDestinationBus.setChannelMemory(0, destinationL, framesThisTime); 252 if (busNumberOfChannels > 1) 253 currentDestinationBus.setChannelMemory(1, destinationR, framesThisTime); 254 255 // Generate output from the buffer. 256 readFromBuffer(¤tDestinationBus, framesThisTime); 257 258 // Update the destination pointers. 259 destinationL += framesThisTime; 260 if (busNumberOfChannels > 1) 261 destinationR += framesThisTime; 262 263 framesToProcess -= framesThisTime; 264 265 // Handle the case where we reach the end of the part of the sample data we're supposed to play for the buffer. 266 if (m_readIndex >= endFrame) { 267 m_readIndex = startFrame; 268 m_grainFrameCount = 0; 269 247 // For linear interpolation we need the next sample-frame too. 248 unsigned readIndex2 = readIndex + 1; 249 if (readIndex2 >= endFrame) { 250 if (looping()) { 251 // Make sure to wrap around at the end of the buffer. 
252 readIndex2 -= deltaFrames; 253 } else 254 readIndex2 = readIndex; 255 } 256 257 // Final sanity check on buffer access. 258 // FIXME: as an optimization, try to get rid of this inner-loop check and put assertions and guards before the loop. 259 if (readIndex >= bufferLength || readIndex2 >= bufferLength) 260 break; 261 262 // Linear interpolation. 263 double sampleL1 = sourceL[readIndex]; 264 double sampleL2 = sourceL[readIndex2]; 265 double sampleL = (1.0 - interpolationFactor) * sampleL1 + interpolationFactor * sampleL2; 266 *destinationL++ = sampleL; 267 268 if (isStereo) { 269 double sampleR1 = sourceR[readIndex]; 270 double sampleR2 = sourceR[readIndex2]; 271 double sampleR = (1.0 - interpolationFactor) * sampleR1 + interpolationFactor * sampleR2; 272 *destinationR++ = sampleR; 273 } 274 275 virtualReadIndex += pitchRate; 276 277 // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point. 278 if (virtualReadIndex >= endFrame) { 279 virtualReadIndex -= deltaFrames; 280 270 281 if (!looping()) { 271 282 // If we're not looping, then stop playing when we get to the end. … … 277 288 memset(destinationL, 0, sizeof(float) * framesToProcess); 278 289 279 if ( destinationR)290 if (isStereo) 280 291 memset(destinationR, 0, sizeof(float) * framesToProcess); 281 292 } 282 293 283 294 finish(); 284 return;295 break; 285 296 } 286 297 } 287 298 } 288 } 289 290 void AudioBufferSourceNode::readFromBuffer(AudioBus* destinationBus, size_t framesToProcess) 291 { 292 bool isBusGood = destinationBus && destinationBus->length() == framesToProcess && destinationBus->numberOfChannels() == numberOfChannels(); 293 ASSERT(isBusGood); 294 if (!isBusGood) 295 return; 296 297 unsigned numberOfChannels = this->numberOfChannels(); 298 // FIXME: we can add support for sources with more than two channels, but this is not a common case. 
299 bool channelCountGood = numberOfChannels == 1 || numberOfChannels == 2; 300 ASSERT(channelCountGood); 301 if (!channelCountGood) 302 return; 303 304 // Get pointers to the start of the sample buffer. 305 float* sourceL = m_buffer->getChannelData(0)->data(); 306 float* sourceR = m_buffer->numberOfChannels() == 2 ? m_buffer->getChannelData(1)->data() : 0; 307 308 // Sanity check buffer access. 309 bool isSourceGood = sourceL && (numberOfChannels == 1 || sourceR) && m_readIndex + framesToProcess <= m_buffer->length(); 310 ASSERT(isSourceGood); 311 if (!isSourceGood) 312 return; 313 314 // Offset the pointers to the current read position in the sample buffer. 315 sourceL += m_readIndex; 316 sourceR += m_readIndex; 317 318 // Get pointers to the destination. 319 float* destinationL = destinationBus->channel(0)->data(); 320 float* destinationR = numberOfChannels == 2 ? destinationBus->channel(1)->data() : 0; 321 bool isDestinationGood = destinationL && (numberOfChannels == 1 || destinationR); 322 ASSERT(isDestinationGood); 323 if (!isDestinationGood) 324 return; 325 326 // Simply copy the data from the source buffer to the destination. 327 memcpy(destinationL, sourceL, sizeof(float) * framesToProcess); 328 if (numberOfChannels == 2) 329 memcpy(destinationR, sourceR, sizeof(float) * framesToProcess); 330 331 // Advance the buffer's read index. 332 m_readIndex += framesToProcess; 299 300 m_virtualReadIndex = virtualReadIndex; 333 301 } 334 302 335 303 void AudioBufferSourceNode::reset() 336 304 { 337 m_resampler.reset(); 338 m_readIndex = 0; 339 m_grainFrameCount = 0; 305 m_virtualReadIndex = 0; 340 306 m_lastGain = gain()->value(); 341 307 } … … 363 329 // Do any necesssary re-configuration to the buffer's number of channels. 
364 330 unsigned numberOfChannels = buffer->numberOfChannels(); 365 m_resampler.configureChannels(numberOfChannels);366 331 output(0)->setNumberOfChannels(numberOfChannels); 367 332 } 368 333 369 m_ readIndex = 0;334 m_virtualReadIndex = 0; 370 335 m_buffer = buffer; 371 336 } … … 384 349 m_isGrain = false; 385 350 m_startTime = when; 386 m_ readIndex = 0;351 m_virtualReadIndex = 0; 387 352 m_isPlaying = true; 388 353 } … … 411 376 412 377 m_grainDuration = grainDuration; 413 m_grainFrameCount = 0;414 378 415 379 m_isGrain = true; 416 380 m_startTime = when; 417 m_readIndex = static_cast<int>(m_grainOffset * buffer()->sampleRate()); 381 382 // We call floor() here since at playbackRate == 1 we don't want to go through linear interpolation 383 // at a sub-sample position since it will degrade the quality. 384 // When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer. 385 // Since playbackRate == 1 is very common, it's worth considering quality. 386 m_virtualReadIndex = floor(m_grainOffset * buffer()->sampleRate()); 387 418 388 m_isPlaying = true; 419 389 } … … 447 417 // Sanity check the total rate. It's very important that the resampler not get any bad rate values. 448 418 totalRate = max(0.0, totalRate); 449 totalRate = min(AudioResampler::MaxRate, totalRate); 419 if (!totalRate) 420 totalRate = 1; // zero rate is considered illegal 421 totalRate = min(MaxRate, totalRate); 450 422 451 423 bool isTotalRateValid = !isnan(totalRate) && !isinf(totalRate); -
trunk/Source/WebCore/webaudio/AudioBufferSourceNode.h
r89471 r90042 30 30 #include "AudioGain.h" 31 31 #include "AudioPannerNode.h" 32 #include "AudioResampler.h"33 32 #include "AudioSourceNode.h" 34 #include "AudioSourceProvider.h"35 33 #include <wtf/PassRefPtr.h> 36 34 #include <wtf/RefPtr.h> … … 44 42 // It generally will be used for short sounds which require a high degree of scheduling flexibility (can playback in rhythmically perfect ways). 45 43 46 class AudioBufferSourceNode : public AudioSourceNode , public AudioSourceProvider{44 class AudioBufferSourceNode : public AudioSourceNode { 47 45 public: 48 46 static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, double sampleRate); … … 53 51 virtual void process(size_t framesToProcess); 54 52 virtual void reset(); 55 56 // AudioSourceProvider57 // When process() is called, the resampler calls provideInput (in the audio thread) to gets its input stream.58 virtual void provideInput(AudioBus*, size_t numberOfFrames);59 53 60 54 // setBuffer() is called on the main thread. This is the buffer we use for playback. … … 84 78 AudioBufferSourceNode(AudioContext*, double sampleRate); 85 79 80 void renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames); 81 86 82 // m_buffer holds the sample data which this node outputs. 87 83 RefPtr<AudioBuffer> m_buffer; … … 110 106 double m_endTime; // in seconds 111 107 112 // m_schedulingFrameDelay is the sample-accurate scheduling offset. 113 // It's used so that we start rendering audio samples at a very precise point in time. 114 // It will only be a non-zero value the very first render quantum that we render from the buffer. 115 int m_schedulingFrameDelay; 116 117 // m_readIndex is a sample-frame index into our buffer representing the current playback position. 118 unsigned m_readIndex; 108 // m_virtualReadIndex is a sample-frame index into our buffer representing the current playback position. 109 // Since it's floating-point, it has sub-sample accuracy. 
110 double m_virtualReadIndex; 119 111 120 112 // Granular playback … … 122 114 double m_grainOffset; // in seconds 123 115 double m_grainDuration; // in seconds 124 int m_grainFrameCount; // keeps track of which frame in the grain we're currently rendering125 116 126 117 // totalPitchRate() returns the instantaneous pitch rate (non-time preserving). 127 118 // It incorporates the base pitch rate, any sample-rate conversion factor from the buffer, and any doppler shift from an associated panner node. 128 119 double totalPitchRate(); 129 130 // m_resampler performs the pitch rate changes to the buffer playback.131 AudioResampler m_resampler;132 120 133 121 // m_lastGain provides continuity when we dynamically adjust the gain. … … 140 128 mutable Mutex m_processLock; 141 129 142 // Reads the next framesToProcess sample-frames from the AudioBuffer into destinationBus.143 // A grain envelope will be applied if m_isGrain is set to true.144 void readFromBuffer(AudioBus* destinationBus, size_t framesToProcess);145 146 130 // Handles the time when we reach the end of sample data (non-looping) or the noteOff() time has been reached. 147 131 void finish();
Note: See TracChangeset for help on using the changeset viewer.