Changeset 87013 in webkit


Timestamp: May 21, 2011 2:42:08 AM
Author: crogers@google.com
Message:

2011-05-21 Chris Rogers <crogers@google.com>

Reviewed by Kenneth Russell.

AudioBufferSourceNode noteOff() method must respect scheduling time
https://bugs.webkit.org/show_bug.cgi?id=61226

No new tests since audio API is not yet implemented.

  • webaudio/AudioBufferSourceNode.cpp: (WebCore::AudioBufferSourceNode::AudioBufferSourceNode): (WebCore::AudioBufferSourceNode::process): (WebCore::AudioBufferSourceNode::provideInput): (WebCore::AudioBufferSourceNode::finish): (WebCore::AudioBufferSourceNode::noteGrainOn): (WebCore::AudioBufferSourceNode::noteOff):
  • webaudio/AudioBufferSourceNode.h:
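
In practical terms, this changeset turns noteOff() from an immediate stop into a scheduled one: the call now just records an end time, and the rendering side stops the source once that time falls at or before the current rendering quantum. A minimal standalone C++ sketch of that contract (the SourceState struct and helper names here are hypothetical, not WebCore API):

    #include <algorithm>

    struct SourceState {
        static constexpr double UnknownTime = -1; // mirrors the sentinel added in this patch
        double endTime = UnknownTime;             // when to stop, in context-timeline seconds
        bool isPlaying = true;
    };

    // noteOff(when) schedules an end time instead of stopping immediately.
    void noteOffSketch(SourceState& state, double when)
    {
        if (!state.isPlaying)
            return;
        state.endTime = std::max(0.0, when); // 0.0 (or a past time) means "stop now"
    }

    // Checked once per rendering quantum on the audio thread.
    bool shouldStop(const SourceState& state, double quantumStartTime)
    {
        return state.endTime != SourceState::UnknownTime && state.endTime <= quantumStartTime;
    }
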
Location: trunk/Source/WebCore
Files: 3 edited

  • trunk/Source/WebCore/ChangeLog

    r87011 r87013  
+2011-05-21  Chris Rogers  <crogers@google.com>
+
+        Reviewed by Kenneth Russell.
+
+        AudioBufferSourceNode noteOff() method must respect scheduling time
+        https://bugs.webkit.org/show_bug.cgi?id=61226
+
+        No new tests since audio API is not yet implemented.
+
+        * webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
+        (WebCore::AudioBufferSourceNode::process):
+        (WebCore::AudioBufferSourceNode::provideInput):
+        (WebCore::AudioBufferSourceNode::finish):
+        (WebCore::AudioBufferSourceNode::noteGrainOn):
+        (WebCore::AudioBufferSourceNode::noteOff):
+        * webaudio/AudioBufferSourceNode.h:
+
 2011-05-20  Dirk Schulze  <krit@webkit.org>
 
  • trunk/Source/WebCore/webaudio/AudioBufferSourceNode.cpp

    r73458 r87013  
 
 const double DefaultGrainDuration = 0.020; // 20ms
+const double UnknownTime = -1;
 
 PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate)
…
     , m_hasFinished(false)
     , m_startTime(0.0)
+    , m_endTime(UnknownTime)
     , m_schedulingFrameDelay(0)
     , m_readIndex(0)
…
         double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
 
+        // If we know the end time and it's already passed, then don't bother doing any more rendering this cycle.
+        if (m_endTime != UnknownTime && m_endTime <= quantumStartTime) {
+            m_isPlaying = false;
+            m_readIndex = 0;
+            finish();
+        }
+
         if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime >= quantumEndTime) {
             // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
…
         double totalGain = gain()->value() * m_buffer->gain();
         outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
+
+        // If the end time is somewhere in the middle of this time quantum, then simply zero out the
+        // frames starting at the end time.
+        if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
+            unsigned zeroStartFrame = (m_endTime - quantumStartTime) * sampleRate;
+            unsigned framesToZero = framesToProcess - zeroStartFrame;
+
+            bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
+            ASSERT(isSafe);
+
+            if (isSafe) {
+                for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
+                    memset(outputBus->channel(i)->data() + zeroStartFrame, 0, sizeof(float) * framesToZero);
+            }
+
+            m_isPlaying = false;
+            m_readIndex = 0;
+            finish();
+        }
 
         m_processLock.unlock();
…
                 }
 
-                if (!m_hasFinished) {
-                    // Let the context dereference this AudioNode.
-                    context()->notifyNodeFinishedProcessing(this);
-                    m_hasFinished = true;
-                }
+                finish();
                 return;
             }
…
 }
 
+void AudioBufferSourceNode::finish()
+{
+    if (!m_hasFinished) {
+        // Let the context dereference this AudioNode.
+        context()->notifyNodeFinishedProcessing(this);
+        m_hasFinished = true;
+    }
+}
+
 void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
 {
…
 
     grainOffset = max(0.0, grainOffset);
-    grainOffset = min(maxGrainOffset, grainOffset);   
+    grainOffset = min(maxGrainOffset, grainOffset);
     m_grainOffset = grainOffset;
 
…
 }
 
-void AudioBufferSourceNode::noteOff(double)
+void AudioBufferSourceNode::noteOff(double when)
 {
     ASSERT(isMainThread());
     if (!m_isPlaying)
         return;
-        
-    // FIXME: the "when" argument to this method is ignored.
-    m_isPlaying = false;
-    m_readIndex = 0;
+
+    when = max(0.0, when);
+    m_endTime = when;
 }
 
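
To make the zeroing arithmetic in process() concrete, here is a worked example with assumed values (44.1 kHz sample rate, 128-frame quantum, times chosen for illustration; none of these numbers come from the patch itself):

    #include <cstdio>

    int main()
    {
        // Illustrative values, not taken from the patch.
        const double sampleRate = 44100;
        const unsigned framesToProcess = 128;
        const double quantumStartTime = 1.0;
        const double quantumEndTime = quantumStartTime + framesToProcess / sampleRate; // ~1.0029 s
        const double endTime = 1.001; // falls inside this quantum

        if (endTime >= quantumStartTime && endTime < quantumEndTime) {
            unsigned zeroStartFrame = (endTime - quantumStartTime) * sampleRate; // 44
            unsigned framesToZero = framesToProcess - zeroStartFrame;            // 84
            std::printf("zero frames [%u, %u) of the quantum\n", zeroStartFrame, zeroStartFrame + framesToZero);
        }
        return 0;
    }

So frames 44 through 127 of each output channel are cleared to silence, after which the source is marked as finished.
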
  • trunk/Source/WebCore/webaudio/AudioBufferSourceNode.h

    r71205 r87013  
     double m_startTime; // in seconds
 
+    // m_endTime is the time to stop playing based on the context's timeline (0.0 or a time less than the context's current time means "now").
+    // If it hasn't been set explicitly, then the sound will not stop playing (if looping) or will stop when the end of the AudioBuffer
+    // has been reached.
+    double m_endTime; // in seconds
+
     // m_schedulingFrameDelay is the sample-accurate scheduling offset.
     // It's used so that we start rendering audio samples at a very precise point in time.
…
     // readFromBufferWithGrainEnvelope() is a low-level blitter which reads from the AudioBuffer and applies a grain envelope.
     void readFromBufferWithGrainEnvelope(float* sourceL, float* sourceR, float* destinationL, float* destinationR, size_t framesToProcess);
+
+    // Handles the time when we reach the end of sample data (non-looping) or the noteOff() time has been reached.
+    void finish();
 };
 
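
The new header comment implies three cases for m_endTime. A hedged sketch of how they map onto behaviour (the helper and enum here are illustrative only, not part of the class):

    // Hypothetical helper illustrating the three m_endTime cases described in the header comment.
    enum class StopBehavior { PlayToEndOrLoop, StopNow, StopAtScheduledTime };

    StopBehavior classifyEndTime(double endTime, double contextCurrentTime, double unknownTime = -1)
    {
        if (endTime == unknownTime)
            return StopBehavior::PlayToEndOrLoop;   // never set: play out the buffer, or loop indefinitely
        if (endTime <= contextCurrentTime)
            return StopBehavior::StopNow;           // 0.0 or a past time means "now"
        return StopBehavior::StopAtScheduledTime;   // otherwise stop when the context reaches endTime
    }
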