Changeset 196603 in WebKit


Ignore:
Timestamp:
Feb 15, 2016 2:37:53 PM (8 years ago)
Author:
jer.noble@apple.com
Message:

Null-deref crash in DefaultAudioDestinationNode::suspend()
https://bugs.webkit.org/show_bug.cgi?id=154248

Reviewed by Alex Christensen.

Drive-by fix: AudioContext should be a reference, not a pointer.

  • Modules/webaudio/AnalyserNode.cpp:

(WebCore::AnalyserNode::AnalyserNode):

  • Modules/webaudio/AnalyserNode.h:

(WebCore::AnalyserNode::create):

  • Modules/webaudio/AudioBasicInspectorNode.cpp:

(WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
(WebCore::AudioBasicInspectorNode::connect):
(WebCore::AudioBasicInspectorNode::disconnect):
(WebCore::AudioBasicInspectorNode::checkNumberOfChannelsForInput):
(WebCore::AudioBasicInspectorNode::updatePullStatus):

  • Modules/webaudio/AudioBasicInspectorNode.h:
  • Modules/webaudio/AudioBasicProcessorNode.cpp:

(WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
(WebCore::AudioBasicProcessorNode::checkNumberOfChannelsForInput):

  • Modules/webaudio/AudioBasicProcessorNode.h:
  • Modules/webaudio/AudioBufferSourceNode.cpp:

(WebCore::AudioBufferSourceNode::create):
(WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
(WebCore::AudioBufferSourceNode::setBuffer):
(WebCore::AudioBufferSourceNode::startPlaying):
(WebCore::AudioBufferSourceNode::looping):
(WebCore::AudioBufferSourceNode::setLooping):

  • Modules/webaudio/AudioBufferSourceNode.h:
  • Modules/webaudio/AudioContext.cpp:

(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::createBufferSource):
(WebCore::AudioContext::createMediaElementSource):
(WebCore::AudioContext::createMediaStreamDestination):
(WebCore::AudioContext::createScriptProcessor):
(WebCore::AudioContext::createBiquadFilter):
(WebCore::AudioContext::createWaveShaper):
(WebCore::AudioContext::createPanner):
(WebCore::AudioContext::createConvolver):
(WebCore::AudioContext::createDynamicsCompressor):
(WebCore::AudioContext::createAnalyser):
(WebCore::AudioContext::createGain):
(WebCore::AudioContext::createDelay):
(WebCore::AudioContext::createChannelSplitter):
(WebCore::AudioContext::createChannelMerger):
(WebCore::AudioContext::createOscillator):

  • Modules/webaudio/AudioContext.h:

(WebCore::operator==):
(WebCore::operator!=):

  • Modules/webaudio/AudioDestinationNode.cpp:

(WebCore::AudioDestinationNode::AudioDestinationNode):
(WebCore::AudioDestinationNode::render):
(WebCore::AudioDestinationNode::updateIsEffectivelyPlayingAudio):

  • Modules/webaudio/AudioDestinationNode.h:
  • Modules/webaudio/AudioNode.cpp:

(WebCore::AudioNode::AudioNode):
(WebCore::AudioNode::connect):
(WebCore::AudioNode::disconnect):
(WebCore::AudioNode::setChannelCount):
(WebCore::AudioNode::setChannelCountMode):
(WebCore::AudioNode::setChannelInterpretation):
(WebCore::AudioNode::scriptExecutionContext):
(WebCore::AudioNode::processIfNecessary):
(WebCore::AudioNode::checkNumberOfChannelsForInput):
(WebCore::AudioNode::propagatesSilence):
(WebCore::AudioNode::pullInputs):
(WebCore::AudioNode::enableOutputsIfNecessary):
(WebCore::AudioNode::deref):
(WebCore::AudioNode::finishDeref):

  • Modules/webaudio/AudioNode.h:

(WebCore::AudioNode::context):

  • Modules/webaudio/AudioNodeInput.cpp:

(WebCore::AudioNodeInput::connect):
(WebCore::AudioNodeInput::disconnect):
(WebCore::AudioNodeInput::disable):
(WebCore::AudioNodeInput::enable):
(WebCore::AudioNodeInput::updateInternalBus):
(WebCore::AudioNodeInput::bus):
(WebCore::AudioNodeInput::internalSummingBus):
(WebCore::AudioNodeInput::sumAllConnections):
(WebCore::AudioNodeInput::pull):

  • Modules/webaudio/AudioNodeOutput.cpp:

(WebCore::AudioNodeOutput::setNumberOfChannels):
(WebCore::AudioNodeOutput::updateNumberOfChannels):
(WebCore::AudioNodeOutput::propagateChannelCount):
(WebCore::AudioNodeOutput::pull):
(WebCore::AudioNodeOutput::bus):
(WebCore::AudioNodeOutput::fanOutCount):
(WebCore::AudioNodeOutput::paramFanOutCount):
(WebCore::AudioNodeOutput::addInput):
(WebCore::AudioNodeOutput::removeInput):
(WebCore::AudioNodeOutput::disconnectAllInputs):
(WebCore::AudioNodeOutput::addParam):
(WebCore::AudioNodeOutput::removeParam):
(WebCore::AudioNodeOutput::disconnectAllParams):
(WebCore::AudioNodeOutput::disable):
(WebCore::AudioNodeOutput::enable):

  • Modules/webaudio/AudioNodeOutput.h:

(WebCore::AudioNodeOutput::context):

  • Modules/webaudio/AudioParam.cpp:

(WebCore::AudioParam::value):
(WebCore::AudioParam::smooth):
(WebCore::AudioParam::calculateSampleAccurateValues):
(WebCore::AudioParam::calculateFinalValues):
(WebCore::AudioParam::calculateTimelineValues):
(WebCore::AudioParam::connect):
(WebCore::AudioParam::disconnect):

  • Modules/webaudio/AudioParam.h:

(WebCore::AudioParam::create):
(WebCore::AudioParam::AudioParam):

  • Modules/webaudio/AudioParamTimeline.cpp:

(WebCore::AudioParamTimeline::valueForContextTime):

  • Modules/webaudio/AudioParamTimeline.h:
  • Modules/webaudio/AudioScheduledSourceNode.cpp:

(WebCore::AudioScheduledSourceNode::AudioScheduledSourceNode):
(WebCore::AudioScheduledSourceNode::updateSchedulingInfo):
(WebCore::AudioScheduledSourceNode::start):
(WebCore::AudioScheduledSourceNode::finish):

  • Modules/webaudio/AudioScheduledSourceNode.h:
  • Modules/webaudio/AudioSummingJunction.cpp:

(WebCore::AudioSummingJunction::AudioSummingJunction):
(WebCore::AudioSummingJunction::~AudioSummingJunction):
(WebCore::AudioSummingJunction::changedOutputs):
(WebCore::AudioSummingJunction::updateRenderingState):

  • Modules/webaudio/AudioSummingJunction.h:

(WebCore::AudioSummingJunction::context):

  • Modules/webaudio/BiquadFilterNode.cpp:

(WebCore::BiquadFilterNode::BiquadFilterNode):

  • Modules/webaudio/BiquadFilterNode.h:

(WebCore::BiquadFilterNode::create):

  • Modules/webaudio/BiquadProcessor.cpp:

(WebCore::BiquadProcessor::BiquadProcessor):

  • Modules/webaudio/BiquadProcessor.h:
  • Modules/webaudio/ChannelMergerNode.cpp:

(WebCore::ChannelMergerNode::create):
(WebCore::ChannelMergerNode::ChannelMergerNode):
(WebCore::ChannelMergerNode::checkNumberOfChannelsForInput):

  • Modules/webaudio/ChannelMergerNode.h:
  • Modules/webaudio/ChannelSplitterNode.cpp:

(WebCore::ChannelSplitterNode::create):
(WebCore::ChannelSplitterNode::ChannelSplitterNode):

  • Modules/webaudio/ChannelSplitterNode.h:
  • Modules/webaudio/ConvolverNode.cpp:

(WebCore::ConvolverNode::ConvolverNode):
(WebCore::ConvolverNode::setBuffer):

  • Modules/webaudio/ConvolverNode.h:

(WebCore::ConvolverNode::create):

  • Modules/webaudio/DefaultAudioDestinationNode.cpp:

(WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
(WebCore::DefaultAudioDestinationNode::resume):
(WebCore::DefaultAudioDestinationNode::suspend):
(WebCore::DefaultAudioDestinationNode::close):

  • Modules/webaudio/DefaultAudioDestinationNode.h:

(WebCore::DefaultAudioDestinationNode::create):

  • Modules/webaudio/DelayNode.cpp:

(WebCore::DelayNode::DelayNode):

  • Modules/webaudio/DelayNode.h:

(WebCore::DelayNode::create):

  • Modules/webaudio/DelayProcessor.cpp:

(WebCore::DelayProcessor::DelayProcessor):

  • Modules/webaudio/DelayProcessor.h:
  • Modules/webaudio/DynamicsCompressorNode.cpp:

(WebCore::DynamicsCompressorNode::DynamicsCompressorNode):

  • Modules/webaudio/DynamicsCompressorNode.h:

(WebCore::DynamicsCompressorNode::create):

  • Modules/webaudio/GainNode.cpp:

(WebCore::GainNode::GainNode):
(WebCore::GainNode::checkNumberOfChannelsForInput):

  • Modules/webaudio/GainNode.h:

(WebCore::GainNode::create):

  • Modules/webaudio/MediaElementAudioSourceNode.cpp:

(WebCore::MediaElementAudioSourceNode::create):
(WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
(WebCore::MediaElementAudioSourceNode::setFormat):

  • Modules/webaudio/MediaElementAudioSourceNode.h:
  • Modules/webaudio/MediaStreamAudioDestinationNode.cpp:

(WebCore::MediaStreamAudioDestinationNode::create):
(WebCore::MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode):

  • Modules/webaudio/MediaStreamAudioDestinationNode.h:
  • Modules/webaudio/MediaStreamAudioSourceNode.cpp:

(WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
(WebCore::MediaStreamAudioSourceNode::setFormat):

  • Modules/webaudio/OfflineAudioDestinationNode.cpp:

(WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
(WebCore::OfflineAudioDestinationNode::offlineRender):
(WebCore::OfflineAudioDestinationNode::notifyComplete):

  • Modules/webaudio/OfflineAudioDestinationNode.h:

(WebCore::OfflineAudioDestinationNode::create):

  • Modules/webaudio/OscillatorNode.cpp:

(WebCore::OscillatorNode::create):
(WebCore::OscillatorNode::OscillatorNode):

  • Modules/webaudio/OscillatorNode.h:
  • Modules/webaudio/PannerNode.cpp:

(WebCore::PannerNode::PannerNode):
(WebCore::PannerNode::pullInputs):
(WebCore::PannerNode::process):
(WebCore::PannerNode::listener):
(WebCore::PannerNode::setPanningModel):

  • Modules/webaudio/PannerNode.h:

(WebCore::PannerNode::create):

  • Modules/webaudio/ScriptProcessorNode.cpp:

(WebCore::ScriptProcessorNode::create):
(WebCore::ScriptProcessorNode::ScriptProcessorNode):
(WebCore::ScriptProcessorNode::initialize):
(WebCore::ScriptProcessorNode::fireProcessEvent):

  • Modules/webaudio/ScriptProcessorNode.h:
  • Modules/webaudio/WaveShaperNode.cpp:

(WebCore::WaveShaperNode::WaveShaperNode):
(WebCore::WaveShaperNode::setOversample):

  • Modules/webaudio/WaveShaperNode.h:

(WebCore::WaveShaperNode::create):

Location:
trunk/Source/WebCore
Files:
61 edited

Legend:

Unmodified
Added
Removed
  • trunk/Source/WebCore/ChangeLog

    r196602 r196603  
     12016-02-15  Jer Noble  <jer.noble@apple.com>
     2
     3        Null-deref crash in DefaultAudioDestinationNode::suspend()
     4        https://bugs.webkit.org/show_bug.cgi?id=154248
     5
     6        Reviewed by Alex Christensen.
     7
     8        Drive-by fix: AudioContext should be a reference, not a pointer.
     9
     10        * Modules/webaudio/AnalyserNode.cpp:
     11        (WebCore::AnalyserNode::AnalyserNode):
     12        * Modules/webaudio/AnalyserNode.h:
     13        (WebCore::AnalyserNode::create):
     14        * Modules/webaudio/AudioBasicInspectorNode.cpp:
     15        (WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
     16        (WebCore::AudioBasicInspectorNode::connect):
     17        (WebCore::AudioBasicInspectorNode::disconnect):
     18        (WebCore::AudioBasicInspectorNode::checkNumberOfChannelsForInput):
     19        (WebCore::AudioBasicInspectorNode::updatePullStatus):
     20        * Modules/webaudio/AudioBasicInspectorNode.h:
     21        * Modules/webaudio/AudioBasicProcessorNode.cpp:
     22        (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
     23        (WebCore::AudioBasicProcessorNode::checkNumberOfChannelsForInput):
     24        * Modules/webaudio/AudioBasicProcessorNode.h:
     25        * Modules/webaudio/AudioBufferSourceNode.cpp:
     26        (WebCore::AudioBufferSourceNode::create):
     27        (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
     28        (WebCore::AudioBufferSourceNode::renderFromBuffer):
     29        (WebCore::AudioBufferSourceNode::setBuffer):
     30        (WebCore::AudioBufferSourceNode::startPlaying):
     31        (WebCore::AudioBufferSourceNode::looping):
     32        (WebCore::AudioBufferSourceNode::setLooping):
     33        * Modules/webaudio/AudioBufferSourceNode.h:
     34        * Modules/webaudio/AudioContext.cpp:
     35        (WebCore::AudioContext::AudioContext):
     36        (WebCore::AudioContext::createBufferSource):
     37        (WebCore::AudioContext::createMediaElementSource):
     38        (WebCore::AudioContext::createMediaStreamDestination):
     39        (WebCore::AudioContext::createScriptProcessor):
     40        (WebCore::AudioContext::createBiquadFilter):
     41        (WebCore::AudioContext::createWaveShaper):
     42        (WebCore::AudioContext::createPanner):
     43        (WebCore::AudioContext::createConvolver):
     44        (WebCore::AudioContext::createDynamicsCompressor):
     45        (WebCore::AudioContext::createAnalyser):
     46        (WebCore::AudioContext::createGain):
     47        (WebCore::AudioContext::createDelay):
     48        (WebCore::AudioContext::createChannelSplitter):
     49        (WebCore::AudioContext::createChannelMerger):
     50        (WebCore::AudioContext::createOscillator):
     51        * Modules/webaudio/AudioContext.h:
     52        (WebCore::operator==):
     53        (WebCore::operator!=):
     54        * Modules/webaudio/AudioDestinationNode.cpp:
     55        (WebCore::AudioDestinationNode::AudioDestinationNode):
     56        (WebCore::AudioDestinationNode::render):
     57        (WebCore::AudioDestinationNode::updateIsEffectivelyPlayingAudio):
     58        * Modules/webaudio/AudioDestinationNode.h:
     59        * Modules/webaudio/AudioNode.cpp:
     60        (WebCore::AudioNode::AudioNode):
     61        (WebCore::AudioNode::connect):
     62        (WebCore::AudioNode::disconnect):
     63        (WebCore::AudioNode::setChannelCount):
     64        (WebCore::AudioNode::setChannelCountMode):
     65        (WebCore::AudioNode::setChannelInterpretation):
     66        (WebCore::AudioNode::scriptExecutionContext):
     67        (WebCore::AudioNode::processIfNecessary):
     68        (WebCore::AudioNode::checkNumberOfChannelsForInput):
     69        (WebCore::AudioNode::propagatesSilence):
     70        (WebCore::AudioNode::pullInputs):
     71        (WebCore::AudioNode::enableOutputsIfNecessary):
     72        (WebCore::AudioNode::deref):
     73        (WebCore::AudioNode::finishDeref):
     74        * Modules/webaudio/AudioNode.h:
     75        (WebCore::AudioNode::context):
     76        * Modules/webaudio/AudioNodeInput.cpp:
     77        (WebCore::AudioNodeInput::connect):
     78        (WebCore::AudioNodeInput::disconnect):
     79        (WebCore::AudioNodeInput::disable):
     80        (WebCore::AudioNodeInput::enable):
     81        (WebCore::AudioNodeInput::updateInternalBus):
     82        (WebCore::AudioNodeInput::bus):
     83        (WebCore::AudioNodeInput::internalSummingBus):
     84        (WebCore::AudioNodeInput::sumAllConnections):
     85        (WebCore::AudioNodeInput::pull):
     86        * Modules/webaudio/AudioNodeOutput.cpp:
     87        (WebCore::AudioNodeOutput::setNumberOfChannels):
     88        (WebCore::AudioNodeOutput::updateNumberOfChannels):
     89        (WebCore::AudioNodeOutput::propagateChannelCount):
     90        (WebCore::AudioNodeOutput::pull):
     91        (WebCore::AudioNodeOutput::bus):
     92        (WebCore::AudioNodeOutput::fanOutCount):
     93        (WebCore::AudioNodeOutput::paramFanOutCount):
     94        (WebCore::AudioNodeOutput::addInput):
     95        (WebCore::AudioNodeOutput::removeInput):
     96        (WebCore::AudioNodeOutput::disconnectAllInputs):
     97        (WebCore::AudioNodeOutput::addParam):
     98        (WebCore::AudioNodeOutput::removeParam):
     99        (WebCore::AudioNodeOutput::disconnectAllParams):
     100        (WebCore::AudioNodeOutput::disable):
     101        (WebCore::AudioNodeOutput::enable):
     102        * Modules/webaudio/AudioNodeOutput.h:
     103        (WebCore::AudioNodeOutput::context):
     104        * Modules/webaudio/AudioParam.cpp:
     105        (WebCore::AudioParam::value):
     106        (WebCore::AudioParam::smooth):
     107        (WebCore::AudioParam::calculateSampleAccurateValues):
     108        (WebCore::AudioParam::calculateFinalValues):
     109        (WebCore::AudioParam::calculateTimelineValues):
     110        (WebCore::AudioParam::connect):
     111        (WebCore::AudioParam::disconnect):
     112        * Modules/webaudio/AudioParam.h:
     113        (WebCore::AudioParam::create):
     114        (WebCore::AudioParam::AudioParam):
     115        * Modules/webaudio/AudioParamTimeline.cpp:
     116        (WebCore::AudioParamTimeline::valueForContextTime):
     117        * Modules/webaudio/AudioParamTimeline.h:
     118        * Modules/webaudio/AudioScheduledSourceNode.cpp:
     119        (WebCore::AudioScheduledSourceNode::AudioScheduledSourceNode):
     120        (WebCore::AudioScheduledSourceNode::updateSchedulingInfo):
     121        (WebCore::AudioScheduledSourceNode::start):
     122        (WebCore::AudioScheduledSourceNode::finish):
     123        * Modules/webaudio/AudioScheduledSourceNode.h:
     124        * Modules/webaudio/AudioSummingJunction.cpp:
     125        (WebCore::AudioSummingJunction::AudioSummingJunction):
     126        (WebCore::AudioSummingJunction::~AudioSummingJunction):
     127        (WebCore::AudioSummingJunction::changedOutputs):
     128        (WebCore::AudioSummingJunction::updateRenderingState):
     129        * Modules/webaudio/AudioSummingJunction.h:
     130        (WebCore::AudioSummingJunction::context):
     131        * Modules/webaudio/BiquadFilterNode.cpp:
     132        (WebCore::BiquadFilterNode::BiquadFilterNode):
     133        * Modules/webaudio/BiquadFilterNode.h:
     134        (WebCore::BiquadFilterNode::create):
     135        * Modules/webaudio/BiquadProcessor.cpp:
     136        (WebCore::BiquadProcessor::BiquadProcessor):
     137        * Modules/webaudio/BiquadProcessor.h:
     138        * Modules/webaudio/ChannelMergerNode.cpp:
     139        (WebCore::ChannelMergerNode::create):
     140        (WebCore::ChannelMergerNode::ChannelMergerNode):
     141        (WebCore::ChannelMergerNode::checkNumberOfChannelsForInput):
     142        * Modules/webaudio/ChannelMergerNode.h:
     143        * Modules/webaudio/ChannelSplitterNode.cpp:
     144        (WebCore::ChannelSplitterNode::create):
     145        (WebCore::ChannelSplitterNode::ChannelSplitterNode):
     146        * Modules/webaudio/ChannelSplitterNode.h:
     147        * Modules/webaudio/ConvolverNode.cpp:
     148        (WebCore::ConvolverNode::ConvolverNode):
     149        (WebCore::ConvolverNode::setBuffer):
     150        * Modules/webaudio/ConvolverNode.h:
     151        (WebCore::ConvolverNode::create):
     152        * Modules/webaudio/DefaultAudioDestinationNode.cpp:
     153        (WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
     154        (WebCore::DefaultAudioDestinationNode::resume):
     155        (WebCore::DefaultAudioDestinationNode::suspend):
     156        (WebCore::DefaultAudioDestinationNode::close):
     157        * Modules/webaudio/DefaultAudioDestinationNode.h:
     158        (WebCore::DefaultAudioDestinationNode::create):
     159        * Modules/webaudio/DelayNode.cpp:
     160        (WebCore::DelayNode::DelayNode):
     161        * Modules/webaudio/DelayNode.h:
     162        (WebCore::DelayNode::create):
     163        * Modules/webaudio/DelayProcessor.cpp:
     164        (WebCore::DelayProcessor::DelayProcessor):
     165        * Modules/webaudio/DelayProcessor.h:
     166        * Modules/webaudio/DynamicsCompressorNode.cpp:
     167        (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
     168        * Modules/webaudio/DynamicsCompressorNode.h:
     169        (WebCore::DynamicsCompressorNode::create):
     170        * Modules/webaudio/GainNode.cpp:
     171        (WebCore::GainNode::GainNode):
     172        (WebCore::GainNode::checkNumberOfChannelsForInput):
     173        * Modules/webaudio/GainNode.h:
     174        (WebCore::GainNode::create):
     175        * Modules/webaudio/MediaElementAudioSourceNode.cpp:
     176        (WebCore::MediaElementAudioSourceNode::create):
     177        (WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
     178        (WebCore::MediaElementAudioSourceNode::setFormat):
     179        * Modules/webaudio/MediaElementAudioSourceNode.h:
     180        * Modules/webaudio/MediaStreamAudioDestinationNode.cpp:
     181        (WebCore::MediaStreamAudioDestinationNode::create):
     182        (WebCore::MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode):
     183        * Modules/webaudio/MediaStreamAudioDestinationNode.h:
     184        * Modules/webaudio/MediaStreamAudioSourceNode.cpp:
     185        (WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
     186        (WebCore::MediaStreamAudioSourceNode::setFormat):
     187        * Modules/webaudio/OfflineAudioDestinationNode.cpp:
     188        (WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
     189        (WebCore::OfflineAudioDestinationNode::offlineRender):
     190        (WebCore::OfflineAudioDestinationNode::notifyComplete):
     191        * Modules/webaudio/OfflineAudioDestinationNode.h:
     192        (WebCore::OfflineAudioDestinationNode::create):
     193        * Modules/webaudio/OscillatorNode.cpp:
     194        (WebCore::OscillatorNode::create):
     195        (WebCore::OscillatorNode::OscillatorNode):
     196        * Modules/webaudio/OscillatorNode.h:
     197        * Modules/webaudio/PannerNode.cpp:
     198        (WebCore::PannerNode::PannerNode):
     199        (WebCore::PannerNode::pullInputs):
     200        (WebCore::PannerNode::process):
     201        (WebCore::PannerNode::listener):
     202        (WebCore::PannerNode::setPanningModel):
     203        * Modules/webaudio/PannerNode.h:
     204        (WebCore::PannerNode::create):
     205        * Modules/webaudio/ScriptProcessorNode.cpp:
     206        (WebCore::ScriptProcessorNode::create):
     207        (WebCore::ScriptProcessorNode::ScriptProcessorNode):
     208        (WebCore::ScriptProcessorNode::initialize):
     209        (WebCore::ScriptProcessorNode::fireProcessEvent):
     210        * Modules/webaudio/ScriptProcessorNode.h:
     211        * Modules/webaudio/WaveShaperNode.cpp:
     212        (WebCore::WaveShaperNode::WaveShaperNode):
     213        (WebCore::WaveShaperNode::setOversample):
     214        * Modules/webaudio/WaveShaperNode.h:
     215        (WebCore::WaveShaperNode::create):
     216
    12172016-02-15  Jer Noble  <jer.noble@apple.com>
    2218
  • trunk/Source/WebCore/Modules/webaudio/AnalyserNode.cpp

    r155112 r196603  
    3535namespace WebCore {
    3636
    37 AnalyserNode::AnalyserNode(AudioContext* context, float sampleRate)
     37AnalyserNode::AnalyserNode(AudioContext& context, float sampleRate)
    3838    : AudioBasicInspectorNode(context, sampleRate, 2)
    3939{
  • trunk/Source/WebCore/Modules/webaudio/AnalyserNode.h

    r177733 r196603  
    3434class AnalyserNode : public AudioBasicInspectorNode {
    3535public:
    36     static Ref<AnalyserNode> create(AudioContext* context, float sampleRate)
     36    static Ref<AnalyserNode> create(AudioContext& context, float sampleRate)
    3737    {
    3838        return adoptRef(*new AnalyserNode(context, sampleRate));
     
    6868    virtual double latencyTime() const override { return 0; }
    6969
    70     AnalyserNode(AudioContext*, float sampleRate);
     70    AnalyserNode(AudioContext&, float sampleRate);
    7171
    7272    RealtimeAnalyser m_analyser;
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp

    r162368 r196603  
    3535namespace WebCore {
    3636
    37 AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext* context, float sampleRate, unsigned outputChannelCount)
     37AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext& context, float sampleRate, unsigned outputChannelCount)
    3838    : AudioNode(context, sampleRate)
    3939    , m_needAutomaticPull(false)
     
    5656    ASSERT(isMainThread());
    5757
    58     AudioContext::AutoLocker locker(*context());
     58    AudioContext::AutoLocker locker(context());
    5959
    6060    AudioNode::connect(destination, outputIndex, inputIndex, ec);
     
    6666    ASSERT(isMainThread());
    6767
    68     AudioContext::AutoLocker locker(*context());
     68    AudioContext::AutoLocker locker(context());
    6969
    7070    AudioNode::disconnect(outputIndex, ec);
     
    7474void AudioBasicInspectorNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
    7575{
    76     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     76    ASSERT(context().isAudioThread() && context().isGraphOwner());
    7777
    7878    ASSERT(input == this->input(0));
     
    9494void AudioBasicInspectorNode::updatePullStatus()
    9595{
    96     ASSERT(context()->isGraphOwner());
     96    ASSERT(context().isGraphOwner());
    9797
    9898    if (output(0)->isConnected()) {
     
    100100        // downstream node, thus remove it from the context's automatic pull list.
    101101        if (m_needAutomaticPull) {
    102             context()->removeAutomaticPullNode(this);
     102            context().removeAutomaticPullNode(this);
    103103            m_needAutomaticPull = false;
    104104        }
     
    108108            // When an AudioBasicInspectorNode is not connected to any downstream node while still connected from
    109109            // upstream node(s), add it to the context's automatic pull list.
    110             context()->addAutomaticPullNode(this);
     110            context().addAutomaticPullNode(this);
    111111            m_needAutomaticPull = true;
    112112        } else if (!numberOfInputConnections && m_needAutomaticPull) {
    113113            // The AudioBasicInspectorNode is connected to nothing, remove it from the context's automatic pull list.
    114             context()->removeAutomaticPullNode(this);
     114            context().removeAutomaticPullNode(this);
    115115            m_needAutomaticPull = false;
    116116        }
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h

    r162139 r196603  
    3535class AudioBasicInspectorNode : public AudioNode {
    3636public:
    37     AudioBasicInspectorNode(AudioContext*, float sampleRate, unsigned outputChannelCount);
     37    AudioBasicInspectorNode(AudioContext&, float sampleRate, unsigned outputChannelCount);
    3838
    3939    // AudioNode
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp

    r162368 r196603  
    3737namespace WebCore {
    3838
    39 AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, float sampleRate)
     39AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext& context, float sampleRate)
    4040    : AudioNode(context, sampleRate)
    4141{
     
    103103void AudioBasicProcessorNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
    104104{
    105     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     105    ASSERT(context().isAudioThread() && context().isGraphOwner());
    106106   
    107107    ASSERT(input == this->input(0));
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h

    r162368 r196603  
    4141class AudioBasicProcessorNode : public AudioNode {
    4242public:
    43     AudioBasicProcessorNode(AudioContext*, float sampleRate);
     43    AudioBasicProcessorNode(AudioContext&, float sampleRate);
    4444
    4545    // AudioNode
  • trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp

    r188642 r196603  
    5050const double MaxRate = 1024;
    5151
    52 Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
     52Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext& context, float sampleRate)
    5353{
    5454    return adoptRef(*new AudioBufferSourceNode(context, sampleRate));
    5555}
    5656
    57 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
     57AudioBufferSourceNode::AudioBufferSourceNode(AudioContext& context, float sampleRate)
    5858    : AudioScheduledSourceNode(context, sampleRate)
    5959    , m_buffer(nullptr)
     
    161161bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
    162162{
    163     ASSERT(context()->isAudioThread());
     163    ASSERT(context().isAudioThread());
    164164
    165165    // Basic sanity checking
     
    414414   
    415415    // The context must be locked since changing the buffer can re-configure the number of channels that are output.
    416     AudioContext::AutoLocker contextLocker(*context());
     416    AudioContext::AutoLocker contextLocker(context());
    417417   
    418418    // This synchronizes with process().
     
    470470    ASSERT(isMainThread());
    471471
    472     context()->nodeWillBeginPlayback();
     472    context().nodeWillBeginPlayback();
    473473
    474474    if (m_playbackState != UNSCHEDULED_STATE) {
     
    562562{
    563563    static bool firstTime = true;
    564     if (firstTime && context() && context()->scriptExecutionContext()) {
    565         context()->scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated.  Use 'loop' instead."));
     564    if (firstTime && context().scriptExecutionContext()) {
     565        context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated.  Use 'loop' instead."));
    566566        firstTime = false;
    567567    }
     
    573573{
    574574    static bool firstTime = true;
    575     if (firstTime && context() && context()->scriptExecutionContext()) {
    576         context()->scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated.  Use 'loop' instead."));
     575    if (firstTime && context().scriptExecutionContext()) {
     576        context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated.  Use 'loop' instead."));
    577577        firstTime = false;
    578578    }
  • trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h

    r188642 r196603  
    4646class AudioBufferSourceNode : public AudioScheduledSourceNode {
    4747public:
    48     static Ref<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
     48    static Ref<AudioBufferSourceNode> create(AudioContext&, float sampleRate);
    4949
    5050    virtual ~AudioBufferSourceNode();
     
    103103
    104104private:
    105     AudioBufferSourceNode(AudioContext*, float sampleRate);
     105    AudioBufferSourceNode(AudioContext&, float sampleRate);
    106106
    107107    virtual double tailTime() const override { return 0; }
  • trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp

    r196130 r196603  
    137137    constructCommon();
    138138
    139     m_destinationNode = DefaultAudioDestinationNode::create(this);
     139    m_destinationNode = DefaultAudioDestinationNode::create(*this);
    140140
    141141    // Initialize the destination node's muted state to match the page's current muted state.
     
    155155    // Create a new destination for offline rendering.
    156156    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    157     m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
     157    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
    158158}
    159159
     
    419419    ASSERT(isMainThread());
    420420    lazyInitialize();
    421     RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
     421    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate());
    422422
    423423    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     
    446446    }
    447447       
    448     RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
     448    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(*this, mediaElement);
    449449
    450450    mediaElement->setAudioSourceNode(node.get());
     
    498498    // FIXME: Add support for an optional argument which specifies the number of channels.
    499499    // FIXME: The default should probably be stereo instead of mono.
    500     return MediaStreamAudioDestinationNode::create(this, 1);
     500    return MediaStreamAudioDestinationNode::create(*this, 1);
    501501}
    502502
     
    519519    ASSERT(isMainThread());
    520520    lazyInitialize();
    521     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
     521    RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
    522522
    523523    if (!node.get()) {
     
    534534    ASSERT(isMainThread());
    535535    lazyInitialize();
    536     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
     536    return BiquadFilterNode::create(*this, m_destinationNode->sampleRate());
    537537}
    538538
     
    541541    ASSERT(isMainThread());
    542542    lazyInitialize();
    543     return WaveShaperNode::create(this);
     543    return WaveShaperNode::create(*this);
    544544}
    545545
     
    548548    ASSERT(isMainThread());
    549549    lazyInitialize();
    550     return PannerNode::create(this, m_destinationNode->sampleRate());
     550    return PannerNode::create(*this, m_destinationNode->sampleRate());
    551551}
    552552
     
    555555    ASSERT(isMainThread());
    556556    lazyInitialize();
    557     return ConvolverNode::create(this, m_destinationNode->sampleRate());
     557    return ConvolverNode::create(*this, m_destinationNode->sampleRate());
    558558}
    559559
     
    562562    ASSERT(isMainThread());
    563563    lazyInitialize();
    564     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
     564    return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate());
    565565}
    566566
     
    569569    ASSERT(isMainThread());
    570570    lazyInitialize();
    571     return AnalyserNode::create(this, m_destinationNode->sampleRate());
     571    return AnalyserNode::create(*this, m_destinationNode->sampleRate());
    572572}
    573573
     
    576576    ASSERT(isMainThread());
    577577    lazyInitialize();
    578     return GainNode::create(this, m_destinationNode->sampleRate());
     578    return GainNode::create(*this, m_destinationNode->sampleRate());
    579579}
    580580
     
    589589    ASSERT(isMainThread());
    590590    lazyInitialize();
    591     RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
     591    RefPtr<DelayNode> node = DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime, ec);
    592592    if (ec)
    593593        return nullptr;
     
    606606    lazyInitialize();
    607607
    608     RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
     608    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs);
    609609
    610610    if (!node.get()) {
     
    627627    lazyInitialize();
    628628
    629     RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
     629    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs);
    630630
    631631    if (!node.get()) {
     
    642642    lazyInitialize();
    643643
    644     RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
     644    RefPtr<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate());
    645645
    646646    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
  • trunk/Source/WebCore/Modules/webaudio/AudioContext.h

    r192848 r196603  
    399399};
    400400
     401inline bool operator==(const AudioContext& lhs, const AudioContext& rhs) {
     402    return &lhs == &rhs;
     403}
     404
     405inline bool operator!=(const AudioContext& lhs, const AudioContext& rhs) {
     406    return &lhs != &rhs;
     407}
     408
    401409} // WebCore
    402410
  • trunk/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp

    r190115 r196603  
    3737namespace WebCore {
    3838   
    39 AudioDestinationNode::AudioDestinationNode(AudioContext* context, float sampleRate)
     39AudioDestinationNode::AudioDestinationNode(AudioContext& context, float sampleRate)
    4040    : AudioNode(context, sampleRate)
    4141    , m_currentSampleFrame(0)
     
    6161    DenormalDisabler denormalDisabler;
    6262   
    63     context()->setAudioThread(currentThread());
     63    context().setAudioThread(currentThread());
    6464   
    65     if (!context()->isInitialized()) {
     65    if (!context().isInitialized()) {
    6666        destinationBus->zero();
    6767        setIsSilent(true);
     
    7070
    7171    // Let the context take care of any business at the start of each render quantum.
    72     context()->handlePreRenderTasks();
     72    context().handlePreRenderTasks();
    7373
    7474    // This will cause the node(s) connected to us to process, which in turn will pull on their input(s),
     
    8484
    8585    // Process nodes which need a little extra help because they are not connected to anything, but still need to process.
    86     context()->processAutomaticPullNodes(numberOfFrames);
     86    context().processAutomaticPullNodes(numberOfFrames);
    8787
    8888    // Let the context take care of any business at the end of each render quantum.
    89     context()->handlePostRenderTasks();
     89    context().handlePostRenderTasks();
    9090   
    9191    // Advance current sample-frame.
     
    121121
    122122    m_isEffectivelyPlayingAudio = isEffectivelyPlayingAudio;
    123     if (context())
    124         context()->isPlayingAudioDidChange();
     123    context().isPlayingAudioDidChange();
    125124}
    126125
  • trunk/Source/WebCore/Modules/webaudio/AudioDestinationNode.h

    r190115 r196603  
    3939class AudioDestinationNode : public AudioNode, public AudioIOCallback {
    4040public:
    41     AudioDestinationNode(AudioContext*, float sampleRate);
     41    AudioDestinationNode(AudioContext&, float sampleRate);
    4242    virtual ~AudioDestinationNode();
    4343   
  • trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp

    r194496 r196603  
    4343namespace WebCore {
    4444
    45 AudioNode::AudioNode(AudioContext* context, float sampleRate)
     45AudioNode::AudioNode(AudioContext& context, float sampleRate)
    4646    : m_isInitialized(false)
    4747    , m_nodeType(NodeTypeUnknown)
     
    127127{
    128128    ASSERT(isMainThread());
    129     AudioContext::AutoLocker locker(*context());
     129    AudioContext::AutoLocker locker(context());
    130130
    131131    if (!destination) {
     
    155155
    156156    // Let context know that a connection has been made.
    157     context()->incrementConnectionCount();
     157    context().incrementConnectionCount();
    158158}
    159159
     
    161161{
    162162    ASSERT(isMainThread());
    163     AudioContext::AutoLocker locker(*context());
     163    AudioContext::AutoLocker locker(context());
    164164
    165165    if (!param) {
     
    185185{
    186186    ASSERT(isMainThread());
    187     AudioContext::AutoLocker locker(*context());
     187    AudioContext::AutoLocker locker(context());
    188188
    189189    // Sanity check input and output indices.
     
    205205{
    206206    ASSERT(isMainThread());
    207     AudioContext::AutoLocker locker(*context());
     207    AudioContext::AutoLocker locker(context());
    208208
    209209    if (channelCount > 0 && channelCount <= AudioContext::maxNumberOfChannels()) {
     
    234234{
    235235    ASSERT(isMainThread());
    236     AudioContext::AutoLocker locker(*context());
     236    AudioContext::AutoLocker locker(context());
    237237
    238238    ChannelCountMode oldMode = m_channelCountMode;
     
    266266{
    267267    ASSERT(isMainThread());
    268     AudioContext::AutoLocker locker(*context());
     268    AudioContext::AutoLocker locker(context());
    269269
    270270    if (interpretation == "speakers")
     
    289289ScriptExecutionContext* AudioNode::scriptExecutionContext() const
    290290{
    291     return const_cast<AudioNode*>(this)->context()->scriptExecutionContext();
     291    return const_cast<AudioNode*>(this)->context().scriptExecutionContext();
    292292}
    293293
    294294void AudioNode::processIfNecessary(size_t framesToProcess)
    295295{
    296     ASSERT(context()->isAudioThread());
     296    ASSERT(context().isAudioThread());
    297297
    298298    if (!isInitialized())
     
    303303    // The first time we're called during this time slice we process, but after that we don't want to re-process,
    304304    // instead our output(s) will already have the results cached in their bus;
    305     double currentTime = context()->currentTime();
     305    double currentTime = context().currentTime();
    306306    if (m_lastProcessingTime != currentTime) {
    307307        m_lastProcessingTime = currentTime; // important to first update this time because of feedback loops in the rendering graph
     
    311311        bool silentInputs = inputsAreSilent();
    312312        if (!silentInputs)
    313             m_lastNonSilentTime = (context()->currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);
     313            m_lastNonSilentTime = (context().currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);
    314314
    315315        if (silentInputs && propagatesSilence())
     
    324324void AudioNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
    325325{
    326     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     326    ASSERT(context().isAudioThread() && context().isGraphOwner());
    327327
    328328    for (auto& savedInput : m_inputs) {
     
    338338bool AudioNode::propagatesSilence() const
    339339{
    340     return m_lastNonSilentTime + latencyTime() + tailTime() < context()->currentTime();
     340    return m_lastNonSilentTime + latencyTime() + tailTime() < context().currentTime();
    341341}
    342342
    343343void AudioNode::pullInputs(size_t framesToProcess)
    344344{
    345     ASSERT(context()->isAudioThread());
     345    ASSERT(context().isAudioThread());
    346346   
    347347    // Process all of the AudioNodes connected to our inputs.
     
    375375    if (m_isDisabled && m_connectionRefCount > 0) {
    376376        ASSERT(isMainThread());
    377         AudioContext::AutoLocker locker(*context());
     377        AudioContext::AutoLocker locker(context());
    378378
    379379        m_isDisabled = false;
     
    440440    bool mustReleaseLock = false;
    441441   
    442     if (context()->isAudioThread()) {
     442    if (context().isAudioThread()) {
    443443        // Real-time audio thread must not contend lock (to avoid glitches).
    444         hasLock = context()->tryLock(mustReleaseLock);
     444        hasLock = context().tryLock(mustReleaseLock);
    445445    } else {
    446         context()->lock(mustReleaseLock);
     446        context().lock(mustReleaseLock);
    447447        hasLock = true;
    448448    }
     
    453453
    454454        if (mustReleaseLock)
    455             context()->unlock();
     455            context().unlock();
    456456    } else {
    457457        // We were unable to get the lock, so put this in a list to finish up later.
    458         ASSERT(context()->isAudioThread());
     458        ASSERT(context().isAudioThread());
    459459        ASSERT(refType == RefTypeConnection);
    460         context()->addDeferredFinishDeref(this);
     460        context().addDeferredFinishDeref(this);
    461461    }
    462462
     
    464464    // We can't call in AudioContext::~AudioContext() since it will never be called as long as any AudioNode is alive
    465465    // because AudioNodes keep a reference to the context.
    466     if (context()->isAudioThreadFinished())
    467         context()->deleteMarkedNodes();
     466    if (context().isAudioThreadFinished())
     467        context().deleteMarkedNodes();
    468468}
    469469
    470470void AudioNode::finishDeref(RefType refType)
    471471{
    472     ASSERT(context()->isGraphOwner());
     472    ASSERT(context().isGraphOwner());
    473473   
    474474    switch (refType) {
     
    497497
    498498                // Mark for deletion at end of each render quantum or when context shuts down.
    499                 context()->markForDeletion(this);
     499                context().markForDeletion(this);
    500500                m_isMarkedForDeletion = true;
    501501            }
  • trunk/Source/WebCore/Modules/webaudio/AudioNode.h

    r162777 r196603  
    5454    enum { ProcessingSizeInFrames = 128 };
    5555
    56     AudioNode(AudioContext*, float sampleRate);
     56    AudioNode(AudioContext&, float sampleRate);
    5757    virtual ~AudioNode();
    5858
    59     AudioContext* context() { return m_context.get(); }
    60     const AudioContext* context() const { return m_context.get(); }
     59    AudioContext& context() { return m_context.get(); }
     60    const AudioContext& context() const { return m_context.get(); }
    6161
    6262    enum NodeType {
     
    199199    volatile bool m_isInitialized;
    200200    NodeType m_nodeType;
    201     RefPtr<AudioContext> m_context;
     201    Ref<AudioContext> m_context;
    202202    float m_sampleRate;
    203203    Vector<std::unique_ptr<AudioNodeInput>> m_inputs;
  • trunk/Source/WebCore/Modules/webaudio/AudioNodeInput.cpp

    r185316 r196603  
    4646void AudioNodeInput::connect(AudioNodeOutput* output)
    4747{
    48     ASSERT(context()->isGraphOwner());
     48    ASSERT(context().isGraphOwner());
    4949   
    5050    ASSERT(output && node());
     
    6565void AudioNodeInput::disconnect(AudioNodeOutput* output)
    6666{
    67     ASSERT(context()->isGraphOwner());
     67    ASSERT(context().isGraphOwner());
    6868
    6969    ASSERT(output && node());
     
    9191void AudioNodeInput::disable(AudioNodeOutput* output)
    9292{
    93     ASSERT(context()->isGraphOwner());
     93    ASSERT(context().isGraphOwner());
    9494
    9595    ASSERT(output && node());
     
    109109void AudioNodeInput::enable(AudioNodeOutput* output)
    110110{
    111     ASSERT(context()->isGraphOwner());
     111    ASSERT(context().isGraphOwner());
    112112
    113113    ASSERT(output && node());
     
    133133void AudioNodeInput::updateInternalBus()
    134134{
    135     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     135    ASSERT(context().isAudioThread() && context().isGraphOwner());
    136136
    137137    unsigned numberOfInputChannels = numberOfChannels();
     
    166166AudioBus* AudioNodeInput::bus()
    167167{
    168     ASSERT(context()->isAudioThread());
     168    ASSERT(context().isAudioThread());
    169169
    170170    // Handle single connection specially to allow for in-place processing.
     
    178178AudioBus* AudioNodeInput::internalSummingBus()
    179179{
    180     ASSERT(context()->isAudioThread());
     180    ASSERT(context().isAudioThread());
    181181
    182182    return m_internalSummingBus.get();
     
    185185void AudioNodeInput::sumAllConnections(AudioBus* summingBus, size_t framesToProcess)
    186186{
    187     ASSERT(context()->isAudioThread());
     187    ASSERT(context().isAudioThread());
    188188
    189189    // We shouldn't be calling this method if there's only one connection, since it's less efficient.
     
    211211AudioBus* AudioNodeInput::pull(AudioBus* inPlaceBus, size_t framesToProcess)
    212212{
    213     ASSERT(context()->isAudioThread());
     213    ASSERT(context().isAudioThread());
    214214
    215215    // Handle single connection case.
  • trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp

    r185316 r196603  
    5454{
    5555    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
    56     ASSERT(context()->isGraphOwner());
     56    ASSERT(context().isGraphOwner());
    5757
    5858    m_desiredNumberOfChannels = numberOfChannels;
    5959
    60     if (context()->isAudioThread()) {
     60    if (context().isAudioThread()) {
    6161        // If we're in the audio thread then we can take care of it right away (we should be at the very start or end of a rendering quantum).
    6262        updateNumberOfChannels();
    6363    } else {
    6464        // Let the context take care of it in the audio thread in the pre and post render tasks.
    65         context()->markAudioNodeOutputDirty(this);
     65        context().markAudioNodeOutputDirty(this);
    6666    }
    6767}
     
    8484void AudioNodeOutput::updateNumberOfChannels()
    8585{
    86     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     86    ASSERT(context().isAudioThread() && context().isGraphOwner());
    8787
    8888    if (m_numberOfChannels != m_desiredNumberOfChannels) {
     
    9595void AudioNodeOutput::propagateChannelCount()
    9696{
    97     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     97    ASSERT(context().isAudioThread() && context().isGraphOwner());
    9898   
    9999    if (isChannelCountKnown()) {
     
    108108AudioBus* AudioNodeOutput::pull(AudioBus* inPlaceBus, size_t framesToProcess)
    109109{
    110     ASSERT(context()->isAudioThread());
     110    ASSERT(context().isAudioThread());
    111111    ASSERT(m_renderingFanOutCount > 0 || m_renderingParamFanOutCount > 0);
    112112   
     
    127127AudioBus* AudioNodeOutput::bus() const
    128128{
    129     ASSERT(const_cast<AudioNodeOutput*>(this)->context()->isAudioThread());
     129    ASSERT(const_cast<AudioNodeOutput*>(this)->context().isAudioThread());
    130130    return m_isInPlace ? m_inPlaceBus.get() : m_internalBus.get();
    131131}
     
    133133unsigned AudioNodeOutput::fanOutCount()
    134134{
    135     ASSERT(context()->isGraphOwner());
     135    ASSERT(context().isGraphOwner());
    136136    return m_inputs.size();
    137137}
     
    139139unsigned AudioNodeOutput::paramFanOutCount()
    140140{
    141     ASSERT(context()->isGraphOwner());
     141    ASSERT(context().isGraphOwner());
    142142    return m_params.size();
    143143}
     
    155155void AudioNodeOutput::addInput(AudioNodeInput* input)
    156156{
    157     ASSERT(context()->isGraphOwner());
     157    ASSERT(context().isGraphOwner());
    158158
    159159    ASSERT(input);
     
    166166void AudioNodeOutput::removeInput(AudioNodeInput* input)
    167167{
    168     ASSERT(context()->isGraphOwner());
     168    ASSERT(context().isGraphOwner());
    169169
    170170    ASSERT(input);
     
    177177void AudioNodeOutput::disconnectAllInputs()
    178178{
    179     ASSERT(context()->isGraphOwner());
     179    ASSERT(context().isGraphOwner());
    180180   
    181181    // AudioNodeInput::disconnect() changes m_inputs by calling removeInput().
     
    188188void AudioNodeOutput::addParam(AudioParam* param)
    189189{
    190     ASSERT(context()->isGraphOwner());
     190    ASSERT(context().isGraphOwner());
    191191
    192192    ASSERT(param);
     
    199199void AudioNodeOutput::removeParam(AudioParam* param)
    200200{
    201     ASSERT(context()->isGraphOwner());
     201    ASSERT(context().isGraphOwner());
    202202
    203203    ASSERT(param);
     
    210210void AudioNodeOutput::disconnectAllParams()
    211211{
    212     ASSERT(context()->isGraphOwner());
     212    ASSERT(context().isGraphOwner());
    213213
    214214    // AudioParam::disconnect() changes m_params by calling removeParam().
     
    227227void AudioNodeOutput::disable()
    228228{
    229     ASSERT(context()->isGraphOwner());
     229    ASSERT(context().isGraphOwner());
    230230
    231231    if (m_isEnabled) {
     
    238238void AudioNodeOutput::enable()
    239239{
    240     ASSERT(context()->isGraphOwner());
     240    ASSERT(context().isGraphOwner());
    241241
    242242    if (!m_isEnabled) {
  • trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.h

    r157653 r196603  
    4848    // Can be called from any thread.
    4949    AudioNode* node() const { return m_node; }
    50     AudioContext* context() { return m_node->context(); }
     50    AudioContext& context() { return m_node->context(); }
    5151   
    5252    // Causes our AudioNode to process if it hasn't already for this render quantum.
  • trunk/Source/WebCore/Modules/webaudio/AudioParam.cpp

    r185316 r196603  
    4444{
    4545    // Update value for timeline.
    46     if (context() && context()->isAudioThread()) {
     46    if (context().isAudioThread()) {
    4747        bool hasValue;
    4848        float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
     
    7373    // Smoothing effectively is performed by the timeline.
    7474    bool useTimelineValue = false;
    75     if (context())
    76         m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
    77    
     75    m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
     76
    7877    if (m_smoothedValue == m_value) {
    7978        // Smoothed value has already approached and snapped to value.
     
    104103void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfValues)
    105104{
    106     bool isSafe = context() && context()->isAudioThread() && values && numberOfValues;
     105    bool isSafe = context().isAudioThread() && values && numberOfValues;
    107106    ASSERT(isSafe);
    108107    if (!isSafe)
     
    114113void AudioParam::calculateFinalValues(float* values, unsigned numberOfValues, bool sampleAccurate)
    115114{
    116     bool isGood = context() && context()->isAudioThread() && values && numberOfValues;
     115    bool isGood = context().isAudioThread() && values && numberOfValues;
    117116    ASSERT(isGood);
    118117    if (!isGood)
     
    155154    // Calculate values for this render quantum.
    156155    // Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size).
    157     double sampleRate = context()->sampleRate();
    158     double startTime = context()->currentTime();
     156    double sampleRate = context().sampleRate();
     157    double startTime = context().currentTime();
    159158    double endTime = startTime + numberOfValues / sampleRate;
    160159
     
    166165void AudioParam::connect(AudioNodeOutput* output)
    167166{
    168     ASSERT(context()->isGraphOwner());
     167    ASSERT(context().isGraphOwner());
    169168
    170169    ASSERT(output);
     
    181180void AudioParam::disconnect(AudioNodeOutput* output)
    182181{
    183     ASSERT(context()->isGraphOwner());
     182    ASSERT(context().isGraphOwner());
    184183
    185184    ASSERT(output);
  • trunk/Source/WebCore/Modules/webaudio/AudioParam.h

    r177733 r196603  
    4848    static const double SnapThreshold;
    4949
    50     static Ref<AudioParam> create(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
     50    static Ref<AudioParam> create(AudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
    5151    {
    5252        return adoptRef(*new AudioParam(context, name, defaultValue, minValue, maxValue, units));
     
    104104
    105105protected:
    106     AudioParam(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
     106    AudioParam(AudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
    107107        : AudioSummingJunction(context)
    108108        , m_name(name)
  • trunk/Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp

    r188642 r196603  
    114114}
    115115
    116 float AudioParamTimeline::valueForContextTime(AudioContext* context, float defaultValue, bool& hasValue)
    117 {
    118     ASSERT(context);
    119 
     116float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
     117{
    120118    {
    121119        std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
    122         if (!lock.owns_lock() || !context || !m_events.size() || context->currentTime() < m_events[0].time()) {
     120        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
    123121            hasValue = false;
    124122            return defaultValue;
     
    128126    // Ask for just a single value.
    129127    float value;
    130     double sampleRate = context->sampleRate();
    131     double startTime = context->currentTime();
     128    double sampleRate = context.sampleRate();
     129    double startTime = context.currentTime();
    132130    double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
    133131    double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
  • trunk/Source/WebCore/Modules/webaudio/AudioParamTimeline.h

    r188642 r196603  
    5454    // hasValue is set to true if a valid timeline value is returned.
    5555    // otherwise defaultValue is returned.
    56     float valueForContextTime(AudioContext*, float defaultValue, bool& hasValue);
     56    float valueForContextTime(AudioContext&, float defaultValue, bool& hasValue);
    5757
    5858    // Given the time range, calculates parameter values into the values buffer
  • trunk/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp

    r194496 r196603  
    4444const double AudioScheduledSourceNode::UnknownTime = -1;
    4545
    46 AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext* context, float sampleRate)
     46AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext& context, float sampleRate)
    4747    : AudioNode(context, sampleRate)
    4848    , m_playbackState(UNSCHEDULED_STATE)
     
    7272    // startFrame            : Start frame for this source.
    7373    // endFrame              : End frame for this source.
    74     size_t quantumStartFrame = context()->currentSampleFrame();
     74    size_t quantumStartFrame = context().currentSampleFrame();
    7575    size_t quantumEndFrame = quantumStartFrame + quantumFrameSize;
    7676    size_t startFrame = AudioUtilities::timeToSampleFrame(m_startTime, sampleRate);
     
    9292        // Increment the active source count only if we're transitioning from SCHEDULED_STATE to PLAYING_STATE.
    9393        m_playbackState = PLAYING_STATE;
    94         context()->incrementActiveSourceCount();
     94        context().incrementActiveSourceCount();
    9595    }
    9696
     
    147147    ASSERT(isMainThread());
    148148
    149     context()->nodeWillBeginPlayback();
     149    context().nodeWillBeginPlayback();
    150150
    151151    if (m_playbackState != UNSCHEDULED_STATE) {
     
    200200    if (m_playbackState != FINISHED_STATE) {
    201201        // Let the context dereference this AudioNode.
    202         context()->notifyNodeFinishedProcessing(this);
     202        context().notifyNodeFinishedProcessing(this);
    203203        m_playbackState = FINISHED_STATE;
    204         context()->decrementActiveSourceCount();
     204        context().decrementActiveSourceCount();
    205205    }
    206206
  • trunk/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h

    r189565 r196603  
    5656    };
    5757   
    58     AudioScheduledSourceNode(AudioContext*, float sampleRate);
     58    AudioScheduledSourceNode(AudioContext&, float sampleRate);
    5959
    6060    // Scheduling.
  • trunk/Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp

    r185316 r196603  
    3535namespace WebCore {
    3636
    37 AudioSummingJunction::AudioSummingJunction(AudioContext* context)
     37AudioSummingJunction::AudioSummingJunction(AudioContext& context)
    3838    : m_context(context)
    3939    , m_renderingStateNeedUpdating(false)
     
    4343AudioSummingJunction::~AudioSummingJunction()
    4444{
    45     if (m_renderingStateNeedUpdating && m_context.get())
    46         m_context->removeMarkedSummingJunction(this);
     45    if (m_renderingStateNeedUpdating)
     46        context().removeMarkedSummingJunction(this);
    4747}
    4848
    4949void AudioSummingJunction::changedOutputs()
    5050{
    51     ASSERT(context()->isGraphOwner());
     51    ASSERT(context().isGraphOwner());
    5252    if (!m_renderingStateNeedUpdating && canUpdateState()) {
    53         context()->markSummingJunctionDirty(this);
     53        context().markSummingJunctionDirty(this);
    5454        m_renderingStateNeedUpdating = true;
    5555    }
     
    5858void AudioSummingJunction::updateRenderingState()
    5959{
    60     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     60    ASSERT(context().isAudioThread() && context().isGraphOwner());
    6161
    6262    if (m_renderingStateNeedUpdating && canUpdateState()) {
  • trunk/Source/WebCore/Modules/webaudio/AudioSummingJunction.h

    r123945 r196603  
    3939class AudioSummingJunction {
    4040public:
    41     explicit AudioSummingJunction(AudioContext*);
     41    explicit AudioSummingJunction(AudioContext&);
    4242    virtual ~AudioSummingJunction();
    4343
    4444    // Can be called from any thread.
    45     AudioContext* context() { return m_context.get(); }
     45    AudioContext& context() { return m_context.get(); }
    4646
    4747    // This must be called whenever we modify m_outputs.
     
    6262
    6363protected:
    64     RefPtr<AudioContext> m_context;
     64    Ref<AudioContext> m_context;
    6565
    6666    // m_outputs contains the AudioNodeOutputs representing current connections which are not disabled.
  • trunk/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp

    r162368 r196603  
    3333namespace WebCore {
    3434
    35 BiquadFilterNode::BiquadFilterNode(AudioContext* context, float sampleRate)
     35BiquadFilterNode::BiquadFilterNode(AudioContext& context, float sampleRate)
    3636    : AudioBasicProcessorNode(context, sampleRate)
    3737{
  • trunk/Source/WebCore/Modules/webaudio/BiquadFilterNode.h

    r177733 r196603  
    4747    };
    4848
    49     static Ref<BiquadFilterNode> create(AudioContext* context, float sampleRate)
     49    static Ref<BiquadFilterNode> create(AudioContext& context, float sampleRate)
    5050    {
    5151        return adoptRef(*new BiquadFilterNode(context, sampleRate));
     
    6868
    6969private:
    70     BiquadFilterNode(AudioContext*, float sampleRate);
     70    BiquadFilterNode(AudioContext&, float sampleRate);
    7171
    7272    BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
  • trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp

    r162368 r196603  
    3333namespace WebCore {
    3434   
    35 BiquadProcessor::BiquadProcessor(AudioContext* context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
     35BiquadProcessor::BiquadProcessor(AudioContext& context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
    3636    : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
    3737    , m_type(LowPass)
  • trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.h

    r162368 r196603  
    5151    };
    5252
    53     BiquadProcessor(AudioContext*, float sampleRate, size_t numberOfChannels, bool autoInitialize);
     53    BiquadProcessor(AudioContext&, float sampleRate, size_t numberOfChannels, bool autoInitialize);
    5454
    5555    virtual ~BiquadProcessor();
  • trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp

    r184940 r196603  
    4141namespace WebCore {
    4242
    43 RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext* context, float sampleRate, unsigned numberOfInputs)
     43RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext& context, float sampleRate, unsigned numberOfInputs)
    4444{
    4545    if (!numberOfInputs || numberOfInputs > AudioContext::maxNumberOfChannels())
     
    4949}
    5050
    51 ChannelMergerNode::ChannelMergerNode(AudioContext* context, float sampleRate, unsigned numberOfInputs)
     51ChannelMergerNode::ChannelMergerNode(AudioContext& context, float sampleRate, unsigned numberOfInputs)
    5252    : AudioNode(context, sampleRate)
    5353    , m_desiredNumberOfOutputChannels(DefaultNumberOfOutputChannels)
     
    105105void ChannelMergerNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
    106106{
    107     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     107    ASSERT(context().isAudioThread() && context().isGraphOwner());
    108108
    109109    // Count how many channels we have all together from all of the inputs.
  • trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.h

    r184940 r196603  
    3939class ChannelMergerNode : public AudioNode {
    4040public:
    41     static RefPtr<ChannelMergerNode> create(AudioContext*, float sampleRate, unsigned numberOfInputs);
     41    static RefPtr<ChannelMergerNode> create(AudioContext&, float sampleRate, unsigned numberOfInputs);
    4242
    4343    // AudioNode
     
    5454    virtual double latencyTime() const override { return 0; }
    5555
    56     ChannelMergerNode(AudioContext*, float sampleRate, unsigned numberOfInputs);
     56    ChannelMergerNode(AudioContext&, float sampleRate, unsigned numberOfInputs);
    5757};
    5858
  • trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp

    r184940 r196603  
    3535namespace WebCore {
    3636   
    37 RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
     37RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext& context, float sampleRate, unsigned numberOfOutputs)
    3838{
    3939    if (!numberOfOutputs || numberOfOutputs > AudioContext::maxNumberOfChannels())
     
    4343}
    4444
    45 ChannelSplitterNode::ChannelSplitterNode(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
     45ChannelSplitterNode::ChannelSplitterNode(AudioContext& context, float sampleRate, unsigned numberOfOutputs)
    4646    : AudioNode(context, sampleRate)
    4747{
  • trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.h

    r184940 r196603  
    3535class ChannelSplitterNode : public AudioNode {
    3636public:
    37     static RefPtr<ChannelSplitterNode> create(AudioContext*, float sampleRate, unsigned numberOfOutputs);
     37    static RefPtr<ChannelSplitterNode> create(AudioContext&, float sampleRate, unsigned numberOfOutputs);
    3838
    3939    // AudioNode
     
    4545    virtual double latencyTime() const override { return 0; }
    4646
    47     ChannelSplitterNode(AudioContext*, float sampleRate, unsigned numberOfOutputs);
     47    ChannelSplitterNode(AudioContext&, float sampleRate, unsigned numberOfOutputs);
    4848};
    4949
  • trunk/Source/WebCore/Modules/webaudio/ConvolverNode.cpp

    r194496 r196603  
    4747namespace WebCore {
    4848
    49 ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
     49ConvolverNode::ConvolverNode(AudioContext& context, float sampleRate)
    5050    : AudioNode(context, sampleRate)
    5151    , m_normalize(true)
     
    124124        return;
    125125
    126     if (buffer->sampleRate() != context()->sampleRate()) {
     126    if (buffer->sampleRate() != context().sampleRate()) {
    127127        ec = NOT_SUPPORTED_ERR;
    128128        return;
     
    147147
    148148    // Create the reverb with the given impulse response.
    149     bool useBackgroundThreads = !context()->isOfflineContext();
     149    bool useBackgroundThreads = !context().isOfflineContext();
    150150    auto reverb = std::make_unique<Reverb>(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize);
    151151
  • trunk/Source/WebCore/Modules/webaudio/ConvolverNode.h

    r191492 r196603  
    3838class ConvolverNode : public AudioNode {
    3939public:
    40     static Ref<ConvolverNode> create(AudioContext* context, float sampleRate)
     40    static Ref<ConvolverNode> create(AudioContext& context, float sampleRate)
    4141    {
    4242        return adoptRef(*new ConvolverNode(context, sampleRate));
     
    5959
    6060private:
    61     ConvolverNode(AudioContext*, float sampleRate);
     61    ConvolverNode(AudioContext&, float sampleRate);
    6262
    6363    virtual double tailTime() const override;
  • trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp

    r196602 r196603  
    3939namespace WebCore {
    4040   
    41 DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext* context)
     41DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext& context)
    4242    : AudioDestinationNode(context, AudioDestination::hardwareSampleRate())
    4343    , m_numberOfInputChannels(0)
     
    112112    if (isInitialized())
    113113        m_destination->start();
    114     if (auto scriptExecutionContext = context()->scriptExecutionContext())
     114    if (auto scriptExecutionContext = context().scriptExecutionContext())
    115115        scriptExecutionContext->postTask(function);
    116116}
     
    121121    if (isInitialized())
    122122        m_destination->stop();
    123     if (auto scriptExecutionContext = context()->scriptExecutionContext())
     123    if (auto scriptExecutionContext = context().scriptExecutionContext())
    124124        scriptExecutionContext->postTask(function);
    125125}
     
    129129    ASSERT(isInitialized());
    130130    uninitialize();
    131     if (auto scriptExecutionContext = context()->scriptExecutionContext())
     131    if (auto scriptExecutionContext = context().scriptExecutionContext())
    132132        scriptExecutionContext->postTask(function);
    133133}
  • trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h

    r184940 r196603  
    3636class DefaultAudioDestinationNode : public AudioDestinationNode {
    3737public:
    38     static Ref<DefaultAudioDestinationNode> create(AudioContext* context)
     38    static Ref<DefaultAudioDestinationNode> create(AudioContext& context)
    3939    {
    4040        return adoptRef(*new DefaultAudioDestinationNode(context));     
     
    5858
    5959private:
    60     explicit DefaultAudioDestinationNode(AudioContext*);
     60    explicit DefaultAudioDestinationNode(AudioContext&);
    6161    void createDestination();
    6262
  • trunk/Source/WebCore/Modules/webaudio/DelayNode.cpp

    r162368 r196603  
    3333const double maximumAllowedDelayTime = 180;
    3434
    35 DelayNode::DelayNode(AudioContext* context, float sampleRate, double maxDelayTime, ExceptionCode& ec)
     35DelayNode::DelayNode(AudioContext& context, float sampleRate, double maxDelayTime, ExceptionCode& ec)
    3636    : AudioBasicProcessorNode(context, sampleRate)
    3737{
  • trunk/Source/WebCore/Modules/webaudio/DelayNode.h

    r177733 r196603  
    3737class DelayNode : public AudioBasicProcessorNode {
    3838public:
    39     static Ref<DelayNode> create(AudioContext* context, float sampleRate, double maxDelayTime, ExceptionCode& ec)
     39    static Ref<DelayNode> create(AudioContext& context, float sampleRate, double maxDelayTime, ExceptionCode& ec)
    4040    {
    4141        return adoptRef(*new DelayNode(context, sampleRate, maxDelayTime, ec));
     
    4545
    4646private:
    47     DelayNode(AudioContext*, float sampleRate, double maxDelayTime, ExceptionCode&);
     47    DelayNode(AudioContext&, float sampleRate, double maxDelayTime, ExceptionCode&);
    4848
    4949    DelayProcessor* delayProcessor() { return static_cast<DelayProcessor*>(processor()); }
  • trunk/Source/WebCore/Modules/webaudio/DelayProcessor.cpp

    r162368 r196603  
    3333namespace WebCore {
    3434
    35 DelayProcessor::DelayProcessor(AudioContext* context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)
     35DelayProcessor::DelayProcessor(AudioContext& context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)
    3636    : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
    3737    , m_maxDelayTime(maxDelayTime)
  • trunk/Source/WebCore/Modules/webaudio/DelayProcessor.h

    r162368 r196603  
    3737class DelayProcessor : public AudioDSPKernelProcessor {
    3838public:
    39     DelayProcessor(AudioContext*, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
     39    DelayProcessor(AudioContext&, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
    4040    virtual ~DelayProcessor();
    4141   
  • trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp

    r162376 r196603  
    3939namespace WebCore {
    4040
    41 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, float sampleRate)
     41DynamicsCompressorNode::DynamicsCompressorNode(AudioContext& context, float sampleRate)
    4242    : AudioNode(context, sampleRate)
    4343{
  • trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h

    r177733 r196603  
    3636class DynamicsCompressorNode : public AudioNode {
    3737public:
    38     static Ref<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
     38    static Ref<DynamicsCompressorNode> create(AudioContext& context, float sampleRate)
    3939    {
    4040        return adoptRef(*new DynamicsCompressorNode(context, sampleRate));
     
    6363    virtual double latencyTime() const override;
    6464
    65     DynamicsCompressorNode(AudioContext*, float sampleRate);
     65    DynamicsCompressorNode(AudioContext&, float sampleRate);
    6666
    6767    std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor;
  • trunk/Source/WebCore/Modules/webaudio/GainNode.cpp

    r162368 r196603  
    3535namespace WebCore {
    3636
    37 GainNode::GainNode(AudioContext* context, float sampleRate)
     37GainNode::GainNode(AudioContext& context, float sampleRate)
    3838    : AudioNode(context, sampleRate)
    3939    , m_lastGain(1.0)
     
    9292void GainNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
    9393{
    94     ASSERT(context()->isAudioThread() && context()->isGraphOwner());
     94    ASSERT(context().isAudioThread() && context().isGraphOwner());
    9595
    9696    ASSERT(input && input == this->input(0));
  • trunk/Source/WebCore/Modules/webaudio/GainNode.h

    r184940 r196603  
    4040class GainNode : public AudioNode {
    4141public:
    42     static Ref<GainNode> create(AudioContext* context, float sampleRate)
     42    static Ref<GainNode> create(AudioContext& context, float sampleRate)
    4343    {
    4444        return adoptRef(*new GainNode(context, sampleRate));
     
    5959    virtual double latencyTime() const override { return 0; }
    6060
    61     GainNode(AudioContext*, float sampleRate);
     61    GainNode(AudioContext&, float sampleRate);
    6262
    6363    float m_lastGain; // for de-zippering
  • trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp

    r188642 r196603  
    4141namespace WebCore {
    4242
    43 Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext* context, HTMLMediaElement* mediaElement)
     43Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext& context, HTMLMediaElement* mediaElement)
    4444{
    4545    return adoptRef(*new MediaElementAudioSourceNode(context, mediaElement));
    4646}
    4747
    48 MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context, HTMLMediaElement* mediaElement)
    49     : AudioNode(context, context->sampleRate())
     48MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext& context, HTMLMediaElement* mediaElement)
     49    : AudioNode(context, context.sampleRate())
    5050    , m_mediaElement(mediaElement)
    5151    , m_sourceNumberOfChannels(0)
     
    9393        {
    9494            // The context must be locked when changing the number of output channels.
    95             AudioContext::AutoLocker contextLocker(*context());
     95            AudioContext::AutoLocker contextLocker(context());
    9696
    9797            // Do any necesssary re-configuration to the output's number of channels.
  • trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h

    r188642 r196603  
    4242class MediaElementAudioSourceNode : public AudioNode, public AudioSourceProviderClient {
    4343public:
    44     static Ref<MediaElementAudioSourceNode> create(AudioContext*, HTMLMediaElement*);
     44    static Ref<MediaElementAudioSourceNode> create(AudioContext&, HTMLMediaElement*);
    4545
    4646    virtual ~MediaElementAudioSourceNode();
     
    5959
    6060private:
    61     MediaElementAudioSourceNode(AudioContext*, HTMLMediaElement*);
     61    MediaElementAudioSourceNode(AudioContext&, HTMLMediaElement*);
    6262
    6363    virtual double tailTime() const override { return 0; }
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp

    r194496 r196603  
    3838namespace WebCore {
    3939
    40 Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext* context, size_t numberOfChannels)
     40Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext& context, size_t numberOfChannels)
    4141{
    4242    return adoptRef(*new MediaStreamAudioDestinationNode(context, numberOfChannels));
    4343}
    4444
    45 MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* context, size_t numberOfChannels)
    46     : AudioBasicInspectorNode(context, context->sampleRate(), numberOfChannels)
     45MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext& context, size_t numberOfChannels)
     46    : AudioBasicInspectorNode(context, context.sampleRate(), numberOfChannels)
    4747    , m_mixBus(AudioBus::create(numberOfChannels, ProcessingSizeInFrames))
    4848{
     
    5151    m_source = MediaStreamAudioSource::create();
    5252    Vector<RefPtr<RealtimeMediaSource>> audioSources(1, m_source);
    53     m_stream = MediaStream::create(*context->scriptExecutionContext(), MediaStreamPrivate::create(WTFMove(audioSources), Vector<RefPtr<RealtimeMediaSource>>()));
     53    m_stream = MediaStream::create(*context.scriptExecutionContext(), MediaStreamPrivate::create(WTFMove(audioSources), Vector<RefPtr<RealtimeMediaSource>>()));
    5454
    55     m_source->setAudioFormat(numberOfChannels, context->sampleRate());
     55    m_source->setAudioFormat(numberOfChannels, context.sampleRate());
    5656
    5757    initialize();
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h

    r184940 r196603  
    4040class MediaStreamAudioDestinationNode : public AudioBasicInspectorNode {
    4141public:
    42     static Ref<MediaStreamAudioDestinationNode> create(AudioContext*, size_t numberOfChannels);
     42    static Ref<MediaStreamAudioDestinationNode> create(AudioContext&, size_t numberOfChannels);
    4343
    4444    virtual ~MediaStreamAudioDestinationNode();
     
    5353
    5454private:
    55     MediaStreamAudioDestinationNode(AudioContext*, size_t numberOfChannels);
     55    MediaStreamAudioDestinationNode(AudioContext&, size_t numberOfChannels);
    5656
    5757    virtual double tailTime() const override { return 0; }
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp

    r191721 r196603  
    4242
    4343MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
    44     : AudioNode(&context, context.sampleRate())
     44    : AudioNode(context, context.sampleRate())
    4545    , m_mediaStream(mediaStream)
    4646    , m_audioTrack(audioTrack)
     
    9898    {
    9999        // The context must be locked when changing the number of output channels.
    100         AudioContext::AutoLocker contextLocker(*context());
     100        AudioContext::AutoLocker contextLocker(context());
    101101
    102102        // Do any necesssary re-configuration to the output's number of channels.
  • trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp

    r188772 r196603  
    3939const size_t renderQuantumSize = 128;   
    4040
    41 OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext* context, AudioBuffer* renderTarget)
     41OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext& context, AudioBuffer* renderTarget)
    4242    : AudioDestinationNode(context, renderTarget->sampleRate())
    4343    , m_renderTarget(renderTarget)
     
    103103        return;
    104104
    105     bool isAudioContextInitialized = context()->isInitialized();
     105    bool isAudioContextInitialized = context().isInitialized();
    106106    ASSERT(isAudioContextInitialized);
    107107    if (!isAudioContextInitialized)
     
    149149void OfflineAudioDestinationNode::notifyComplete()
    150150{
    151     context()->fireCompletionEvent();
     151    context().fireCompletionEvent();
    152152}
    153153
  • trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h

    r188772 r196603  
    3939class OfflineAudioDestinationNode : public AudioDestinationNode {
    4040public:
    41     static Ref<OfflineAudioDestinationNode> create(AudioContext* context, AudioBuffer* renderTarget)
     41    static Ref<OfflineAudioDestinationNode> create(AudioContext& context, AudioBuffer* renderTarget)
    4242    {
    4343        return adoptRef(*new OfflineAudioDestinationNode(context, renderTarget));     
     
    5757
    5858private:
    59     OfflineAudioDestinationNode(AudioContext*, AudioBuffer* renderTarget);
     59    OfflineAudioDestinationNode(AudioContext&, AudioBuffer* renderTarget);
    6060
    6161    // This AudioNode renders into this AudioBuffer.
  • trunk/Source/WebCore/Modules/webaudio/OscillatorNode.cpp

    r188642 r196603  
    4747PeriodicWave* OscillatorNode::s_periodicWaveTriangle = nullptr;
    4848
    49 Ref<OscillatorNode> OscillatorNode::create(AudioContext* context, float sampleRate)
     49Ref<OscillatorNode> OscillatorNode::create(AudioContext& context, float sampleRate)
    5050{
    5151    return adoptRef(*new OscillatorNode(context, sampleRate));
    5252}
    5353
    54 OscillatorNode::OscillatorNode(AudioContext* context, float sampleRate)
     54OscillatorNode::OscillatorNode(AudioContext& context, float sampleRate)
    5555    : AudioScheduledSourceNode(context, sampleRate)
    5656    , m_type(SINE)
  • trunk/Source/WebCore/Modules/webaudio/OscillatorNode.h

    r188642 r196603  
    5252    };
    5353
    54     static Ref<OscillatorNode> create(AudioContext*, float sampleRate);
     54    static Ref<OscillatorNode> create(AudioContext&, float sampleRate);
    5555
    5656    virtual ~OscillatorNode();
     
    7171
    7272private:
    73     OscillatorNode(AudioContext*, float sampleRate);
     73    OscillatorNode(AudioContext&, float sampleRate);
    7474
    7575    virtual double tailTime() const override { return 0; }
  • trunk/Source/WebCore/Modules/webaudio/PannerNode.cpp

    r192281 r196603  
    4747}
    4848
    49 PannerNode::PannerNode(AudioContext* context, float sampleRate)
     49PannerNode::PannerNode(AudioContext& context, float sampleRate)
    5050    : AudioNode(context, sampleRate)
    5151    , m_panningModel(Panner::PanningModelHRTF)
     
    5454{
    5555    // Load the HRTF database asynchronously so we don't block the Javascript thread while creating the HRTF database.
    56     m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context->sampleRate());
     56    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate());
    5757
    5858    addInput(std::make_unique<AudioNodeInput>(this));
     
    8585    // We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made.
    8686    // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
    87     if (m_connectionCount != context()->connectionCount()) {
    88         m_connectionCount = context()->connectionCount();
     87    if (m_connectionCount != context().connectionCount()) {
     88        m_connectionCount = context().connectionCount();
    8989
    9090        // Recursively go through all nodes connected to us.
     
    113113    // HRTFDatabase should be loaded before proceeding for offline audio context when panningModel() is "HRTF".
    114114    if (panningModel() == "HRTF" && !m_hrtfDatabaseLoader->isLoaded()) {
    115         if (context()->isOfflineContext())
     115        if (context().isOfflineContext())
    116116            m_hrtfDatabaseLoader->waitForLoaderThreadCompletion();
    117117        else {
     
    174174AudioListener* PannerNode::listener()
    175175{
    176     return context()->listener();
     176    return context().listener();
    177177}
    178178
     
    219219    case SOUNDFIELD:
    220220        // FIXME: Implement sound field model. See // https://bugs.webkit.org/show_bug.cgi?id=77367.
    221         context()->scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("'soundfield' panning model not implemented."));
     221        context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("'soundfield' panning model not implemented."));
    222222        break;
    223223    default:
  • trunk/Source/WebCore/Modules/webaudio/PannerNode.h

    r192281 r196603  
    6565    };
    6666
    67     static Ref<PannerNode> create(AudioContext* context, float sampleRate)
     67    static Ref<PannerNode> create(AudioContext& context, float sampleRate)
    6868    {
    6969        return adoptRef(*new PannerNode(context, sampleRate));
     
    134134
    135135private:
    136     PannerNode(AudioContext*, float sampleRate);
     136    PannerNode(AudioContext&, float sampleRate);
    137137
    138138    // Returns the combined distance and cone gain attenuation.
  • trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp

    r194496 r196603  
    4141namespace WebCore {
    4242
    43 RefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
     43RefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
    4444{
    4545    // Check for valid buffer size.
     
    6969}
    7070
    71 ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
     71ScriptProcessorNode::ScriptProcessorNode(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
    7272    : AudioNode(context, sampleRate)
    7373    , m_doubleBufferIndex(0)
     
    105105        return;
    106106
    107     float sampleRate = context()->sampleRate();
     107    float sampleRate = context().sampleRate();
    108108
    109109    // Create double buffers on both the input and output sides.
     
    240240
    241241    // Avoid firing the event if the document has already gone away.
    242     if (context()->scriptExecutionContext()) {
     242    if (context().scriptExecutionContext()) {
    243243        // Let the audio thread know we've gotten to the point where it's OK for it to make another request.
    244244        m_isRequestOutstanding = false;
     
    246246        // Calculate playbackTime with the buffersize which needs to be processed each time when onaudioprocess is called.
    247247        // The outputBuffer being passed to JS will be played after exhausting previous outputBuffer by double-buffering.
    248         double playbackTime = (context()->currentSampleFrame() + m_bufferSize) / static_cast<double>(context()->sampleRate());
     248        double playbackTime = (context().currentSampleFrame() + m_bufferSize) / static_cast<double>(context().sampleRate());
    249249
    250250        // Call the JavaScript event handler which will do the audio processing.
  • trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.h

    r189565 r196603  
    5454    // Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
    5555    // The value chosen must carefully balance between latency and audio quality.
    56     static RefPtr<ScriptProcessorNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
     56    static RefPtr<ScriptProcessorNode> create(AudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
    5757
    5858    virtual ~ScriptProcessorNode();
     
    7070    virtual double latencyTime() const override;
    7171
    72     ScriptProcessorNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
     72    ScriptProcessorNode(AudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
    7373
    7474    void fireProcessEvent();
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp

    r162368 r196603  
    3434namespace WebCore {
    3535
    36 WaveShaperNode::WaveShaperNode(AudioContext* context)
    37     : AudioBasicProcessorNode(context, context->sampleRate())
     36WaveShaperNode::WaveShaperNode(AudioContext& context)
     37    : AudioBasicProcessorNode(context, context.sampleRate())
    3838{
    39     m_processor = std::make_unique<WaveShaperProcessor>(context->sampleRate(), 1);
     39    m_processor = std::make_unique<WaveShaperProcessor>(context.sampleRate(), 1);
    4040    setNodeType(NodeTypeWaveShaper);
    4141
     
    5959
    6060    // Synchronize with any graph changes or changes to channel configuration.
    61     AudioContext::AutoLocker contextLocker(*context());
     61    AudioContext::AutoLocker contextLocker(context());
    6262
    6363    if (type == "none")
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperNode.h

    r184940 r196603  
    3535class WaveShaperNode : public AudioBasicProcessorNode {
    3636public:
    37     static Ref<WaveShaperNode> create(AudioContext* context)
     37    static Ref<WaveShaperNode> create(AudioContext& context)
    3838    {
    3939        return adoptRef(*new WaveShaperNode(context));
     
    5050
    5151private:   
    52     explicit WaveShaperNode(AudioContext*);   
     52    explicit WaveShaperNode(AudioContext&);   
    5353
    5454    WaveShaperProcessor* waveShaperProcessor() { return static_cast<WaveShaperProcessor*>(processor()); }
Note: See TracChangeset for help on using the changeset viewer.