Changeset 196603 in webkit

- Timestamp: Feb 15, 2016, 2:37:53 PM
- Location: trunk/Source/WebCore
- Files: 61 edited
trunk/Source/WebCore/ChangeLog
(r196602 → r196603)

2016-02-15  Jer Noble  <jer.noble@apple.com>

Null-deref crash in DefaultAudioDestinationNode::suspend()
https://bugs.webkit.org/show_bug.cgi?id=154248

Reviewed by Alex Christensen.

Drive-by fix: AudioContext should be a reference, not a pointer.

* Modules/webaudio/AnalyserNode.cpp:
(WebCore::AnalyserNode::AnalyserNode):
* Modules/webaudio/AnalyserNode.h:
(WebCore::AnalyserNode::create):
* Modules/webaudio/AudioBasicInspectorNode.cpp:
(WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
(WebCore::AudioBasicInspectorNode::connect):
(WebCore::AudioBasicInspectorNode::disconnect):
(WebCore::AudioBasicInspectorNode::checkNumberOfChannelsForInput):
(WebCore::AudioBasicInspectorNode::updatePullStatus):
* Modules/webaudio/AudioBasicInspectorNode.h:
* Modules/webaudio/AudioBasicProcessorNode.cpp:
(WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
(WebCore::AudioBasicProcessorNode::checkNumberOfChannelsForInput):
* Modules/webaudio/AudioBasicProcessorNode.h:
* Modules/webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::create):
(WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
(WebCore::AudioBufferSourceNode::setBuffer):
(WebCore::AudioBufferSourceNode::startPlaying):
(WebCore::AudioBufferSourceNode::looping):
(WebCore::AudioBufferSourceNode::setLooping):
* Modules/webaudio/AudioBufferSourceNode.h:
* Modules/webaudio/AudioContext.cpp:
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::createBufferSource):
(WebCore::AudioContext::createMediaElementSource):
(WebCore::AudioContext::createMediaStreamDestination):
(WebCore::AudioContext::createScriptProcessor):
(WebCore::AudioContext::createBiquadFilter):
(WebCore::AudioContext::createWaveShaper):
(WebCore::AudioContext::createPanner):
(WebCore::AudioContext::createConvolver):
(WebCore::AudioContext::createDynamicsCompressor):
(WebCore::AudioContext::createAnalyser):
(WebCore::AudioContext::createGain):
(WebCore::AudioContext::createDelay):
(WebCore::AudioContext::createChannelSplitter):
(WebCore::AudioContext::createChannelMerger):
(WebCore::AudioContext::createOscillator):
* Modules/webaudio/AudioContext.h:
(WebCore::operator==):
(WebCore::operator!=):
* Modules/webaudio/AudioDestinationNode.cpp:
(WebCore::AudioDestinationNode::AudioDestinationNode):
(WebCore::AudioDestinationNode::render):
(WebCore::AudioDestinationNode::updateIsEffectivelyPlayingAudio):
* Modules/webaudio/AudioDestinationNode.h:
* Modules/webaudio/AudioNode.cpp:
(WebCore::AudioNode::AudioNode):
(WebCore::AudioNode::connect):
(WebCore::AudioNode::disconnect):
(WebCore::AudioNode::setChannelCount):
(WebCore::AudioNode::setChannelCountMode):
(WebCore::AudioNode::setChannelInterpretation):
(WebCore::AudioNode::scriptExecutionContext):
(WebCore::AudioNode::processIfNecessary):
(WebCore::AudioNode::checkNumberOfChannelsForInput):
(WebCore::AudioNode::propagatesSilence):
(WebCore::AudioNode::pullInputs):
(WebCore::AudioNode::enableOutputsIfNecessary):
(WebCore::AudioNode::deref):
(WebCore::AudioNode::finishDeref):
* Modules/webaudio/AudioNode.h:
(WebCore::AudioNode::context):
* Modules/webaudio/AudioNodeInput.cpp:
(WebCore::AudioNodeInput::connect):
(WebCore::AudioNodeInput::disconnect):
(WebCore::AudioNodeInput::disable):
(WebCore::AudioNodeInput::enable):
(WebCore::AudioNodeInput::updateInternalBus):
(WebCore::AudioNodeInput::bus):
(WebCore::AudioNodeInput::internalSummingBus):
(WebCore::AudioNodeInput::sumAllConnections):
(WebCore::AudioNodeInput::pull):
* Modules/webaudio/AudioNodeOutput.cpp:
(WebCore::AudioNodeOutput::setNumberOfChannels):
(WebCore::AudioNodeOutput::updateNumberOfChannels):
(WebCore::AudioNodeOutput::propagateChannelCount):
(WebCore::AudioNodeOutput::pull):
(WebCore::AudioNodeOutput::bus):
(WebCore::AudioNodeOutput::fanOutCount):
(WebCore::AudioNodeOutput::paramFanOutCount):
(WebCore::AudioNodeOutput::addInput):
(WebCore::AudioNodeOutput::removeInput):
(WebCore::AudioNodeOutput::disconnectAllInputs):
(WebCore::AudioNodeOutput::addParam):
(WebCore::AudioNodeOutput::removeParam):
(WebCore::AudioNodeOutput::disconnectAllParams):
(WebCore::AudioNodeOutput::disable):
(WebCore::AudioNodeOutput::enable):
* Modules/webaudio/AudioNodeOutput.h:
(WebCore::AudioNodeOutput::context):
* Modules/webaudio/AudioParam.cpp:
(WebCore::AudioParam::value):
(WebCore::AudioParam::smooth):
(WebCore::AudioParam::calculateSampleAccurateValues):
(WebCore::AudioParam::calculateFinalValues):
(WebCore::AudioParam::calculateTimelineValues):
(WebCore::AudioParam::connect):
(WebCore::AudioParam::disconnect):
* Modules/webaudio/AudioParam.h:
(WebCore::AudioParam::create):
(WebCore::AudioParam::AudioParam):
* Modules/webaudio/AudioParamTimeline.cpp:
(WebCore::AudioParamTimeline::valueForContextTime):
* Modules/webaudio/AudioParamTimeline.h:
* Modules/webaudio/AudioScheduledSourceNode.cpp:
(WebCore::AudioScheduledSourceNode::AudioScheduledSourceNode):
(WebCore::AudioScheduledSourceNode::updateSchedulingInfo):
(WebCore::AudioScheduledSourceNode::start):
(WebCore::AudioScheduledSourceNode::finish):
* Modules/webaudio/AudioScheduledSourceNode.h:
* Modules/webaudio/AudioSummingJunction.cpp:
(WebCore::AudioSummingJunction::AudioSummingJunction):
(WebCore::AudioSummingJunction::~AudioSummingJunction):
(WebCore::AudioSummingJunction::changedOutputs):
(WebCore::AudioSummingJunction::updateRenderingState):
* Modules/webaudio/AudioSummingJunction.h:
(WebCore::AudioSummingJunction::context):
* Modules/webaudio/BiquadFilterNode.cpp:
(WebCore::BiquadFilterNode::BiquadFilterNode):
* Modules/webaudio/BiquadFilterNode.h:
(WebCore::BiquadFilterNode::create):
* Modules/webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::BiquadProcessor):
* Modules/webaudio/BiquadProcessor.h:
* Modules/webaudio/ChannelMergerNode.cpp:
(WebCore::ChannelMergerNode::create):
(WebCore::ChannelMergerNode::ChannelMergerNode):
(WebCore::ChannelMergerNode::checkNumberOfChannelsForInput):
* Modules/webaudio/ChannelMergerNode.h:
* Modules/webaudio/ChannelSplitterNode.cpp:
(WebCore::ChannelSplitterNode::create):
(WebCore::ChannelSplitterNode::ChannelSplitterNode):
* Modules/webaudio/ChannelSplitterNode.h:
* Modules/webaudio/ConvolverNode.cpp:
(WebCore::ConvolverNode::ConvolverNode):
(WebCore::ConvolverNode::setBuffer):
* Modules/webaudio/ConvolverNode.h:
(WebCore::ConvolverNode::create):
* Modules/webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
(WebCore::DefaultAudioDestinationNode::resume):
(WebCore::DefaultAudioDestinationNode::suspend):
(WebCore::DefaultAudioDestinationNode::close):
* Modules/webaudio/DefaultAudioDestinationNode.h:
(WebCore::DefaultAudioDestinationNode::create):
* Modules/webaudio/DelayNode.cpp:
(WebCore::DelayNode::DelayNode):
* Modules/webaudio/DelayNode.h:
(WebCore::DelayNode::create):
* Modules/webaudio/DelayProcessor.cpp:
(WebCore::DelayProcessor::DelayProcessor):
* Modules/webaudio/DelayProcessor.h:
* Modules/webaudio/DynamicsCompressorNode.cpp:
(WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
* Modules/webaudio/DynamicsCompressorNode.h:
(WebCore::DynamicsCompressorNode::create):
* Modules/webaudio/GainNode.cpp:
(WebCore::GainNode::GainNode):
(WebCore::GainNode::checkNumberOfChannelsForInput):
* Modules/webaudio/GainNode.h:
(WebCore::GainNode::create):
* Modules/webaudio/MediaElementAudioSourceNode.cpp:
(WebCore::MediaElementAudioSourceNode::create):
(WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
(WebCore::MediaElementAudioSourceNode::setFormat):
* Modules/webaudio/MediaElementAudioSourceNode.h:
* Modules/webaudio/MediaStreamAudioDestinationNode.cpp:
(WebCore::MediaStreamAudioDestinationNode::create):
(WebCore::MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode):
* Modules/webaudio/MediaStreamAudioDestinationNode.h:
* Modules/webaudio/MediaStreamAudioSourceNode.cpp:
(WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
(WebCore::MediaStreamAudioSourceNode::setFormat):
* Modules/webaudio/OfflineAudioDestinationNode.cpp:
(WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
(WebCore::OfflineAudioDestinationNode::offlineRender):
(WebCore::OfflineAudioDestinationNode::notifyComplete):
* Modules/webaudio/OfflineAudioDestinationNode.h:
(WebCore::OfflineAudioDestinationNode::create):
* Modules/webaudio/OscillatorNode.cpp:
(WebCore::OscillatorNode::create):
(WebCore::OscillatorNode::OscillatorNode):
* Modules/webaudio/OscillatorNode.h:
* Modules/webaudio/PannerNode.cpp:
(WebCore::PannerNode::PannerNode):
(WebCore::PannerNode::pullInputs):
(WebCore::PannerNode::process):
(WebCore::PannerNode::listener):
(WebCore::PannerNode::setPanningModel):
* Modules/webaudio/PannerNode.h:
(WebCore::PannerNode::create):
* Modules/webaudio/ScriptProcessorNode.cpp:
(WebCore::ScriptProcessorNode::create):
(WebCore::ScriptProcessorNode::ScriptProcessorNode):
(WebCore::ScriptProcessorNode::initialize):
(WebCore::ScriptProcessorNode::fireProcessEvent):
* Modules/webaudio/ScriptProcessorNode.h:
* Modules/webaudio/WaveShaperNode.cpp:
(WebCore::WaveShaperNode::WaveShaperNode):
(WebCore::WaveShaperNode::setOversample):
* Modules/webaudio/WaveShaperNode.h:
(WebCore::WaveShaperNode::create):
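The "Drive-by fix" note above describes a pointer-to-reference refactor that runs through every file in this changeset. As a rough stand-alone sketch (not WebKit code; Context, Node, and the member names are invented for illustration), the shape of the change is: the node owns its context through a non-null handle and exposes it by reference, so call sites no longer branch on null before calling into it.

    // Minimal sketch of the refactor pattern, assuming a simple Context type.
    #include <iostream>
    #include <memory>

    struct Context {
        void suspend() { std::cout << "suspending\n"; }
    };

    class Node {
    public:
        explicit Node(Context& context) : m_context(context) {}

        // Reference-returning accessor: there is no null state to check.
        Context& context() { return m_context; }

    private:
        Context& m_context; // plays the role the Ref<AudioContext> member plays in the diffs below
    };

    int main()
    {
        auto context = std::make_unique<Context>();
        Node node(*context);
        node.context().suspend(); // was: if (node.context()) node.context()->suspend();
    }

The per-file diffs that follow apply this transformation to the Web Audio node classes: AudioContext* parameters become AudioContext&, context()-> call sites become context()., and runtime null checks on the context disappear.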
trunk/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
(r155112 → r196603)

     namespace WebCore {

    -AnalyserNode::AnalyserNode(AudioContext* context, float sampleRate)
    +AnalyserNode::AnalyserNode(AudioContext& context, float sampleRate)
         : AudioBasicInspectorNode(context, sampleRate, 2)
     {
trunk/Source/WebCore/Modules/webaudio/AnalyserNode.h
r177733 r196603 34 34 class AnalyserNode : public AudioBasicInspectorNode { 35 35 public: 36 static Ref<AnalyserNode> create(AudioContext *context, float sampleRate)36 static Ref<AnalyserNode> create(AudioContext& context, float sampleRate) 37 37 { 38 38 return adoptRef(*new AnalyserNode(context, sampleRate)); … … 68 68 virtual double latencyTime() const override { return 0; } 69 69 70 AnalyserNode(AudioContext *, float sampleRate);70 AnalyserNode(AudioContext&, float sampleRate); 71 71 72 72 RealtimeAnalyser m_analyser; -
trunk/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
r162368 r196603 35 35 namespace WebCore { 36 36 37 AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext *context, float sampleRate, unsigned outputChannelCount)37 AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext& context, float sampleRate, unsigned outputChannelCount) 38 38 : AudioNode(context, sampleRate) 39 39 , m_needAutomaticPull(false) … … 56 56 ASSERT(isMainThread()); 57 57 58 AudioContext::AutoLocker locker( *context());58 AudioContext::AutoLocker locker(context()); 59 59 60 60 AudioNode::connect(destination, outputIndex, inputIndex, ec); … … 66 66 ASSERT(isMainThread()); 67 67 68 AudioContext::AutoLocker locker( *context());68 AudioContext::AutoLocker locker(context()); 69 69 70 70 AudioNode::disconnect(outputIndex, ec); … … 74 74 void AudioBasicInspectorNode::checkNumberOfChannelsForInput(AudioNodeInput* input) 75 75 { 76 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());76 ASSERT(context().isAudioThread() && context().isGraphOwner()); 77 77 78 78 ASSERT(input == this->input(0)); … … 94 94 void AudioBasicInspectorNode::updatePullStatus() 95 95 { 96 ASSERT(context() ->isGraphOwner());96 ASSERT(context().isGraphOwner()); 97 97 98 98 if (output(0)->isConnected()) { … … 100 100 // downstream node, thus remove it from the context's automatic pull list. 101 101 if (m_needAutomaticPull) { 102 context() ->removeAutomaticPullNode(this);102 context().removeAutomaticPullNode(this); 103 103 m_needAutomaticPull = false; 104 104 } … … 108 108 // When an AudioBasicInspectorNode is not connected to any downstream node while still connected from 109 109 // upstream node(s), add it to the context's automatic pull list. 110 context() ->addAutomaticPullNode(this);110 context().addAutomaticPullNode(this); 111 111 m_needAutomaticPull = true; 112 112 } else if (!numberOfInputConnections && m_needAutomaticPull) { 113 113 // The AudioBasicInspectorNode is connected to nothing, remove it from the context's automatic pull list. 114 context() ->removeAutomaticPullNode(this);114 context().removeAutomaticPullNode(this); 115 115 m_needAutomaticPull = false; 116 116 } -
trunk/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
r162139 r196603 35 35 class AudioBasicInspectorNode : public AudioNode { 36 36 public: 37 AudioBasicInspectorNode(AudioContext *, float sampleRate, unsigned outputChannelCount);37 AudioBasicInspectorNode(AudioContext&, float sampleRate, unsigned outputChannelCount); 38 38 39 39 // AudioNode -
trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
r162368 r196603 37 37 namespace WebCore { 38 38 39 AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext *context, float sampleRate)39 AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext& context, float sampleRate) 40 40 : AudioNode(context, sampleRate) 41 41 { … … 103 103 void AudioBasicProcessorNode::checkNumberOfChannelsForInput(AudioNodeInput* input) 104 104 { 105 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());105 ASSERT(context().isAudioThread() && context().isGraphOwner()); 106 106 107 107 ASSERT(input == this->input(0)); -
trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
r162368 r196603 41 41 class AudioBasicProcessorNode : public AudioNode { 42 42 public: 43 AudioBasicProcessorNode(AudioContext *, float sampleRate);43 AudioBasicProcessorNode(AudioContext&, float sampleRate); 44 44 45 45 // AudioNode -
trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
r188642 r196603 50 50 const double MaxRate = 1024; 51 51 52 Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext *context, float sampleRate)52 Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext& context, float sampleRate) 53 53 { 54 54 return adoptRef(*new AudioBufferSourceNode(context, sampleRate)); 55 55 } 56 56 57 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext *context, float sampleRate)57 AudioBufferSourceNode::AudioBufferSourceNode(AudioContext& context, float sampleRate) 58 58 : AudioScheduledSourceNode(context, sampleRate) 59 59 , m_buffer(nullptr) … … 161 161 bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames) 162 162 { 163 ASSERT(context() ->isAudioThread());163 ASSERT(context().isAudioThread()); 164 164 165 165 // Basic sanity checking … … 414 414 415 415 // The context must be locked since changing the buffer can re-configure the number of channels that are output. 416 AudioContext::AutoLocker contextLocker( *context());416 AudioContext::AutoLocker contextLocker(context()); 417 417 418 418 // This synchronizes with process(). … … 470 470 ASSERT(isMainThread()); 471 471 472 context() ->nodeWillBeginPlayback();472 context().nodeWillBeginPlayback(); 473 473 474 474 if (m_playbackState != UNSCHEDULED_STATE) { … … 562 562 { 563 563 static bool firstTime = true; 564 if (firstTime && context() && context()->scriptExecutionContext()) {565 context() ->scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));564 if (firstTime && context().scriptExecutionContext()) { 565 context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.")); 566 566 firstTime = false; 567 567 } … … 573 573 { 574 574 static bool firstTime = true; 575 if (firstTime && context() && context()->scriptExecutionContext()) {576 context() ->scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));575 if (firstTime && context().scriptExecutionContext()) { 576 context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.")); 577 577 firstTime = false; 578 578 } -
trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
r188642 r196603 46 46 class AudioBufferSourceNode : public AudioScheduledSourceNode { 47 47 public: 48 static Ref<AudioBufferSourceNode> create(AudioContext *, float sampleRate);48 static Ref<AudioBufferSourceNode> create(AudioContext&, float sampleRate); 49 49 50 50 virtual ~AudioBufferSourceNode(); … … 103 103 104 104 private: 105 AudioBufferSourceNode(AudioContext *, float sampleRate);105 AudioBufferSourceNode(AudioContext&, float sampleRate); 106 106 107 107 virtual double tailTime() const override { return 0; } -
trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp
r196130 r196603 137 137 constructCommon(); 138 138 139 m_destinationNode = DefaultAudioDestinationNode::create( this);139 m_destinationNode = DefaultAudioDestinationNode::create(*this); 140 140 141 141 // Initialize the destination node's muted state to match the page's current muted state. … … 155 155 // Create a new destination for offline rendering. 156 156 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate); 157 m_destinationNode = OfflineAudioDestinationNode::create( this, m_renderTarget.get());157 m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get()); 158 158 } 159 159 … … 419 419 ASSERT(isMainThread()); 420 420 lazyInitialize(); 421 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create( this, m_destinationNode->sampleRate());421 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate()); 422 422 423 423 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. … … 446 446 } 447 447 448 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create( this, mediaElement);448 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(*this, mediaElement); 449 449 450 450 mediaElement->setAudioSourceNode(node.get()); … … 498 498 // FIXME: Add support for an optional argument which specifies the number of channels. 499 499 // FIXME: The default should probably be stereo instead of mono. 500 return MediaStreamAudioDestinationNode::create( this, 1);500 return MediaStreamAudioDestinationNode::create(*this, 1); 501 501 } 502 502 … … 519 519 ASSERT(isMainThread()); 520 520 lazyInitialize(); 521 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create( this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);521 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels); 522 522 523 523 if (!node.get()) { … … 534 534 ASSERT(isMainThread()); 535 535 lazyInitialize(); 536 return BiquadFilterNode::create( this, m_destinationNode->sampleRate());536 return BiquadFilterNode::create(*this, m_destinationNode->sampleRate()); 537 537 } 538 538 … … 541 541 ASSERT(isMainThread()); 542 542 lazyInitialize(); 543 return WaveShaperNode::create( this);543 return WaveShaperNode::create(*this); 544 544 } 545 545 … … 548 548 ASSERT(isMainThread()); 549 549 lazyInitialize(); 550 return PannerNode::create( this, m_destinationNode->sampleRate());550 return PannerNode::create(*this, m_destinationNode->sampleRate()); 551 551 } 552 552 … … 555 555 ASSERT(isMainThread()); 556 556 lazyInitialize(); 557 return ConvolverNode::create( this, m_destinationNode->sampleRate());557 return ConvolverNode::create(*this, m_destinationNode->sampleRate()); 558 558 } 559 559 … … 562 562 ASSERT(isMainThread()); 563 563 lazyInitialize(); 564 return DynamicsCompressorNode::create( this, m_destinationNode->sampleRate());564 return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate()); 565 565 } 566 566 … … 569 569 ASSERT(isMainThread()); 570 570 lazyInitialize(); 571 return AnalyserNode::create( this, m_destinationNode->sampleRate());571 return AnalyserNode::create(*this, m_destinationNode->sampleRate()); 572 572 } 573 573 … … 576 576 ASSERT(isMainThread()); 577 577 lazyInitialize(); 578 return GainNode::create( this, m_destinationNode->sampleRate());578 return 
GainNode::create(*this, m_destinationNode->sampleRate()); 579 579 } 580 580 … … 589 589 ASSERT(isMainThread()); 590 590 lazyInitialize(); 591 RefPtr<DelayNode> node = DelayNode::create( this, m_destinationNode->sampleRate(), maxDelayTime, ec);591 RefPtr<DelayNode> node = DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime, ec); 592 592 if (ec) 593 593 return nullptr; … … 606 606 lazyInitialize(); 607 607 608 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create( this, m_destinationNode->sampleRate(), numberOfOutputs);608 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs); 609 609 610 610 if (!node.get()) { … … 627 627 lazyInitialize(); 628 628 629 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create( this, m_destinationNode->sampleRate(), numberOfInputs);629 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs); 630 630 631 631 if (!node.get()) { … … 642 642 lazyInitialize(); 643 643 644 RefPtr<OscillatorNode> node = OscillatorNode::create( this, m_destinationNode->sampleRate());644 RefPtr<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate()); 645 645 646 646 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing. -
trunk/Source/WebCore/Modules/webaudio/AudioContext.h
(r192848 → r196603)

     };

    +inline bool operator==(const AudioContext& lhs, const AudioContext& rhs) {
    +    return &lhs == &rhs;
    +}
    +
    +inline bool operator!=(const AudioContext& lhs, const AudioContext& rhs) {
    +    return &lhs != &rhs;
    +}
    +
     } // WebCore
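Once AudioContext is handed around by reference, code that used to compare raw pointers for identity needs explicit operators, which is what the two inline operators added above provide. A short stand-alone illustration (MyContext is an invented name, not WebKit code):

    #include <cassert>

    struct MyContext {};

    // Identity comparison: two references are "equal" only if they bind to the same object.
    inline bool operator==(const MyContext& lhs, const MyContext& rhs) { return &lhs == &rhs; }
    inline bool operator!=(const MyContext& lhs, const MyContext& rhs) { return &lhs != &rhs; }

    int main()
    {
        MyContext a, b;
        assert(a == a); // same object
        assert(a != b); // distinct objects, even though both are "empty"
    }

This mirrors checks such as "does this node belong to the same context as that node?", which previously compared the two pointers directly.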
trunk/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
r190115 r196603 37 37 namespace WebCore { 38 38 39 AudioDestinationNode::AudioDestinationNode(AudioContext *context, float sampleRate)39 AudioDestinationNode::AudioDestinationNode(AudioContext& context, float sampleRate) 40 40 : AudioNode(context, sampleRate) 41 41 , m_currentSampleFrame(0) … … 61 61 DenormalDisabler denormalDisabler; 62 62 63 context() ->setAudioThread(currentThread());63 context().setAudioThread(currentThread()); 64 64 65 if (!context() ->isInitialized()) {65 if (!context().isInitialized()) { 66 66 destinationBus->zero(); 67 67 setIsSilent(true); … … 70 70 71 71 // Let the context take care of any business at the start of each render quantum. 72 context() ->handlePreRenderTasks();72 context().handlePreRenderTasks(); 73 73 74 74 // This will cause the node(s) connected to us to process, which in turn will pull on their input(s), … … 84 84 85 85 // Process nodes which need a little extra help because they are not connected to anything, but still need to process. 86 context() ->processAutomaticPullNodes(numberOfFrames);86 context().processAutomaticPullNodes(numberOfFrames); 87 87 88 88 // Let the context take care of any business at the end of each render quantum. 89 context() ->handlePostRenderTasks();89 context().handlePostRenderTasks(); 90 90 91 91 // Advance current sample-frame. … … 121 121 122 122 m_isEffectivelyPlayingAudio = isEffectivelyPlayingAudio; 123 if (context()) 124 context()->isPlayingAudioDidChange(); 123 context().isPlayingAudioDidChange(); 125 124 } 126 125 -
trunk/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
r190115 r196603 39 39 class AudioDestinationNode : public AudioNode, public AudioIOCallback { 40 40 public: 41 AudioDestinationNode(AudioContext *, float sampleRate);41 AudioDestinationNode(AudioContext&, float sampleRate); 42 42 virtual ~AudioDestinationNode(); 43 43 -
trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp
r194496 r196603 43 43 namespace WebCore { 44 44 45 AudioNode::AudioNode(AudioContext *context, float sampleRate)45 AudioNode::AudioNode(AudioContext& context, float sampleRate) 46 46 : m_isInitialized(false) 47 47 , m_nodeType(NodeTypeUnknown) … … 127 127 { 128 128 ASSERT(isMainThread()); 129 AudioContext::AutoLocker locker( *context());129 AudioContext::AutoLocker locker(context()); 130 130 131 131 if (!destination) { … … 155 155 156 156 // Let context know that a connection has been made. 157 context() ->incrementConnectionCount();157 context().incrementConnectionCount(); 158 158 } 159 159 … … 161 161 { 162 162 ASSERT(isMainThread()); 163 AudioContext::AutoLocker locker( *context());163 AudioContext::AutoLocker locker(context()); 164 164 165 165 if (!param) { … … 185 185 { 186 186 ASSERT(isMainThread()); 187 AudioContext::AutoLocker locker( *context());187 AudioContext::AutoLocker locker(context()); 188 188 189 189 // Sanity check input and output indices. … … 205 205 { 206 206 ASSERT(isMainThread()); 207 AudioContext::AutoLocker locker( *context());207 AudioContext::AutoLocker locker(context()); 208 208 209 209 if (channelCount > 0 && channelCount <= AudioContext::maxNumberOfChannels()) { … … 234 234 { 235 235 ASSERT(isMainThread()); 236 AudioContext::AutoLocker locker( *context());236 AudioContext::AutoLocker locker(context()); 237 237 238 238 ChannelCountMode oldMode = m_channelCountMode; … … 266 266 { 267 267 ASSERT(isMainThread()); 268 AudioContext::AutoLocker locker( *context());268 AudioContext::AutoLocker locker(context()); 269 269 270 270 if (interpretation == "speakers") … … 289 289 ScriptExecutionContext* AudioNode::scriptExecutionContext() const 290 290 { 291 return const_cast<AudioNode*>(this)->context() ->scriptExecutionContext();291 return const_cast<AudioNode*>(this)->context().scriptExecutionContext(); 292 292 } 293 293 294 294 void AudioNode::processIfNecessary(size_t framesToProcess) 295 295 { 296 ASSERT(context() ->isAudioThread());296 ASSERT(context().isAudioThread()); 297 297 298 298 if (!isInitialized()) … … 303 303 // The first time we're called during this time slice we process, but after that we don't want to re-process, 304 304 // instead our output(s) will already have the results cached in their bus; 305 double currentTime = context() ->currentTime();305 double currentTime = context().currentTime(); 306 306 if (m_lastProcessingTime != currentTime) { 307 307 m_lastProcessingTime = currentTime; // important to first update this time because of feedback loops in the rendering graph … … 311 311 bool silentInputs = inputsAreSilent(); 312 312 if (!silentInputs) 313 m_lastNonSilentTime = (context() ->currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);313 m_lastNonSilentTime = (context().currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate); 314 314 315 315 if (silentInputs && propagatesSilence()) … … 324 324 void AudioNode::checkNumberOfChannelsForInput(AudioNodeInput* input) 325 325 { 326 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());326 ASSERT(context().isAudioThread() && context().isGraphOwner()); 327 327 328 328 for (auto& savedInput : m_inputs) { … … 338 338 bool AudioNode::propagatesSilence() const 339 339 { 340 return m_lastNonSilentTime + latencyTime() + tailTime() < context() ->currentTime();340 return m_lastNonSilentTime + latencyTime() + tailTime() < context().currentTime(); 341 341 } 342 342 343 343 void AudioNode::pullInputs(size_t framesToProcess) 344 344 { 345 ASSERT(context() 
->isAudioThread());345 ASSERT(context().isAudioThread()); 346 346 347 347 // Process all of the AudioNodes connected to our inputs. … … 375 375 if (m_isDisabled && m_connectionRefCount > 0) { 376 376 ASSERT(isMainThread()); 377 AudioContext::AutoLocker locker( *context());377 AudioContext::AutoLocker locker(context()); 378 378 379 379 m_isDisabled = false; … … 440 440 bool mustReleaseLock = false; 441 441 442 if (context() ->isAudioThread()) {442 if (context().isAudioThread()) { 443 443 // Real-time audio thread must not contend lock (to avoid glitches). 444 hasLock = context() ->tryLock(mustReleaseLock);444 hasLock = context().tryLock(mustReleaseLock); 445 445 } else { 446 context() ->lock(mustReleaseLock);446 context().lock(mustReleaseLock); 447 447 hasLock = true; 448 448 } … … 453 453 454 454 if (mustReleaseLock) 455 context() ->unlock();455 context().unlock(); 456 456 } else { 457 457 // We were unable to get the lock, so put this in a list to finish up later. 458 ASSERT(context() ->isAudioThread());458 ASSERT(context().isAudioThread()); 459 459 ASSERT(refType == RefTypeConnection); 460 context() ->addDeferredFinishDeref(this);460 context().addDeferredFinishDeref(this); 461 461 } 462 462 … … 464 464 // We can't call in AudioContext::~AudioContext() since it will never be called as long as any AudioNode is alive 465 465 // because AudioNodes keep a reference to the context. 466 if (context() ->isAudioThreadFinished())467 context() ->deleteMarkedNodes();466 if (context().isAudioThreadFinished()) 467 context().deleteMarkedNodes(); 468 468 } 469 469 470 470 void AudioNode::finishDeref(RefType refType) 471 471 { 472 ASSERT(context() ->isGraphOwner());472 ASSERT(context().isGraphOwner()); 473 473 474 474 switch (refType) { … … 497 497 498 498 // Mark for deletion at end of each render quantum or when context shuts down. 499 context() ->markForDeletion(this);499 context().markForDeletion(this); 500 500 m_isMarkedForDeletion = true; 501 501 } -
trunk/Source/WebCore/Modules/webaudio/AudioNode.h
(r162777 → r196603)

     enum { ProcessingSizeInFrames = 128 };

    -    AudioNode(AudioContext*, float sampleRate);
    +    AudioNode(AudioContext&, float sampleRate);
         virtual ~AudioNode();

    -    AudioContext* context() { return m_context.get(); }
    -    const AudioContext* context() const { return m_context.get(); }
    +    AudioContext& context() { return m_context.get(); }
    +    const AudioContext& context() const { return m_context.get(); }

         enum NodeType {
    …
         volatile bool m_isInitialized;
         NodeType m_nodeType;
    -    RefPtr<AudioContext> m_context;
    +    Ref<AudioContext> m_context;
         float m_sampleRate;
         Vector<std::unique_ptr<AudioNodeInput>> m_inputs;
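The member change above (RefPtr<AudioContext> to Ref<AudioContext>) is what lets context() return a reference: a Ref-style handle is populated at construction and never holds null, whereas a RefPtr can. The sketch below (NonNullHandle and DemoContext are invented names, not WTF code, and reference counting is omitted) shows the invariant the accessor now relies on:

    #include <cassert>

    template<typename T>
    class NonNullHandle { // loosely analogous to a non-nullable smart pointer such as WTF::Ref<T>
    public:
        explicit NonNullHandle(T& object) : m_ptr(&object) {}
        T& get() const { return *m_ptr; } // always valid; callers never test for null
    private:
        T* m_ptr; // invariant: never null after construction
    };

    struct DemoContext { int sampleRate = 44100; };

    int main()
    {
        DemoContext context;
        NonNullHandle<DemoContext> handle(context);
        assert(handle.get().sampleRate == 44100);
    }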
trunk/Source/WebCore/Modules/webaudio/AudioNodeInput.cpp
r185316 r196603 46 46 void AudioNodeInput::connect(AudioNodeOutput* output) 47 47 { 48 ASSERT(context() ->isGraphOwner());48 ASSERT(context().isGraphOwner()); 49 49 50 50 ASSERT(output && node()); … … 65 65 void AudioNodeInput::disconnect(AudioNodeOutput* output) 66 66 { 67 ASSERT(context() ->isGraphOwner());67 ASSERT(context().isGraphOwner()); 68 68 69 69 ASSERT(output && node()); … … 91 91 void AudioNodeInput::disable(AudioNodeOutput* output) 92 92 { 93 ASSERT(context() ->isGraphOwner());93 ASSERT(context().isGraphOwner()); 94 94 95 95 ASSERT(output && node()); … … 109 109 void AudioNodeInput::enable(AudioNodeOutput* output) 110 110 { 111 ASSERT(context() ->isGraphOwner());111 ASSERT(context().isGraphOwner()); 112 112 113 113 ASSERT(output && node()); … … 133 133 void AudioNodeInput::updateInternalBus() 134 134 { 135 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());135 ASSERT(context().isAudioThread() && context().isGraphOwner()); 136 136 137 137 unsigned numberOfInputChannels = numberOfChannels(); … … 166 166 AudioBus* AudioNodeInput::bus() 167 167 { 168 ASSERT(context() ->isAudioThread());168 ASSERT(context().isAudioThread()); 169 169 170 170 // Handle single connection specially to allow for in-place processing. … … 178 178 AudioBus* AudioNodeInput::internalSummingBus() 179 179 { 180 ASSERT(context() ->isAudioThread());180 ASSERT(context().isAudioThread()); 181 181 182 182 return m_internalSummingBus.get(); … … 185 185 void AudioNodeInput::sumAllConnections(AudioBus* summingBus, size_t framesToProcess) 186 186 { 187 ASSERT(context() ->isAudioThread());187 ASSERT(context().isAudioThread()); 188 188 189 189 // We shouldn't be calling this method if there's only one connection, since it's less efficient. … … 211 211 AudioBus* AudioNodeInput::pull(AudioBus* inPlaceBus, size_t framesToProcess) 212 212 { 213 ASSERT(context() ->isAudioThread());213 ASSERT(context().isAudioThread()); 214 214 215 215 // Handle single connection case. -
trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp
r185316 r196603 54 54 { 55 55 ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels()); 56 ASSERT(context() ->isGraphOwner());56 ASSERT(context().isGraphOwner()); 57 57 58 58 m_desiredNumberOfChannels = numberOfChannels; 59 59 60 if (context() ->isAudioThread()) {60 if (context().isAudioThread()) { 61 61 // If we're in the audio thread then we can take care of it right away (we should be at the very start or end of a rendering quantum). 62 62 updateNumberOfChannels(); 63 63 } else { 64 64 // Let the context take care of it in the audio thread in the pre and post render tasks. 65 context() ->markAudioNodeOutputDirty(this);65 context().markAudioNodeOutputDirty(this); 66 66 } 67 67 } … … 84 84 void AudioNodeOutput::updateNumberOfChannels() 85 85 { 86 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());86 ASSERT(context().isAudioThread() && context().isGraphOwner()); 87 87 88 88 if (m_numberOfChannels != m_desiredNumberOfChannels) { … … 95 95 void AudioNodeOutput::propagateChannelCount() 96 96 { 97 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());97 ASSERT(context().isAudioThread() && context().isGraphOwner()); 98 98 99 99 if (isChannelCountKnown()) { … … 108 108 AudioBus* AudioNodeOutput::pull(AudioBus* inPlaceBus, size_t framesToProcess) 109 109 { 110 ASSERT(context() ->isAudioThread());110 ASSERT(context().isAudioThread()); 111 111 ASSERT(m_renderingFanOutCount > 0 || m_renderingParamFanOutCount > 0); 112 112 … … 127 127 AudioBus* AudioNodeOutput::bus() const 128 128 { 129 ASSERT(const_cast<AudioNodeOutput*>(this)->context() ->isAudioThread());129 ASSERT(const_cast<AudioNodeOutput*>(this)->context().isAudioThread()); 130 130 return m_isInPlace ? m_inPlaceBus.get() : m_internalBus.get(); 131 131 } … … 133 133 unsigned AudioNodeOutput::fanOutCount() 134 134 { 135 ASSERT(context() ->isGraphOwner());135 ASSERT(context().isGraphOwner()); 136 136 return m_inputs.size(); 137 137 } … … 139 139 unsigned AudioNodeOutput::paramFanOutCount() 140 140 { 141 ASSERT(context() ->isGraphOwner());141 ASSERT(context().isGraphOwner()); 142 142 return m_params.size(); 143 143 } … … 155 155 void AudioNodeOutput::addInput(AudioNodeInput* input) 156 156 { 157 ASSERT(context() ->isGraphOwner());157 ASSERT(context().isGraphOwner()); 158 158 159 159 ASSERT(input); … … 166 166 void AudioNodeOutput::removeInput(AudioNodeInput* input) 167 167 { 168 ASSERT(context() ->isGraphOwner());168 ASSERT(context().isGraphOwner()); 169 169 170 170 ASSERT(input); … … 177 177 void AudioNodeOutput::disconnectAllInputs() 178 178 { 179 ASSERT(context() ->isGraphOwner());179 ASSERT(context().isGraphOwner()); 180 180 181 181 // AudioNodeInput::disconnect() changes m_inputs by calling removeInput(). … … 188 188 void AudioNodeOutput::addParam(AudioParam* param) 189 189 { 190 ASSERT(context() ->isGraphOwner());190 ASSERT(context().isGraphOwner()); 191 191 192 192 ASSERT(param); … … 199 199 void AudioNodeOutput::removeParam(AudioParam* param) 200 200 { 201 ASSERT(context() ->isGraphOwner());201 ASSERT(context().isGraphOwner()); 202 202 203 203 ASSERT(param); … … 210 210 void AudioNodeOutput::disconnectAllParams() 211 211 { 212 ASSERT(context() ->isGraphOwner());212 ASSERT(context().isGraphOwner()); 213 213 214 214 // AudioParam::disconnect() changes m_params by calling removeParam(). 
… … 227 227 void AudioNodeOutput::disable() 228 228 { 229 ASSERT(context() ->isGraphOwner());229 ASSERT(context().isGraphOwner()); 230 230 231 231 if (m_isEnabled) { … … 238 238 void AudioNodeOutput::enable() 239 239 { 240 ASSERT(context() ->isGraphOwner());240 ASSERT(context().isGraphOwner()); 241 241 242 242 if (!m_isEnabled) { -
trunk/Source/WebCore/Modules/webaudio/AudioNodeOutput.h
r157653 r196603 48 48 // Can be called from any thread. 49 49 AudioNode* node() const { return m_node; } 50 AudioContext *context() { return m_node->context(); }50 AudioContext& context() { return m_node->context(); } 51 51 52 52 // Causes our AudioNode to process if it hasn't already for this render quantum. -
trunk/Source/WebCore/Modules/webaudio/AudioParam.cpp
r185316 r196603 44 44 { 45 45 // Update value for timeline. 46 if (context() && context()->isAudioThread()) {46 if (context().isAudioThread()) { 47 47 bool hasValue; 48 48 float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue); … … 73 73 // Smoothing effectively is performed by the timeline. 74 74 bool useTimelineValue = false; 75 if (context()) 76 m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue); 77 75 m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue); 76 78 77 if (m_smoothedValue == m_value) { 79 78 // Smoothed value has already approached and snapped to value. … … 104 103 void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfValues) 105 104 { 106 bool isSafe = context() && context()->isAudioThread() && values && numberOfValues;105 bool isSafe = context().isAudioThread() && values && numberOfValues; 107 106 ASSERT(isSafe); 108 107 if (!isSafe) … … 114 113 void AudioParam::calculateFinalValues(float* values, unsigned numberOfValues, bool sampleAccurate) 115 114 { 116 bool isGood = context() && context()->isAudioThread() && values && numberOfValues;115 bool isGood = context().isAudioThread() && values && numberOfValues; 117 116 ASSERT(isGood); 118 117 if (!isGood) … … 155 154 // Calculate values for this render quantum. 156 155 // Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size). 157 double sampleRate = context() ->sampleRate();158 double startTime = context() ->currentTime();156 double sampleRate = context().sampleRate(); 157 double startTime = context().currentTime(); 159 158 double endTime = startTime + numberOfValues / sampleRate; 160 159 … … 166 165 void AudioParam::connect(AudioNodeOutput* output) 167 166 { 168 ASSERT(context() ->isGraphOwner());167 ASSERT(context().isGraphOwner()); 169 168 170 169 ASSERT(output); … … 181 180 void AudioParam::disconnect(AudioNodeOutput* output) 182 181 { 183 ASSERT(context() ->isGraphOwner());182 ASSERT(context().isGraphOwner()); 184 183 185 184 ASSERT(output); -
trunk/Source/WebCore/Modules/webaudio/AudioParam.h
r177733 r196603 48 48 static const double SnapThreshold; 49 49 50 static Ref<AudioParam> create(AudioContext *context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)50 static Ref<AudioParam> create(AudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0) 51 51 { 52 52 return adoptRef(*new AudioParam(context, name, defaultValue, minValue, maxValue, units)); … … 104 104 105 105 protected: 106 AudioParam(AudioContext *context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)106 AudioParam(AudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0) 107 107 : AudioSummingJunction(context) 108 108 , m_name(name) -
trunk/Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp
(r188642 → r196603)

     }

    -float AudioParamTimeline::valueForContextTime(AudioContext* context, float defaultValue, bool& hasValue)
    -{
    -    ASSERT(context);
    -
    +float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
    +{
         {
             std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
    -        if (!lock.owns_lock() || !context || !m_events.size() || context->currentTime() < m_events[0].time()) {
    +        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
                 hasValue = false;
                 return defaultValue;
    …
         // Ask for just a single value.
         float value;
    -    double sampleRate = context->sampleRate();
    -    double startTime = context->currentTime();
    +    double sampleRate = context.sampleRate();
    +    double startTime = context.currentTime();
         double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
         double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
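The same idea applies to free functions and helpers: valueForContextTime() previously took AudioContext*, asserted it, and tested !context at runtime; taking AudioContext& moves that guarantee into the signature. A generic sketch of this kind of API migration (Clock and the two process functions are invented names, not WebKit code):

    #include <cassert>

    struct Clock { double currentTime() const { return 1.5; } };

    // Before: nullable parameter, guarded at runtime.
    double processBefore(const Clock* clock)
    {
        assert(clock);
        if (!clock)
            return 0;
        return clock->currentTime();
    }

    // After: the type system guarantees a valid object, so the guards disappear.
    double processAfter(const Clock& clock)
    {
        return clock.currentTime();
    }

    int main()
    {
        Clock clock;
        assert(processBefore(&clock) == processAfter(clock));
    }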
trunk/Source/WebCore/Modules/webaudio/AudioParamTimeline.h
r188642 r196603 54 54 // hasValue is set to true if a valid timeline value is returned. 55 55 // otherwise defaultValue is returned. 56 float valueForContextTime(AudioContext *, float defaultValue, bool& hasValue);56 float valueForContextTime(AudioContext&, float defaultValue, bool& hasValue); 57 57 58 58 // Given the time range, calculates parameter values into the values buffer -
trunk/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
r194496 r196603 44 44 const double AudioScheduledSourceNode::UnknownTime = -1; 45 45 46 AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext *context, float sampleRate)46 AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext& context, float sampleRate) 47 47 : AudioNode(context, sampleRate) 48 48 , m_playbackState(UNSCHEDULED_STATE) … … 72 72 // startFrame : Start frame for this source. 73 73 // endFrame : End frame for this source. 74 size_t quantumStartFrame = context() ->currentSampleFrame();74 size_t quantumStartFrame = context().currentSampleFrame(); 75 75 size_t quantumEndFrame = quantumStartFrame + quantumFrameSize; 76 76 size_t startFrame = AudioUtilities::timeToSampleFrame(m_startTime, sampleRate); … … 92 92 // Increment the active source count only if we're transitioning from SCHEDULED_STATE to PLAYING_STATE. 93 93 m_playbackState = PLAYING_STATE; 94 context() ->incrementActiveSourceCount();94 context().incrementActiveSourceCount(); 95 95 } 96 96 … … 147 147 ASSERT(isMainThread()); 148 148 149 context() ->nodeWillBeginPlayback();149 context().nodeWillBeginPlayback(); 150 150 151 151 if (m_playbackState != UNSCHEDULED_STATE) { … … 200 200 if (m_playbackState != FINISHED_STATE) { 201 201 // Let the context dereference this AudioNode. 202 context() ->notifyNodeFinishedProcessing(this);202 context().notifyNodeFinishedProcessing(this); 203 203 m_playbackState = FINISHED_STATE; 204 context() ->decrementActiveSourceCount();204 context().decrementActiveSourceCount(); 205 205 } 206 206 -
trunk/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h
r189565 r196603 56 56 }; 57 57 58 AudioScheduledSourceNode(AudioContext *, float sampleRate);58 AudioScheduledSourceNode(AudioContext&, float sampleRate); 59 59 60 60 // Scheduling. -
trunk/Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp
r185316 r196603 35 35 namespace WebCore { 36 36 37 AudioSummingJunction::AudioSummingJunction(AudioContext *context)37 AudioSummingJunction::AudioSummingJunction(AudioContext& context) 38 38 : m_context(context) 39 39 , m_renderingStateNeedUpdating(false) … … 43 43 AudioSummingJunction::~AudioSummingJunction() 44 44 { 45 if (m_renderingStateNeedUpdating && m_context.get())46 m_context->removeMarkedSummingJunction(this);45 if (m_renderingStateNeedUpdating) 46 context().removeMarkedSummingJunction(this); 47 47 } 48 48 49 49 void AudioSummingJunction::changedOutputs() 50 50 { 51 ASSERT(context() ->isGraphOwner());51 ASSERT(context().isGraphOwner()); 52 52 if (!m_renderingStateNeedUpdating && canUpdateState()) { 53 context() ->markSummingJunctionDirty(this);53 context().markSummingJunctionDirty(this); 54 54 m_renderingStateNeedUpdating = true; 55 55 } … … 58 58 void AudioSummingJunction::updateRenderingState() 59 59 { 60 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());60 ASSERT(context().isAudioThread() && context().isGraphOwner()); 61 61 62 62 if (m_renderingStateNeedUpdating && canUpdateState()) { -
trunk/Source/WebCore/Modules/webaudio/AudioSummingJunction.h
r123945 r196603 39 39 class AudioSummingJunction { 40 40 public: 41 explicit AudioSummingJunction(AudioContext *);41 explicit AudioSummingJunction(AudioContext&); 42 42 virtual ~AudioSummingJunction(); 43 43 44 44 // Can be called from any thread. 45 AudioContext *context() { return m_context.get(); }45 AudioContext& context() { return m_context.get(); } 46 46 47 47 // This must be called whenever we modify m_outputs. … … 62 62 63 63 protected: 64 Ref Ptr<AudioContext> m_context;64 Ref<AudioContext> m_context; 65 65 66 66 // m_outputs contains the AudioNodeOutputs representing current connections which are not disabled. -
trunk/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
r162368 r196603 33 33 namespace WebCore { 34 34 35 BiquadFilterNode::BiquadFilterNode(AudioContext *context, float sampleRate)35 BiquadFilterNode::BiquadFilterNode(AudioContext& context, float sampleRate) 36 36 : AudioBasicProcessorNode(context, sampleRate) 37 37 { -
trunk/Source/WebCore/Modules/webaudio/BiquadFilterNode.h
r177733 r196603 47 47 }; 48 48 49 static Ref<BiquadFilterNode> create(AudioContext *context, float sampleRate)49 static Ref<BiquadFilterNode> create(AudioContext& context, float sampleRate) 50 50 { 51 51 return adoptRef(*new BiquadFilterNode(context, sampleRate)); … … 68 68 69 69 private: 70 BiquadFilterNode(AudioContext *, float sampleRate);70 BiquadFilterNode(AudioContext&, float sampleRate); 71 71 72 72 BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); } -
trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
r162368 r196603 33 33 namespace WebCore { 34 34 35 BiquadProcessor::BiquadProcessor(AudioContext *context, float sampleRate, size_t numberOfChannels, bool autoInitialize)35 BiquadProcessor::BiquadProcessor(AudioContext& context, float sampleRate, size_t numberOfChannels, bool autoInitialize) 36 36 : AudioDSPKernelProcessor(sampleRate, numberOfChannels) 37 37 , m_type(LowPass) -
trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.h
r162368 r196603 51 51 }; 52 52 53 BiquadProcessor(AudioContext *, float sampleRate, size_t numberOfChannels, bool autoInitialize);53 BiquadProcessor(AudioContext&, float sampleRate, size_t numberOfChannels, bool autoInitialize); 54 54 55 55 virtual ~BiquadProcessor(); -
trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
r184940 r196603 41 41 namespace WebCore { 42 42 43 RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext *context, float sampleRate, unsigned numberOfInputs)43 RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext& context, float sampleRate, unsigned numberOfInputs) 44 44 { 45 45 if (!numberOfInputs || numberOfInputs > AudioContext::maxNumberOfChannels()) … … 49 49 } 50 50 51 ChannelMergerNode::ChannelMergerNode(AudioContext *context, float sampleRate, unsigned numberOfInputs)51 ChannelMergerNode::ChannelMergerNode(AudioContext& context, float sampleRate, unsigned numberOfInputs) 52 52 : AudioNode(context, sampleRate) 53 53 , m_desiredNumberOfOutputChannels(DefaultNumberOfOutputChannels) … … 105 105 void ChannelMergerNode::checkNumberOfChannelsForInput(AudioNodeInput* input) 106 106 { 107 ASSERT(context() ->isAudioThread() && context()->isGraphOwner());107 ASSERT(context().isAudioThread() && context().isGraphOwner()); 108 108 109 109 // Count how many channels we have all together from all of the inputs. -
trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.h
r184940 r196603 39 39 class ChannelMergerNode : public AudioNode { 40 40 public: 41 static RefPtr<ChannelMergerNode> create(AudioContext *, float sampleRate, unsigned numberOfInputs);41 static RefPtr<ChannelMergerNode> create(AudioContext&, float sampleRate, unsigned numberOfInputs); 42 42 43 43 // AudioNode … … 54 54 virtual double latencyTime() const override { return 0; } 55 55 56 ChannelMergerNode(AudioContext *, float sampleRate, unsigned numberOfInputs);56 ChannelMergerNode(AudioContext&, float sampleRate, unsigned numberOfInputs); 57 57 }; 58 58 -
trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
r184940 r196603 35 35 namespace WebCore { 36 36 37 RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext *context, float sampleRate, unsigned numberOfOutputs)37 RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext& context, float sampleRate, unsigned numberOfOutputs) 38 38 { 39 39 if (!numberOfOutputs || numberOfOutputs > AudioContext::maxNumberOfChannels()) … … 43 43 } 44 44 45 ChannelSplitterNode::ChannelSplitterNode(AudioContext *context, float sampleRate, unsigned numberOfOutputs)45 ChannelSplitterNode::ChannelSplitterNode(AudioContext& context, float sampleRate, unsigned numberOfOutputs) 46 46 : AudioNode(context, sampleRate) 47 47 { -
trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.h
r184940 r196603 35 35 class ChannelSplitterNode : public AudioNode { 36 36 public: 37 static RefPtr<ChannelSplitterNode> create(AudioContext *, float sampleRate, unsigned numberOfOutputs);37 static RefPtr<ChannelSplitterNode> create(AudioContext&, float sampleRate, unsigned numberOfOutputs); 38 38 39 39 // AudioNode … … 45 45 virtual double latencyTime() const override { return 0; } 46 46 47 ChannelSplitterNode(AudioContext *, float sampleRate, unsigned numberOfOutputs);47 ChannelSplitterNode(AudioContext&, float sampleRate, unsigned numberOfOutputs); 48 48 }; 49 49 -
trunk/Source/WebCore/Modules/webaudio/ConvolverNode.cpp
r194496 r196603 47 47 namespace WebCore { 48 48 49 ConvolverNode::ConvolverNode(AudioContext *context, float sampleRate)49 ConvolverNode::ConvolverNode(AudioContext& context, float sampleRate) 50 50 : AudioNode(context, sampleRate) 51 51 , m_normalize(true) … … 124 124 return; 125 125 126 if (buffer->sampleRate() != context() ->sampleRate()) {126 if (buffer->sampleRate() != context().sampleRate()) { 127 127 ec = NOT_SUPPORTED_ERR; 128 128 return; … … 147 147 148 148 // Create the reverb with the given impulse response. 149 bool useBackgroundThreads = !context() ->isOfflineContext();149 bool useBackgroundThreads = !context().isOfflineContext(); 150 150 auto reverb = std::make_unique<Reverb>(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize); 151 151 -
trunk/Source/WebCore/Modules/webaudio/ConvolverNode.h
r191492 r196603 38 38 class ConvolverNode : public AudioNode { 39 39 public: 40 static Ref<ConvolverNode> create(AudioContext *context, float sampleRate)40 static Ref<ConvolverNode> create(AudioContext& context, float sampleRate) 41 41 { 42 42 return adoptRef(*new ConvolverNode(context, sampleRate)); … … 59 59 60 60 private: 61 ConvolverNode(AudioContext *, float sampleRate);61 ConvolverNode(AudioContext&, float sampleRate); 62 62 63 63 virtual double tailTime() const override; -
trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
(r196602 → r196603)

     namespace WebCore {

    -DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext* context)
    +DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext& context)
         : AudioDestinationNode(context, AudioDestination::hardwareSampleRate())
         , m_numberOfInputChannels(0)
    …
         if (isInitialized())
             m_destination->start();
    -    if (auto scriptExecutionContext = context()->scriptExecutionContext())
    +    if (auto scriptExecutionContext = context().scriptExecutionContext())
             scriptExecutionContext->postTask(function);
     }
    …
         if (isInitialized())
             m_destination->stop();
    -    if (auto scriptExecutionContext = context()->scriptExecutionContext())
    +    if (auto scriptExecutionContext = context().scriptExecutionContext())
             scriptExecutionContext->postTask(function);
     }
    …
         ASSERT(isInitialized());
         uninitialize();
    -    if (auto scriptExecutionContext = context()->scriptExecutionContext())
    +    if (auto scriptExecutionContext = context().scriptExecutionContext())
             scriptExecutionContext->postTask(function);
     }
trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
r184940 r196603 36 36 class DefaultAudioDestinationNode : public AudioDestinationNode { 37 37 public: 38 static Ref<DefaultAudioDestinationNode> create(AudioContext *context)38 static Ref<DefaultAudioDestinationNode> create(AudioContext& context) 39 39 { 40 40 return adoptRef(*new DefaultAudioDestinationNode(context)); … … 58 58 59 59 private: 60 explicit DefaultAudioDestinationNode(AudioContext *);60 explicit DefaultAudioDestinationNode(AudioContext&); 61 61 void createDestination(); 62 62 -
trunk/Source/WebCore/Modules/webaudio/DelayNode.cpp
r162368 r196603 33 33 const double maximumAllowedDelayTime = 180; 34 34 35 DelayNode::DelayNode(AudioContext *context, float sampleRate, double maxDelayTime, ExceptionCode& ec)35 DelayNode::DelayNode(AudioContext& context, float sampleRate, double maxDelayTime, ExceptionCode& ec) 36 36 : AudioBasicProcessorNode(context, sampleRate) 37 37 { -
trunk/Source/WebCore/Modules/webaudio/DelayNode.h
r177733 r196603 37 37 class DelayNode : public AudioBasicProcessorNode { 38 38 public: 39 static Ref<DelayNode> create(AudioContext *context, float sampleRate, double maxDelayTime, ExceptionCode& ec)39 static Ref<DelayNode> create(AudioContext& context, float sampleRate, double maxDelayTime, ExceptionCode& ec) 40 40 { 41 41 return adoptRef(*new DelayNode(context, sampleRate, maxDelayTime, ec)); … … 45 45 46 46 private: 47 DelayNode(AudioContext *, float sampleRate, double maxDelayTime, ExceptionCode&);47 DelayNode(AudioContext&, float sampleRate, double maxDelayTime, ExceptionCode&); 48 48 49 49 DelayProcessor* delayProcessor() { return static_cast<DelayProcessor*>(processor()); } -
trunk/Source/WebCore/Modules/webaudio/DelayProcessor.cpp
r162368 r196603 33 33 namespace WebCore { 34 34 35 DelayProcessor::DelayProcessor(AudioContext *context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)35 DelayProcessor::DelayProcessor(AudioContext& context, float sampleRate, unsigned numberOfChannels, double maxDelayTime) 36 36 : AudioDSPKernelProcessor(sampleRate, numberOfChannels) 37 37 , m_maxDelayTime(maxDelayTime) -
trunk/Source/WebCore/Modules/webaudio/DelayProcessor.h
r162368 r196603 37 37 class DelayProcessor : public AudioDSPKernelProcessor { 38 38 public: 39 DelayProcessor(AudioContext *, float sampleRate, unsigned numberOfChannels, double maxDelayTime);39 DelayProcessor(AudioContext&, float sampleRate, unsigned numberOfChannels, double maxDelayTime); 40 40 virtual ~DelayProcessor(); 41 41 -
trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp
r162376 r196603 39 39 namespace WebCore { 40 40 41 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext *context, float sampleRate)41 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext& context, float sampleRate) 42 42 : AudioNode(context, sampleRate) 43 43 { -
trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h
r177733 r196603 36 36 class DynamicsCompressorNode : public AudioNode { 37 37 public: 38 static Ref<DynamicsCompressorNode> create(AudioContext *context, float sampleRate)38 static Ref<DynamicsCompressorNode> create(AudioContext& context, float sampleRate) 39 39 { 40 40 return adoptRef(*new DynamicsCompressorNode(context, sampleRate)); … … 63 63 virtual double latencyTime() const override; 64 64 65 DynamicsCompressorNode(AudioContext *, float sampleRate);65 DynamicsCompressorNode(AudioContext&, float sampleRate); 66 66 67 67 std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor; -
trunk/Source/WebCore/Modules/webaudio/GainNode.cpp
r162368 → r196603

   35  namespace WebCore {
   36
-  37  GainNode::GainNode(AudioContext* context, float sampleRate)
+  37  GainNode::GainNode(AudioContext& context, float sampleRate)
   38      : AudioNode(context, sampleRate)
   39      , m_lastGain(1.0)
    …
   92  void GainNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
   93  {
-  94      ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+  94      ASSERT(context().isAudioThread() && context().isGraphOwner());
   95
   96      ASSERT(input && input == this->input(0));
trunk/Source/WebCore/Modules/webaudio/GainNode.h
r184940 → r196603

   40  class GainNode : public AudioNode {
   41  public:
-  42      static Ref<GainNode> create(AudioContext* context, float sampleRate)
+  42      static Ref<GainNode> create(AudioContext& context, float sampleRate)
   43      {
   44          return adoptRef(*new GainNode(context, sampleRate));
    …
   59      virtual double latencyTime() const override { return 0; }
   60
-  61      GainNode(AudioContext*, float sampleRate);
+  61      GainNode(AudioContext&, float sampleRate);
   62
   63      float m_lastGain; // for de-zippering
trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
r188642 → r196603

   41  namespace WebCore {
   42
-  43  Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext* context, HTMLMediaElement* mediaElement)
+  43  Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext& context, HTMLMediaElement* mediaElement)
   44  {
   45      return adoptRef(*new MediaElementAudioSourceNode(context, mediaElement));
   46  }
   47
-  48  MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context, HTMLMediaElement* mediaElement)
-  49      : AudioNode(context, context->sampleRate())
+  48  MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext& context, HTMLMediaElement* mediaElement)
+  49      : AudioNode(context, context.sampleRate())
   50      , m_mediaElement(mediaElement)
   51      , m_sourceNumberOfChannels(0)
    …
   93  {
   94      // The context must be locked when changing the number of output channels.
-  95      AudioContext::AutoLocker contextLocker(*context());
+  95      AudioContext::AutoLocker contextLocker(context());
   96
   97      // Do any necesssary re-configuration to the output's number of channels.
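Editor's note: the call-site change from AutoLocker contextLocker(*context()) to AutoLocker contextLocker(context()) works because the locker already takes the context by reference; once context() itself returns a reference, the explicit dereference disappears. The sketch below uses hypothetical stand-in types (not WebKit's actual AudioContext::AutoLocker) to show that RAII locking shape.

#include <iostream>
#include <mutex>

class AudioContext {
public:
    void lock() { m_mutex.lock(); }
    void unlock() { m_mutex.unlock(); }

    // RAII helper: locks the context for its lifetime, taking it by reference.
    class AutoLocker {
    public:
        explicit AutoLocker(AudioContext& context)
            : m_context(context)
        {
            m_context.lock();
        }
        ~AutoLocker() { m_context.unlock(); }

    private:
        AudioContext& m_context;
    };

private:
    std::mutex m_mutex;
};

class MediaSourceNodeSketch {
public:
    explicit MediaSourceNodeSketch(AudioContext& context)
        : m_context(context) { }

    AudioContext& context() { return m_context; }

    void setFormat()
    {
        // Previously the caller wrote AutoLocker contextLocker(*context()) because
        // context() returned a pointer; with a reference return the '*' goes away.
        AudioContext::AutoLocker contextLocker(context());
        std::cout << "reconfiguring output channels under the context lock\n";
    }

private:
    AudioContext& m_context;
};

int main()
{
    AudioContext context;
    MediaSourceNodeSketch node(context);
    node.setFormat();
}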
trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h
r188642 → r196603

   42  class MediaElementAudioSourceNode : public AudioNode, public AudioSourceProviderClient {
   43  public:
-  44      static Ref<MediaElementAudioSourceNode> create(AudioContext*, HTMLMediaElement*);
+  44      static Ref<MediaElementAudioSourceNode> create(AudioContext&, HTMLMediaElement*);
   45
   46      virtual ~MediaElementAudioSourceNode();
    …
   59
   60  private:
-  61      MediaElementAudioSourceNode(AudioContext*, HTMLMediaElement*);
+  61      MediaElementAudioSourceNode(AudioContext&, HTMLMediaElement*);
   62
   63      virtual double tailTime() const override { return 0; }
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp
r194496 → r196603

   38  namespace WebCore {
   39
-  40  Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext* context, size_t numberOfChannels)
+  40  Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext& context, size_t numberOfChannels)
   41  {
   42      return adoptRef(*new MediaStreamAudioDestinationNode(context, numberOfChannels));
   43  }
   44
-  45  MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* context, size_t numberOfChannels)
-  46      : AudioBasicInspectorNode(context, context->sampleRate(), numberOfChannels)
+  45  MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext& context, size_t numberOfChannels)
+  46      : AudioBasicInspectorNode(context, context.sampleRate(), numberOfChannels)
   47      , m_mixBus(AudioBus::create(numberOfChannels, ProcessingSizeInFrames))
   48  {
    …
   51      m_source = MediaStreamAudioSource::create();
   52      Vector<RefPtr<RealtimeMediaSource>> audioSources(1, m_source);
-  53      m_stream = MediaStream::create(*context->scriptExecutionContext(), MediaStreamPrivate::create(WTFMove(audioSources), Vector<RefPtr<RealtimeMediaSource>>()));
+  53      m_stream = MediaStream::create(*context.scriptExecutionContext(), MediaStreamPrivate::create(WTFMove(audioSources), Vector<RefPtr<RealtimeMediaSource>>()));
   54
-  55      m_source->setAudioFormat(numberOfChannels, context->sampleRate());
+  55      m_source->setAudioFormat(numberOfChannels, context.sampleRate());
   56
   57      initialize();
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h
r184940 → r196603

   40  class MediaStreamAudioDestinationNode : public AudioBasicInspectorNode {
   41  public:
-  42      static Ref<MediaStreamAudioDestinationNode> create(AudioContext*, size_t numberOfChannels);
+  42      static Ref<MediaStreamAudioDestinationNode> create(AudioContext&, size_t numberOfChannels);
   43
   44      virtual ~MediaStreamAudioDestinationNode();
    …
   53
   54  private:
-  55      MediaStreamAudioDestinationNode(AudioContext*, size_t numberOfChannels);
+  55      MediaStreamAudioDestinationNode(AudioContext&, size_t numberOfChannels);
   56
   57      virtual double tailTime() const override { return 0; }
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
r191721 → r196603

   42
   43  MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
-  44      : AudioNode(&context, context.sampleRate())
+  44      : AudioNode(context, context.sampleRate())
   45      , m_mediaStream(mediaStream)
   46      , m_audioTrack(audioTrack)
    …
   98  {
   99      // The context must be locked when changing the number of output channels.
- 100      AudioContext::AutoLocker contextLocker(*context());
+ 100      AudioContext::AutoLocker contextLocker(context());
  101
  102      // Do any necesssary re-configuration to the output's number of channels.
trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp
r188772 → r196603

   39  const size_t renderQuantumSize = 128;
   40
-  41  OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext* context, AudioBuffer* renderTarget)
+  41  OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext& context, AudioBuffer* renderTarget)
   42      : AudioDestinationNode(context, renderTarget->sampleRate())
   43      , m_renderTarget(renderTarget)
    …
  103          return;
  104
- 105      bool isAudioContextInitialized = context()->isInitialized();
+ 105      bool isAudioContextInitialized = context().isInitialized();
  106      ASSERT(isAudioContextInitialized);
  107      if (!isAudioContextInitialized)
    …
  149  void OfflineAudioDestinationNode::notifyComplete()
  150  {
- 151      context()->fireCompletionEvent();
+ 151      context().fireCompletionEvent();
  152
  153
trunk/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
r188772 → r196603

   39  class OfflineAudioDestinationNode : public AudioDestinationNode {
   40  public:
-  41      static Ref<OfflineAudioDestinationNode> create(AudioContext* context, AudioBuffer* renderTarget)
+  41      static Ref<OfflineAudioDestinationNode> create(AudioContext& context, AudioBuffer* renderTarget)
   42      {
   43          return adoptRef(*new OfflineAudioDestinationNode(context, renderTarget));
    …
   57
   58  private:
-  59      OfflineAudioDestinationNode(AudioContext*, AudioBuffer* renderTarget);
+  59      OfflineAudioDestinationNode(AudioContext&, AudioBuffer* renderTarget);
   60
   61      // This AudioNode renders into this AudioBuffer.
trunk/Source/WebCore/Modules/webaudio/OscillatorNode.cpp
r188642 → r196603

   47  PeriodicWave* OscillatorNode::s_periodicWaveTriangle = nullptr;
   48
-  49  Ref<OscillatorNode> OscillatorNode::create(AudioContext* context, float sampleRate)
+  49  Ref<OscillatorNode> OscillatorNode::create(AudioContext& context, float sampleRate)
   50  {
   51      return adoptRef(*new OscillatorNode(context, sampleRate));
   52  }
   53
-  54  OscillatorNode::OscillatorNode(AudioContext* context, float sampleRate)
+  54  OscillatorNode::OscillatorNode(AudioContext& context, float sampleRate)
   55      : AudioScheduledSourceNode(context, sampleRate)
   56      , m_type(SINE)
trunk/Source/WebCore/Modules/webaudio/OscillatorNode.h
r188642 → r196603

   52  };
   53
-  54      static Ref<OscillatorNode> create(AudioContext*, float sampleRate);
+  54      static Ref<OscillatorNode> create(AudioContext&, float sampleRate);
   55
   56      virtual ~OscillatorNode();
    …
   71
   72  private:
-  73      OscillatorNode(AudioContext*, float sampleRate);
+  73      OscillatorNode(AudioContext&, float sampleRate);
   74
   75      virtual double tailTime() const override { return 0; }
trunk/Source/WebCore/Modules/webaudio/PannerNode.cpp
r192281 → r196603

   47  }
   48
-  49  PannerNode::PannerNode(AudioContext* context, float sampleRate)
+  49  PannerNode::PannerNode(AudioContext& context, float sampleRate)
   50      : AudioNode(context, sampleRate)
   51      , m_panningModel(Panner::PanningModelHRTF)
    …
   54  {
   55      // Load the HRTF database asynchronously so we don't block the Javascript thread while creating the HRTF database.
-  56      m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context->sampleRate());
+  56      m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate());
   57
   58      addInput(std::make_unique<AudioNodeInput>(this));
    …
   85      // We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made.
   86      // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
-  87      if (m_connectionCount != context()->connectionCount()) {
-  88          m_connectionCount = context()->connectionCount();
+  87      if (m_connectionCount != context().connectionCount()) {
+  88          m_connectionCount = context().connectionCount();
   89
   90          // Recursively go through all nodes connected to us.
    …
  113      // HRTFDatabase should be loaded before proceeding for offline audio context when panningModel() is "HRTF".
  114      if (panningModel() == "HRTF" && !m_hrtfDatabaseLoader->isLoaded()) {
- 115          if (context()->isOfflineContext())
+ 115          if (context().isOfflineContext())
  116              m_hrtfDatabaseLoader->waitForLoaderThreadCompletion();
  117          else {
    …
  174  AudioListener* PannerNode::listener()
  175  {
- 176      return context()->listener();
+ 176      return context().listener();
  177  }
  178
    …
  219      case SOUNDFIELD:
  220          // FIXME: Implement sound field model. See // https://bugs.webkit.org/show_bug.cgi?id=77367.
- 221          context()->scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("'soundfield' panning model not implemented."));
+ 221          context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("'soundfield' panning model not implemented."));
  222          break;
  223      default:
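Editor's note: the pullInputs() hunk above caches the context's connection count and only rescans the graph when that count changes, a cheap generation-counter check on the audio thread. The sketch below uses simplified stand-in types (not the PannerNode implementation) to show that check in isolation.

#include <iostream>

struct AudioContext {
    unsigned connectionCount() const { return m_connectionCount; }
    void noteNewConnection() { ++m_connectionCount; }
    unsigned m_connectionCount { 0 };
};

class PannerLikeNode {
public:
    explicit PannerLikeNode(AudioContext& context)
        : m_context(context) { }

    void pullInputs()
    {
        // Cheap comparison on every render quantum; the expensive recursive walk only
        // happens when a connection has been made since the last pull.
        if (m_connectionCount != m_context.connectionCount()) {
            m_connectionCount = m_context.connectionCount();
            notifyNewSources();
        }
    }

private:
    void notifyNewSources() { std::cout << "rescanning graph for new source nodes\n"; }

    AudioContext& m_context;
    unsigned m_connectionCount { 0 };
};

int main()
{
    AudioContext context;
    PannerLikeNode panner(context);
    panner.pullInputs();          // counts match: no rescan
    context.noteNewConnection();
    panner.pullInputs();          // count changed: rescans once
    panner.pullInputs();          // count unchanged again: no rescan
}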
trunk/Source/WebCore/Modules/webaudio/PannerNode.h
r192281 → r196603

   65  };
   66
-  67      static Ref<PannerNode> create(AudioContext* context, float sampleRate)
+  67      static Ref<PannerNode> create(AudioContext& context, float sampleRate)
   68      {
   69          return adoptRef(*new PannerNode(context, sampleRate));
    …
  134
  135  private:
- 136      PannerNode(AudioContext*, float sampleRate);
+ 136      PannerNode(AudioContext&, float sampleRate);
  137
  138      // Returns the combined distance and cone gain attenuation.
trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
r194496 → r196603

   41  namespace WebCore {
   42
-  43  RefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+  43  RefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
   44  {
   45      // Check for valid buffer size.
    …
   69  }
   70
-  71  ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+  71  ScriptProcessorNode::ScriptProcessorNode(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
   72      : AudioNode(context, sampleRate)
   73      , m_doubleBufferIndex(0)
    …
  105          return;
  106
- 107      float sampleRate = context()->sampleRate();
+ 107      float sampleRate = context().sampleRate();
  108
  109      // Create double buffers on both the input and output sides.
    …
  240
  241      // Avoid firing the event if the document has already gone away.
- 242      if (context()->scriptExecutionContext()) {
+ 242      if (context().scriptExecutionContext()) {
  243          // Let the audio thread know we've gotten to the point where it's OK for it to make another request.
  244          m_isRequestOutstanding = false;
    …
  246          // Calculate playbackTime with the buffersize which needs to be processed each time when onaudioprocess is called.
  247          // The outputBuffer being passed to JS will be played after exhausting previous outputBuffer by double-buffering.
- 248          double playbackTime = (context()->currentSampleFrame() + m_bufferSize) / static_cast<double>(context()->sampleRate());
+ 248          double playbackTime = (context().currentSampleFrame() + m_bufferSize) / static_cast<double>(context().sampleRate());
  249
  250          // Call the JavaScript event handler which will do the audio processing.
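Editor's note: the playbackTime line above answers "when will the buffer handed to onaudioprocess actually be heard?" A small worked example of that arithmetic, using assumed values rather than anything from this changeset (44.1 kHz context, 1024-frame buffer, 88200 frames already rendered):

#include <cstdio>

int main()
{
    const double sampleRate = 44100;         // stands in for context().sampleRate()
    const size_t currentSampleFrame = 88200; // stands in for context().currentSampleFrame(): 2 s rendered so far
    const size_t bufferSize = 1024;          // stands in for m_bufferSize

    // Because of double-buffering, the buffer passed to the JS handler starts playing
    // one buffer length after the frames rendered so far:
    // (88200 + 1024) / 44100 ≈ 2.0232 seconds.
    double playbackTime = (currentSampleFrame + bufferSize) / sampleRate;
    std::printf("playbackTime = %.4f s\n", playbackTime);
}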
trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.h
r189565 → r196603

   54      // Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
   55      // The value chosen must carefully balance between latency and audio quality.
-  56      static RefPtr<ScriptProcessorNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+  56      static RefPtr<ScriptProcessorNode> create(AudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
   57
   58      virtual ~ScriptProcessorNode();
    …
   70      virtual double latencyTime() const override;
   71
-  72      ScriptProcessorNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+  72      ScriptProcessorNode(AudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
   73
   74      void fireProcessEvent();
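Editor's note: the bufferSize comment above describes a latency-versus-glitch trade-off. A quick back-of-envelope check of what that means, assuming a 44.1 kHz context; the buffer sizes are the power-of-two values the Web Audio API accepts for createScriptProcessor, and the latency figure is simply bufferSize / sampleRate.

#include <cstdio>

int main()
{
    const double sampleRate = 44100;
    const unsigned bufferSizes[] = { 256, 512, 1024, 2048, 4096, 8192, 16384 };

    // Larger buffers mean fewer, longer onaudioprocess callbacks: more slack before
    // a glitch, but more delay between input and the script's output being heard.
    for (unsigned bufferSize : bufferSizes) {
        double latencyMs = 1000.0 * bufferSize / sampleRate;
        std::printf("bufferSize %5u -> %7.2f ms per onaudioprocess callback\n", bufferSize, latencyMs);
    }
}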
trunk/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp
r162368 → r196603

   34  namespace WebCore {
   35
-  36  WaveShaperNode::WaveShaperNode(AudioContext* context)
-  37      : AudioBasicProcessorNode(context, context->sampleRate())
+  36  WaveShaperNode::WaveShaperNode(AudioContext& context)
+  37      : AudioBasicProcessorNode(context, context.sampleRate())
   38  {
-  39      m_processor = std::make_unique<WaveShaperProcessor>(context->sampleRate(), 1);
+  39      m_processor = std::make_unique<WaveShaperProcessor>(context.sampleRate(), 1);
   40      setNodeType(NodeTypeWaveShaper);
   41
    …
   59
   60      // Synchronize with any graph changes or changes to channel configuration.
-  61      AudioContext::AutoLocker contextLocker(*context());
+  61      AudioContext::AutoLocker contextLocker(context());
   62
   63      if (type == "none")
trunk/Source/WebCore/Modules/webaudio/WaveShaperNode.h
r184940 → r196603

   35  class WaveShaperNode : public AudioBasicProcessorNode {
   36  public:
-  37      static Ref<WaveShaperNode> create(AudioContext* context)
+  37      static Ref<WaveShaperNode> create(AudioContext& context)
   38      {
   39          return adoptRef(*new WaveShaperNode(context));
    …
   50
   51  private:
-  52      explicit WaveShaperNode(AudioContext*);
+  52      explicit WaveShaperNode(AudioContext&);
   53
   54      WaveShaperProcessor* waveShaperProcessor() { return static_cast<WaveShaperProcessor*>(processor()); }