Changeset 162368 in WebKit


Timestamp: Jan 20, 2014 12:36:11 PM (10 years ago)
Author: zandobersek@gmail.com
Message:

Move WebAudio source code to std::unique_ptr
https://bugs.webkit.org/show_bug.cgi?id=127274

Reviewed by Eric Carlson.

Move from using OwnPtr and PassOwnPtr to using std::unique_ptr and move semantics
in the WebAudio module and the WebAudio code in the platform layer.
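
For reference, the shape of the conversion applied throughout these files is sketched below. This is an illustrative, self-contained C++14 example, not code from this patch: Widget, Holder, and createWidget are hypothetical stand-ins for the real WebAudio classes, and the real tree uses WTF::Vector rather than std::vector.

    #include <memory>
    #include <utility>
    #include <vector>

    struct Widget {
        explicit Widget(unsigned size) : size(size) { }
        unsigned size;
    };

    class Holder {
    public:
        // Before: void add(PassOwnPtr<Widget>) storing into Vector<OwnPtr<Widget>>.
        // After: ownership is taken by value and moved into the container.
        void add(std::unique_ptr<Widget> widget) { m_widgets.push_back(std::move(widget)); }

        void teardown()
        {
            m_single.reset(); // OwnPtr::clear() becomes std::unique_ptr::reset().
            m_widgets.clear();
        }

    private:
        std::unique_ptr<Widget> m_single = std::make_unique<Widget>(1);
        std::vector<std::unique_ptr<Widget>> m_widgets;
    };

    // Factories return std::unique_ptr by value instead of PassOwnPtr;
    // adoptPtr(new Widget(...)) becomes std::make_unique<Widget>(...).
    std::unique_ptr<Widget> createWidget(unsigned size)
    {
        return std::make_unique<Widget>(size);
    }

    int main()
    {
        Holder holder;
        holder.add(createWidget(8)); // ownership transfers via move semantics
        holder.teardown();
        return 0;
    }

The same pattern recurs in the diffs below: adoptPtr(new T(...)) becomes std::make_unique<T>(...), PassOwnPtr<T> parameters and return types become std::unique_ptr<T> passed and returned by value, OwnPtr<T>::clear() becomes reset(), and .release() at a return or assignment becomes std::move() or is dropped entirely.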

  • Modules/webaudio/AsyncAudioDecoder.cpp:
  • Modules/webaudio/AsyncAudioDecoder.h:
  • Modules/webaudio/AudioBasicInspectorNode.cpp:
    (WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
  • Modules/webaudio/AudioBasicProcessorNode.cpp:
    (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
  • Modules/webaudio/AudioBasicProcessorNode.h:
  • Modules/webaudio/AudioBufferSourceNode.cpp:
    (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
  • Modules/webaudio/AudioContext.cpp:
  • Modules/webaudio/AudioContext.h:
  • Modules/webaudio/AudioDestinationNode.cpp:
    (WebCore::AudioDestinationNode::AudioDestinationNode):
  • Modules/webaudio/AudioNode.cpp:
    (WebCore::AudioNode::addInput):
    (WebCore::AudioNode::addOutput):
    (WebCore::AudioNode::checkNumberOfChannelsForInput):
  • Modules/webaudio/AudioNode.h:
  • Modules/webaudio/BiquadFilterNode.cpp:
    (WebCore::BiquadFilterNode::BiquadFilterNode):
  • Modules/webaudio/BiquadProcessor.cpp:
    (WebCore::BiquadProcessor::createKernel):
    (WebCore::BiquadProcessor::getFrequencyResponse):
  • Modules/webaudio/BiquadProcessor.h:
  • Modules/webaudio/ChannelMergerNode.cpp:
    (WebCore::ChannelMergerNode::ChannelMergerNode):
  • Modules/webaudio/ChannelSplitterNode.cpp:
    (WebCore::ChannelSplitterNode::ChannelSplitterNode):
  • Modules/webaudio/ConvolverNode.cpp:
    (WebCore::ConvolverNode::ConvolverNode):
    (WebCore::ConvolverNode::uninitialize):
    (WebCore::ConvolverNode::setBuffer):
  • Modules/webaudio/ConvolverNode.h:
  • Modules/webaudio/DefaultAudioDestinationNode.h:
  • Modules/webaudio/DelayNode.cpp:
    (WebCore::DelayNode::DelayNode):
  • Modules/webaudio/DelayProcessor.cpp:
    (WebCore::DelayProcessor::createKernel):
  • Modules/webaudio/DelayProcessor.h:
  • Modules/webaudio/DynamicsCompressorNode.cpp:
    (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
    (WebCore::DynamicsCompressorNode::initialize):
    (WebCore::DynamicsCompressorNode::uninitialize):
  • Modules/webaudio/DynamicsCompressorNode.h:
  • Modules/webaudio/GainNode.cpp:
    (WebCore::GainNode::GainNode):
  • Modules/webaudio/MediaElementAudioSourceNode.cpp:
    (WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
    (WebCore::MediaElementAudioSourceNode::setFormat):
  • Modules/webaudio/MediaElementAudioSourceNode.h:
  • Modules/webaudio/MediaStreamAudioDestinationNode.h:
  • Modules/webaudio/MediaStreamAudioSource.cpp:
  • Modules/webaudio/MediaStreamAudioSourceNode.cpp:
    (WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
  • Modules/webaudio/MediaStreamAudioSourceNode.h:
  • Modules/webaudio/OscillatorNode.cpp:
    (WebCore::OscillatorNode::OscillatorNode):
  • Modules/webaudio/PannerNode.cpp:
    (WebCore::PannerNode::PannerNode):
    (WebCore::PannerNode::uninitialize):
    (WebCore::PannerNode::setPanningModel):
  • Modules/webaudio/PannerNode.h:
  • Modules/webaudio/PeriodicWave.cpp:
    (WebCore::PeriodicWave::createBandLimitedTables):
  • Modules/webaudio/PeriodicWave.h:
  • Modules/webaudio/RealtimeAnalyser.cpp:
    (WebCore::RealtimeAnalyser::RealtimeAnalyser):
    (WebCore::RealtimeAnalyser::setFftSize):
  • Modules/webaudio/RealtimeAnalyser.h:
  • Modules/webaudio/ScriptProcessorNode.cpp:
    (WebCore::ScriptProcessorNode::ScriptProcessorNode):
  • Modules/webaudio/WaveShaperDSPKernel.cpp:
    (WebCore::WaveShaperDSPKernel::lazyInitializeOversampling):
  • Modules/webaudio/WaveShaperDSPKernel.h:
  • Modules/webaudio/WaveShaperNode.cpp:
    (WebCore::WaveShaperNode::WaveShaperNode):
  • Modules/webaudio/WaveShaperProcessor.cpp:
    (WebCore::WaveShaperProcessor::createKernel):
  • Modules/webaudio/WaveShaperProcessor.h:
  • platform/audio/AudioBus.cpp:
    (WebCore::AudioBus::AudioBus):
    (WebCore::AudioBus::copyWithGainFrom):
  • platform/audio/AudioBus.h:
  • platform/audio/AudioChannel.cpp:
  • platform/audio/AudioChannel.h:
    (WebCore::AudioChannel::AudioChannel):
    (WebCore::AudioChannel::set):
  • platform/audio/AudioDSPKernelProcessor.h:
  • platform/audio/AudioDestination.h:
  • platform/audio/AudioResampler.cpp:
    (WebCore::AudioResampler::AudioResampler):
    (WebCore::AudioResampler::configureChannels):
  • platform/audio/AudioResampler.h:
  • platform/audio/AudioSession.h:
  • platform/audio/DynamicsCompressor.cpp:
    (WebCore::DynamicsCompressor::setNumberOfChannels):
  • platform/audio/DynamicsCompressor.h:
  • platform/audio/DynamicsCompressorKernel.cpp:
    (WebCore::DynamicsCompressorKernel::setNumberOfChannels):
  • platform/audio/DynamicsCompressorKernel.h:
  • platform/audio/FFTFrame.cpp:
    (WebCore::FFTFrame::createInterpolatedFrame):
  • platform/audio/FFTFrame.h:
  • platform/audio/HRTFDatabase.cpp:
    (WebCore::HRTFDatabase::HRTFDatabase):
  • platform/audio/HRTFDatabase.h:
  • platform/audio/HRTFDatabaseLoader.cpp:
    (WebCore::HRTFDatabaseLoader::~HRTFDatabaseLoader):
    (WebCore::HRTFDatabaseLoader::load):
  • platform/audio/HRTFDatabaseLoader.h:
  • platform/audio/HRTFElevation.cpp:
    (WebCore::HRTFElevation::createForSubject):
    (WebCore::HRTFElevation::createByInterpolatingSlices):
  • platform/audio/HRTFElevation.h:
    (WebCore::HRTFElevation::HRTFElevation):
  • platform/audio/HRTFKernel.cpp:
    (WebCore::HRTFKernel::HRTFKernel):
    (WebCore::HRTFKernel::createImpulseResponse):
    (WebCore::HRTFKernel::createInterpolatedKernel):
  • platform/audio/HRTFKernel.h:
    (WebCore::HRTFKernel::create):
    (WebCore::HRTFKernel::HRTFKernel):
  • platform/audio/MultiChannelResampler.cpp:
    (WebCore::MultiChannelResampler::MultiChannelResampler):
  • platform/audio/MultiChannelResampler.h:
  • platform/audio/Panner.cpp:
    (WebCore::Panner::create):
  • platform/audio/Panner.h:
  • platform/audio/Reverb.cpp:
    (WebCore::Reverb::initialize):
  • platform/audio/Reverb.h:
  • platform/audio/ReverbConvolver.h:
  • platform/audio/ReverbConvolverStage.cpp:
    (WebCore::ReverbConvolverStage::ReverbConvolverStage):
  • platform/audio/ReverbConvolverStage.h:
  • platform/audio/gstreamer/AudioDestinationGStreamer.cpp:
    (WebCore::AudioDestination::create):
  • platform/audio/gstreamer/AudioFileReaderGStreamer.cpp:
  • platform/audio/ios/AudioDestinationIOS.cpp:
    (WebCore::AudioDestination::create):
  • platform/audio/ios/AudioSessionIOS.mm:
    (WebCore::AudioSession::AudioSession):
  • platform/audio/mac/AudioDestinationMac.cpp:
    (WebCore::AudioDestination::create):
  • platform/audio/mac/AudioDestinationMac.h:
  • platform/audio/mac/AudioSessionMac.cpp:
    (WebCore::AudioSession::AudioSession):
Location: trunk/Source/WebCore
Files: 84 edited

  • trunk/Source/WebCore/ChangeLog

    r162366 r162368  
     12014-01-20  Zan Dobersek  <zdobersek@igalia.com>
     2
     3        Move WebAudio source code to std::unique_ptr
     4        https://bugs.webkit.org/show_bug.cgi?id=127274
     5
     6        Reviewed by Eric Carlson.
     7
     8        Move from using OwnPtr and PassOwnPtr to using std::unique_ptr and move semantics
     9        in the WebAudio module and the WebAudio code in the platform layer.
     10
     11        * Modules/webaudio/AsyncAudioDecoder.cpp:
     12        * Modules/webaudio/AsyncAudioDecoder.h:
     13        * Modules/webaudio/AudioBasicInspectorNode.cpp:
     14        (WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
     15        * Modules/webaudio/AudioBasicProcessorNode.cpp:
     16        (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
     17        * Modules/webaudio/AudioBasicProcessorNode.h:
     18        * Modules/webaudio/AudioBufferSourceNode.cpp:
     19        (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
     20        * Modules/webaudio/AudioContext.cpp:
     21        * Modules/webaudio/AudioContext.h:
     22        * Modules/webaudio/AudioDestinationNode.cpp:
     23        (WebCore::AudioDestinationNode::AudioDestinationNode):
     24        * Modules/webaudio/AudioNode.cpp:
     25        (WebCore::AudioNode::addInput):
     26        (WebCore::AudioNode::addOutput):
     27        (WebCore::AudioNode::checkNumberOfChannelsForInput):
     28        * Modules/webaudio/AudioNode.h:
     29        * Modules/webaudio/BiquadFilterNode.cpp:
     30        (WebCore::BiquadFilterNode::BiquadFilterNode):
     31        * Modules/webaudio/BiquadProcessor.cpp:
     32        (WebCore::BiquadProcessor::createKernel):
     33        (WebCore::BiquadProcessor::getFrequencyResponse):
     34        * Modules/webaudio/BiquadProcessor.h:
     35        * Modules/webaudio/ChannelMergerNode.cpp:
     36        (WebCore::ChannelMergerNode::ChannelMergerNode):
     37        * Modules/webaudio/ChannelSplitterNode.cpp:
     38        (WebCore::ChannelSplitterNode::ChannelSplitterNode):
     39        * Modules/webaudio/ConvolverNode.cpp:
     40        (WebCore::ConvolverNode::ConvolverNode):
     41        (WebCore::ConvolverNode::uninitialize):
     42        (WebCore::ConvolverNode::setBuffer):
     43        * Modules/webaudio/ConvolverNode.h:
     44        * Modules/webaudio/DefaultAudioDestinationNode.h:
     45        * Modules/webaudio/DelayNode.cpp:
     46        (WebCore::DelayNode::DelayNode):
     47        * Modules/webaudio/DelayProcessor.cpp:
     48        (WebCore::DelayProcessor::createKernel):
     49        * Modules/webaudio/DelayProcessor.h:
     50        * Modules/webaudio/DynamicsCompressorNode.cpp:
     51        (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
     52        (WebCore::DynamicsCompressorNode::initialize):
     53        (WebCore::DynamicsCompressorNode::uninitialize):
     54        * Modules/webaudio/DynamicsCompressorNode.h:
     55        * Modules/webaudio/GainNode.cpp:
     56        (WebCore::GainNode::GainNode):
     57        * Modules/webaudio/MediaElementAudioSourceNode.cpp:
     58        (WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
     59        (WebCore::MediaElementAudioSourceNode::setFormat):
     60        * Modules/webaudio/MediaElementAudioSourceNode.h:
     61        * Modules/webaudio/MediaStreamAudioDestinationNode.h:
     62        * Modules/webaudio/MediaStreamAudioSource.cpp:
     63        * Modules/webaudio/MediaStreamAudioSourceNode.cpp:
     64        (WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
     65        * Modules/webaudio/MediaStreamAudioSourceNode.h:
     66        * Modules/webaudio/OscillatorNode.cpp:
     67        (WebCore::OscillatorNode::OscillatorNode):
     68        * Modules/webaudio/PannerNode.cpp:
     69        (WebCore::PannerNode::PannerNode):
     70        (WebCore::PannerNode::uninitialize):
     71        (WebCore::PannerNode::setPanningModel):
     72        * Modules/webaudio/PannerNode.h:
     73        * Modules/webaudio/PeriodicWave.cpp:
     74        (WebCore::PeriodicWave::createBandLimitedTables):
     75        * Modules/webaudio/PeriodicWave.h:
     76        * Modules/webaudio/RealtimeAnalyser.cpp:
     77        (WebCore::RealtimeAnalyser::RealtimeAnalyser):
     78        (WebCore::RealtimeAnalyser::setFftSize):
     79        * Modules/webaudio/RealtimeAnalyser.h:
     80        * Modules/webaudio/ScriptProcessorNode.cpp:
     81        (WebCore::ScriptProcessorNode::ScriptProcessorNode):
     82        * Modules/webaudio/WaveShaperDSPKernel.cpp:
     83        (WebCore::WaveShaperDSPKernel::lazyInitializeOversampling):
     84        * Modules/webaudio/WaveShaperDSPKernel.h:
     85        * Modules/webaudio/WaveShaperNode.cpp:
     86        (WebCore::WaveShaperNode::WaveShaperNode):
     87        * Modules/webaudio/WaveShaperProcessor.cpp:
     88        (WebCore::WaveShaperProcessor::createKernel):
     89        * Modules/webaudio/WaveShaperProcessor.h:
     90        * platform/audio/AudioBus.cpp:
     91        (WebCore::AudioBus::AudioBus):
     92        (WebCore::AudioBus::copyWithGainFrom):
     93        * platform/audio/AudioBus.h:
     94        * platform/audio/AudioChannel.cpp:
     95        * platform/audio/AudioChannel.h:
     96        (WebCore::AudioChannel::AudioChannel):
     97        (WebCore::AudioChannel::set):
     98        * platform/audio/AudioDSPKernelProcessor.h:
     99        * platform/audio/AudioDestination.h:
     100        * platform/audio/AudioResampler.cpp:
     101        (WebCore::AudioResampler::AudioResampler):
     102        (WebCore::AudioResampler::configureChannels):
     103        * platform/audio/AudioResampler.h:
     104        * platform/audio/AudioSession.h:
     105        * platform/audio/DynamicsCompressor.cpp:
     106        (WebCore::DynamicsCompressor::setNumberOfChannels):
     107        * platform/audio/DynamicsCompressor.h:
     108        * platform/audio/DynamicsCompressorKernel.cpp:
     109        (WebCore::DynamicsCompressorKernel::setNumberOfChannels):
     110        * platform/audio/DynamicsCompressorKernel.h:
     111        * platform/audio/FFTFrame.cpp:
     112        (WebCore::FFTFrame::createInterpolatedFrame):
     113        * platform/audio/FFTFrame.h:
     114        * platform/audio/HRTFDatabase.cpp:
     115        (WebCore::HRTFDatabase::HRTFDatabase):
     116        * platform/audio/HRTFDatabase.h:
     117        * platform/audio/HRTFDatabaseLoader.cpp:
     118        (WebCore::HRTFDatabaseLoader::~HRTFDatabaseLoader):
     119        (WebCore::HRTFDatabaseLoader::load):
     120        * platform/audio/HRTFDatabaseLoader.h:
     121        * platform/audio/HRTFElevation.cpp:
     122        (WebCore::HRTFElevation::createForSubject):
     123        (WebCore::HRTFElevation::createByInterpolatingSlices):
     124        * platform/audio/HRTFElevation.h:
     125        (WebCore::HRTFElevation::HRTFElevation):
     126        * platform/audio/HRTFKernel.cpp:
     127        (WebCore::HRTFKernel::HRTFKernel):
     128        (WebCore::HRTFKernel::createImpulseResponse):
     129        (WebCore::HRTFKernel::createInterpolatedKernel):
     130        * platform/audio/HRTFKernel.h:
     131        (WebCore::HRTFKernel::create):
     132        (WebCore::HRTFKernel::HRTFKernel):
     133        * platform/audio/MultiChannelResampler.cpp:
     134        (WebCore::MultiChannelResampler::MultiChannelResampler):
     135        * platform/audio/MultiChannelResampler.h:
     136        * platform/audio/Panner.cpp:
     137        (WebCore::Panner::create):
     138        * platform/audio/Panner.h:
     139        * platform/audio/Reverb.cpp:
     140        (WebCore::Reverb::initialize):
     141        * platform/audio/Reverb.h:
     142        * platform/audio/ReverbConvolver.h:
     143        * platform/audio/ReverbConvolverStage.cpp:
     144        (WebCore::ReverbConvolverStage::ReverbConvolverStage):
     145        * platform/audio/ReverbConvolverStage.h:
     146        * platform/audio/gstreamer/AudioDestinationGStreamer.cpp:
     147        (WebCore::AudioDestination::create):
     148        * platform/audio/gstreamer/AudioFileReaderGStreamer.cpp:
     149        * platform/audio/ios/AudioDestinationIOS.cpp:
     150        (WebCore::AudioDestination::create):
     151        * platform/audio/ios/AudioSessionIOS.mm:
     152        (WebCore::AudioSession::AudioSession):
     153        * platform/audio/mac/AudioDestinationMac.cpp:
     154        (WebCore::AudioDestination::create):
     155        * platform/audio/mac/AudioDestinationMac.h:
     156        * platform/audio/mac/AudioSessionMac.cpp:
     157        (WebCore::AudioSession::AudioSession):
     158
    11592014-01-20  Morten Stenshorne  <mstensho@opera.com>
    2160
  • trunk/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp

    r156972 r162368  
    3333#include <runtime/ArrayBuffer.h>
    3434#include <wtf/MainThread.h>
    35 #include <wtf/OwnPtr.h>
    36 #include <wtf/PassOwnPtr.h>
    3735
    3836namespace WebCore {
  • trunk/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h

    r156972 r162368  
    2626#define AsyncAudioDecoder_h
    2727
     28#include <memory>
    2829#include <wtf/Forward.h>
    2930#include <wtf/MessageQueue.h>
    30 #include <wtf/PassOwnPtr.h>
    3131#include <wtf/PassRefPtr.h>
    3232#include <wtf/RefPtr.h>
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp

    r161644 r162368  
    3939    , m_needAutomaticPull(false)
    4040{
    41     addInput(adoptPtr(new AudioNodeInput(this)));
    42     addOutput(adoptPtr(new AudioNodeOutput(this, outputChannelCount)));
     41    addInput(std::make_unique<AudioNodeInput>(this));
     42    addOutput(std::make_unique<AudioNodeOutput>(this, outputChannelCount));
    4343}
    4444
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp

    r146102 r162368  
    4040    : AudioNode(context, sampleRate)
    4141{
    42     addInput(adoptPtr(new AudioNodeInput(this)));
    43     addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
     42    addInput(std::make_unique<AudioNodeInput>(this));
     43    addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    4444
    4545    // The subclass must create m_processor.
  • trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h

    r162139 r162368  
    2727
    2828#include "AudioNode.h"
     29#include <memory>
    2930#include <wtf/PassRefPtr.h>
    3031#include <wtf/RefCounted.h>
     
    6061
    6162    AudioProcessor* processor() { return m_processor.get(); }
    62     OwnPtr<AudioProcessor> m_processor;
     63    std::unique_ptr<AudioProcessor> m_processor;
    6364};
    6465
  • trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp

    r162296 r162368  
    7474
    7575    // Default to mono.  A call to setBuffer() will set the number of output channels to that of the buffer.
    76     addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
     76    addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    7777
    7878    initialize();
  • trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp

    r161644 r162368  
    8989#include <wtf/Atomics.h>
    9090#include <wtf/MainThread.h>
    91 #include <wtf/OwnPtr.h>
    92 #include <wtf/PassOwnPtr.h>
    9391#include <wtf/Ref.h>
    9492#include <wtf/RefCounted.h>
  • trunk/Source/WebCore/Modules/webaudio/AudioContext.h

    r162158 r162368  
    3535#include <wtf/HashSet.h>
    3636#include <wtf/MainThread.h>
    37 #include <wtf/OwnPtr.h>
    3837#include <wtf/PassRefPtr.h>
    3938#include <wtf/RefCounted.h>
  • trunk/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp

    r151302 r162368  
    4141    , m_currentSampleFrame(0)
    4242{
    43     addInput(adoptPtr(new AudioNodeInput(this)));
     43    addInput(std::make_unique<AudioNodeInput>(this));
    4444   
    4545    setNodeType(NodeTypeDestination);
  • trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp

    r161644 r162368  
    9999}
    100100
    101 void AudioNode::addInput(PassOwnPtr<AudioNodeInput> input)
    102 {
    103     m_inputs.append(input);
    104 }
    105 
    106 void AudioNode::addOutput(PassOwnPtr<AudioNodeOutput> output)
    107 {
    108     m_outputs.append(output);
     101void AudioNode::addInput(std::unique_ptr<AudioNodeInput> input)
     102{
     103    m_inputs.append(std::move(input));
     104}
     105
     106void AudioNode::addOutput(std::unique_ptr<AudioNodeOutput> output)
     107{
     108    m_outputs.append(std::move(output));
    109109}
    110110
     
    325325    ASSERT(context()->isAudioThread() && context()->isGraphOwner());
    326326
    327     ASSERT(m_inputs.contains(input));
    328     if (!m_inputs.contains(input))
    329         return;
    330 
    331     input->updateInternalBus();
     327    for (const std::unique_ptr<AudioNodeInput>& savedInput : m_inputs) {
     328        if (input == savedInput.get()) {
     329            input->updateInternalBus();
     330            return;
     331        }
     332    }
     333
     334    ASSERT_NOT_REACHED();
    332335}
    333336
  • trunk/Source/WebCore/Modules/webaudio/AudioNode.h

    r162158 r162368  
    2828#include "AudioBus.h"
    2929#include "EventTarget.h"
     30#include <memory>
    3031#include <wtf/Forward.h>
    31 #include <wtf/OwnPtr.h>
    32 #include <wtf/PassOwnPtr.h>
    3332#include <wtf/RefPtr.h>
    3433#include <wtf/Vector.h>
     
    186185protected:
    187186    // Inputs and outputs must be created before the AudioNode is initialized.
    188     void addInput(PassOwnPtr<AudioNodeInput>);
    189     void addOutput(PassOwnPtr<AudioNodeOutput>);
     187    void addInput(std::unique_ptr<AudioNodeInput>);
     188    void addOutput(std::unique_ptr<AudioNodeOutput>);
    190189   
    191190    // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
     
    202201    RefPtr<AudioContext> m_context;
    203202    float m_sampleRate;
    204     Vector<OwnPtr<AudioNodeInput>> m_inputs;
    205     Vector<OwnPtr<AudioNodeOutput>> m_outputs;
     203    Vector<std::unique_ptr<AudioNodeInput>> m_inputs;
     204    Vector<std::unique_ptr<AudioNodeOutput>> m_outputs;
    206205
    207206    double m_lastProcessingTime;
  • trunk/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp

    r138849 r162368  
    3737{
    3838    // Initially setup as lowpass filter.
    39     m_processor = adoptPtr(new BiquadProcessor(context, sampleRate, 1, false));
     39    m_processor = std::make_unique<BiquadProcessor>(context, sampleRate, 1, false);
    4040    setNodeType(NodeTypeBiquadFilter);
    4141}
  • trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp

    r135218 r162368  
    6161}
    6262
    63 PassOwnPtr<AudioDSPKernel> BiquadProcessor::createKernel()
     63std::unique_ptr<AudioDSPKernel> BiquadProcessor::createKernel()
    6464{
    65     return adoptPtr(new BiquadDSPKernel(this));
     65    return std::make_unique<BiquadDSPKernel>(this);
    6666}
    6767
     
    129129    // thread on the main kernels.
    130130   
    131     OwnPtr<BiquadDSPKernel> responseKernel = adoptPtr(new BiquadDSPKernel(this));
     131    auto responseKernel = std::make_unique<BiquadDSPKernel>(this);
    132132
    133133    responseKernel->getFrequencyResponse(nFrequencies, frequencyHz, magResponse, phaseResponse);
  • trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.h

    r162139 r162368  
    3131#include "AudioParam.h"
    3232#include "Biquad.h"
     33#include <memory>
    3334#include <wtf/RefPtr.h>
    3435
     
    5455    virtual ~BiquadProcessor();
    5556   
    56     virtual PassOwnPtr<AudioDSPKernel> createKernel() override;
     57    virtual std::unique_ptr<AudioDSPKernel> createKernel() override;
    5758       
    5859    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
  • trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp

    r159442 r162368  
    5555    // Create the requested number of inputs.
    5656    for (unsigned i = 0; i < numberOfInputs; ++i)
    57         addInput(adoptPtr(new AudioNodeInput(this)));
     57        addInput(std::make_unique<AudioNodeInput>(this));
    5858
    59     addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
     59    addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    6060   
    6161    setNodeType(NodeTypeChannelMerger);
  • trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp

    r159442 r162368  
    4646    : AudioNode(context, sampleRate)
    4747{
    48     addInput(adoptPtr(new AudioNodeInput(this)));
     48    addInput(std::make_unique<AudioNodeInput>(this));
    4949
    5050    // Create a fixed number of outputs (able to handle the maximum number of channels fed to an input).
    5151    for (unsigned i = 0; i < numberOfOutputs; ++i)
    52         addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
     52        addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    5353   
    5454    setNodeType(NodeTypeChannelSplitter);
  • trunk/Source/WebCore/Modules/webaudio/ConvolverNode.cpp

    r162296 r162368  
    5050    , m_normalize(true)
    5151{
    52     addInput(adoptPtr(new AudioNodeInput(this)));
    53     addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
     52    addInput(std::make_unique<AudioNodeInput>(this));
     53    addOutput(std::make_unique<AudioNodeOutput>(this, 2));
    5454
    5555    // Node-specific default mixing rules.
     
    112112        return;
    113113
    114     m_reverb.clear();
     114    m_reverb.reset();
    115115    AudioNode::uninitialize();
    116116}
     
    142142    // Create the reverb with the given impulse response.
    143143    bool useBackgroundThreads = !context()->isOfflineContext();
    144     OwnPtr<Reverb> reverb = adoptPtr(new Reverb(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize));
     144    auto reverb = std::make_unique<Reverb>(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize);
    145145
    146146    {
    147147        // Synchronize with process().
    148148        std::lock_guard<std::mutex> lock(m_processMutex);
    149         m_reverb = reverb.release();
     149        m_reverb = std::move(reverb);
    150150        m_buffer = buffer;
    151151    }
  • trunk/Source/WebCore/Modules/webaudio/ConvolverNode.h

    r162296 r162368  
    2727
    2828#include "AudioNode.h"
     29#include <memory>
    2930#include <mutex>
    30 #include <wtf/OwnPtr.h>
    3131#include <wtf/RefPtr.h>
    3232
     
    6464    virtual double latencyTime() const override;
    6565
    66     OwnPtr<Reverb> m_reverb;
     66    std::unique_ptr<Reverb> m_reverb;
    6767    RefPtr<AudioBuffer> m_buffer;
    6868
  • trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h

    r162139 r162368  
    2828#include "AudioDestination.h"
    2929#include "AudioDestinationNode.h"
    30 #include <wtf/OwnPtr.h>
     30#include <memory>
    3131
    3232namespace WebCore {
     
    5757    void createDestination();
    5858
    59     OwnPtr<AudioDestination> m_destination;
     59    std::unique_ptr<AudioDestination> m_destination;
    6060    String m_inputDeviceId;
    6161    unsigned m_numberOfInputChannels;
  • trunk/Source/WebCore/Modules/webaudio/DelayNode.cpp

    r134644 r162368  
    4040        return;
    4141    }
    42     m_processor = adoptPtr(new DelayProcessor(context, sampleRate, 1, maxDelayTime));
     42    m_processor = std::make_unique<DelayProcessor>(context, sampleRate, 1, maxDelayTime);
    4343    setNodeType(NodeTypeDelay);
    4444}
  • trunk/Source/WebCore/Modules/webaudio/DelayProcessor.cpp

    r116485 r162368  
    4646}
    4747
    48 PassOwnPtr<AudioDSPKernel> DelayProcessor::createKernel()
     48std::unique_ptr<AudioDSPKernel> DelayProcessor::createKernel()
    4949{
    50     return adoptPtr(new DelayDSPKernel(this));
     50    return std::make_unique<DelayDSPKernel>(this);
    5151}
    5252
  • trunk/Source/WebCore/Modules/webaudio/DelayProcessor.h

    r162139 r162368  
    2828#include "AudioDSPKernelProcessor.h"
    2929#include "AudioParam.h"
    30 
    31 #include <wtf/PassOwnPtr.h>
     30#include <memory>
    3231#include <wtf/RefPtr.h>
    3332
     
    4140    virtual ~DelayProcessor();
    4241   
    43     virtual PassOwnPtr<AudioDSPKernel> createKernel() override;
     42    virtual std::unique_ptr<AudioDSPKernel> createKernel() override;
    4443       
    4544    AudioParam* delayTime() const { return m_delayTime.get(); }
  • trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp

    r116485 r162368  
    4242    : AudioNode(context, sampleRate)
    4343{
    44     addInput(adoptPtr(new AudioNodeInput(this)));
    45     addOutput(adoptPtr(new AudioNodeOutput(this, defaultNumberOfOutputChannels)));
     44    addInput(std::make_unique<AudioNodeInput>(this));
     45    addOutput(std::make_unique<AudioNodeOutput>(this, defaultNumberOfOutputChannels));
    4646
    4747    setNodeType(NodeTypeDynamicsCompressor);
     
    9696
    9797    AudioNode::initialize();   
    98     m_dynamicsCompressor = adoptPtr(new DynamicsCompressor(sampleRate(), defaultNumberOfOutputChannels));
     98    m_dynamicsCompressor = std::make_unique<DynamicsCompressor>(sampleRate(), defaultNumberOfOutputChannels);
    9999}
    100100
     
    104104        return;
    105105
    106     m_dynamicsCompressor.clear();
     106    m_dynamicsCompressor.reset();
    107107    AudioNode::uninitialize();
    108108}
  • trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h

    r162139 r162368  
    2828#include "AudioNode.h"
    2929#include "AudioParam.h"
    30 #include <wtf/OwnPtr.h>
     30#include <memory>
    3131
    3232namespace WebCore {
     
    6565    DynamicsCompressorNode(AudioContext*, float sampleRate);
    6666
    67     OwnPtr<DynamicsCompressor> m_dynamicsCompressor;
     67    std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor;
    6868    RefPtr<AudioParam> m_threshold;
    6969    RefPtr<AudioParam> m_knee;
  • trunk/Source/WebCore/Modules/webaudio/GainNode.cpp

    r146486 r162368  
    4242    m_gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0);
    4343
    44     addInput(adoptPtr(new AudioNodeInput(this)));
    45     addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
     44    addInput(std::make_unique<AudioNodeInput>(this));
     45    addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    4646
    4747    setNodeType(NodeTypeGain);
  • trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp

    r162296 r162368  
    5353{
    5454    // Default to stereo. This could change depending on what the media element .src is set to.
    55     addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
     55    addOutput(std::make_unique<AudioNodeOutput>(this, 2));
    5656
    5757    setNodeType(NodeTypeMediaElementAudioSource);
     
    8585        if (sourceSampleRate != sampleRate()) {
    8686            double scaleFactor = sourceSampleRate / sampleRate();
    87             m_multiChannelResampler = adoptPtr(new MultiChannelResampler(scaleFactor, numberOfChannels));
     87            m_multiChannelResampler = std::make_unique<MultiChannelResampler>(scaleFactor, numberOfChannels);
    8888        } else {
    8989            // Bypass resampling.
    90             m_multiChannelResampler.clear();
     90            m_multiChannelResampler.reset();
    9191        }
    9292
  • trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h

    r162296 r162368  
    3232#include "HTMLMediaElement.h"
    3333#include "MultiChannelResampler.h"
     34#include <memory>
    3435#include <mutex>
    35 #include <wtf/OwnPtr.h>
    3636#include <wtf/PassRefPtr.h>
    3737
     
    7373    double m_sourceSampleRate;
    7474
    75     OwnPtr<MultiChannelResampler> m_multiChannelResampler;
     75    std::unique_ptr<MultiChannelResampler> m_multiChannelResampler;
    7676};
    7777
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h

    r162139 r162368  
    3131#include "AudioBus.h"
    3232#include "MediaStream.h"
    33 #include <wtf/OwnPtr.h>
    3433#include <wtf/PassRefPtr.h>
    3534
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp

    r159442 r162368  
    3232#include "NotImplemented.h"
    3333#include "UUID.h"
    34 #include <wtf/PassOwnPtr.h>
    3534
    3635namespace WebCore {
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp

    r162296 r162368  
    4949{
    5050    // Default to stereo. This could change depending on the format of the MediaStream's audio track.
    51     addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
     51    addOutput(std::make_unique<AudioNodeOutput>(this, 2));
    5252
    5353    setNodeType(NodeTypeMediaStreamAudioSource);
  • trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h

    r162296 r162368  
    3333#include "MediaStream.h"
    3434#include <mutex>
    35 #include <wtf/OwnPtr.h>
    3635#include <wtf/PassRefPtr.h>
    3736
  • trunk/Source/WebCore/Modules/webaudio/OscillatorNode.cpp

    r162296 r162368  
    7171
    7272    // An oscillator is always mono.
    73     addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
     73    addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    7474
    7575    initialize();
  • trunk/Source/WebCore/Modules/webaudio/PannerNode.cpp

    r162296 r162368  
    5353    , m_connectionCount(0)
    5454{
    55     addInput(adoptPtr(new AudioNodeInput(this)));
    56     addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
     55    addInput(std::make_unique<AudioNodeInput>(this));
     56    addOutput(std::make_unique<AudioNodeOutput>(this, 2));
    5757
    5858    // Node-specific default mixing rules.
     
    155155        return;
    156156       
    157     m_panner.clear();
     157    m_panner.reset();
    158158    AudioNode::uninitialize();
    159159}
     
    200200            std::lock_guard<std::mutex> lock(m_pannerMutex);
    201201
    202             OwnPtr<Panner> newPanner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader());
    203             m_panner = newPanner.release();
     202            m_panner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader());
    204203            m_panningModel = model;
    205204        }
  • trunk/Source/WebCore/Modules/webaudio/PannerNode.h

    r162296 r162368  
    3434#include "FloatPoint3D.h"
    3535#include "Panner.h"
     36#include <memory>
    3637#include <mutex>
    37 #include <wtf/OwnPtr.h>
    3838
    3939namespace WebCore {
     
    141141    void notifyAudioSourcesConnectedToNode(AudioNode*);
    142142
    143     OwnPtr<Panner> m_panner;
     143    std::unique_ptr<Panner> m_panner;
    144144    unsigned m_panningModel;
    145145
  • trunk/Source/WebCore/Modules/webaudio/PeriodicWave.cpp

    r159442 r162368  
    3737#include "VectorMath.h"
    3838#include <algorithm>
    39 #include <wtf/OwnPtr.h>
    4039
    4140const unsigned PeriodicWaveSize = 4096; // This must be a power of two.
     
    199198
    200199        // Create the band-limited table.
    201         OwnPtr<AudioFloatArray> table = adoptPtr(new AudioFloatArray(m_periodicWaveSize));
    202         m_bandLimitedTables.append(table.release());
     200        m_bandLimitedTables.append(std::make_unique<AudioFloatArray>(m_periodicWaveSize));
    203201
    204202        // Apply an inverse FFT to generate the time-domain table data.
  • trunk/Source/WebCore/Modules/webaudio/PeriodicWave.h

    r157653 r162368  
    3131
    3232#include "AudioArray.h"
     33#include <memory>
    3334#include <runtime/Float32Array.h>
    34 #include <wtf/OwnPtr.h>
    3535#include <wtf/PassRefPtr.h>
    3636#include <wtf/RefCounted.h>
     
    9090    // Creates tables based on numberOfComponents Fourier coefficients.
    9191    void createBandLimitedTables(const float* real, const float* imag, unsigned numberOfComponents);
    92     Vector<OwnPtr<AudioFloatArray>> m_bandLimitedTables;
     92    Vector<std::unique_ptr<AudioFloatArray>> m_bandLimitedTables;
    9393};
    9494
  • trunk/Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp

    r153728 r162368  
    6161    , m_maxDecibels(DefaultMaxDecibels)
    6262{
    63     m_analysisFrame = adoptPtr(new FFTFrame(DefaultFFTSize));
     63    m_analysisFrame = std::make_unique<FFTFrame>(DefaultFFTSize);
    6464}
    6565
     
    8787
    8888    if (m_fftSize != size) {
    89         m_analysisFrame = adoptPtr(new FFTFrame(size));
     89        m_analysisFrame = std::make_unique<FFTFrame>(size);
    9090        // m_magnitudeBuffer has size = fftSize / 2 because it contains floats reduced from complex values in m_analysisFrame.
    9191        m_magnitudeBuffer.allocate(size / 2);
  • trunk/Source/WebCore/Modules/webaudio/RealtimeAnalyser.h

    r155112 r162368  
    2727
    2828#include "AudioArray.h"
     29#include <memory>
    2930#include <runtime/Float32Array.h>
    3031#include <runtime/Uint8Array.h>
    3132#include <wtf/Forward.h>
    3233#include <wtf/Noncopyable.h>
    33 #include <wtf/OwnPtr.h>
    3434
    3535namespace WebCore {
     
    8282   
    8383    size_t m_fftSize;
    84     OwnPtr<FFTFrame> m_analysisFrame;
     84    std::unique_ptr<FFTFrame> m_analysisFrame;
    8585    void doFFTAnalysis();
    8686   
  • trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp

    r159442 r162368  
    8787    ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels());
    8888
    89     addInput(adoptPtr(new AudioNodeInput(this)));
    90     addOutput(adoptPtr(new AudioNodeOutput(this, numberOfOutputChannels)));
     89    addInput(std::make_unique<AudioNodeInput>(this));
     90    addOutput(std::make_unique<AudioNodeOutput>(this, numberOfOutputChannels));
    9191
    9292    setNodeType(NodeTypeJavaScript);
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.cpp

    r157971 r162368  
    5050
    5151    if (!m_tempBuffer) {
    52         m_tempBuffer = adoptPtr(new AudioFloatArray(RenderingQuantum * 2));
    53         m_tempBuffer2 = adoptPtr(new AudioFloatArray(RenderingQuantum * 4));
    54         m_upSampler = adoptPtr(new UpSampler(RenderingQuantum));
    55         m_downSampler = adoptPtr(new DownSampler(RenderingQuantum * 2));
    56         m_upSampler2 = adoptPtr(new UpSampler(RenderingQuantum * 2));
    57         m_downSampler2 = adoptPtr(new DownSampler(RenderingQuantum * 4));
     52        m_tempBuffer = std::make_unique<AudioFloatArray>(RenderingQuantum * 2);
     53        m_tempBuffer2 = std::make_unique<AudioFloatArray>(RenderingQuantum * 4);
     54        m_upSampler = std::make_unique<UpSampler>(RenderingQuantum);
     55        m_downSampler = std::make_unique<DownSampler>(RenderingQuantum * 2);
     56        m_upSampler2 = std::make_unique<UpSampler>(RenderingQuantum * 2);
     57        m_downSampler2 = std::make_unique<DownSampler>(RenderingQuantum * 4);
    5858    }
    5959}
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h

    r162139 r162368  
    3131#include "UpSampler.h"
    3232#include "WaveShaperProcessor.h"
    33 #include <wtf/OwnPtr.h>
     33#include <memory>
    3434
    3535namespace WebCore {
     
    6363
    6464    // Oversampling.
    65     OwnPtr<AudioFloatArray> m_tempBuffer;
    66     OwnPtr<AudioFloatArray> m_tempBuffer2;
    67     OwnPtr<UpSampler> m_upSampler;
    68     OwnPtr<DownSampler> m_downSampler;
    69     OwnPtr<UpSampler> m_upSampler2;
    70     OwnPtr<DownSampler> m_downSampler2;
     65    std::unique_ptr<AudioFloatArray> m_tempBuffer;
     66    std::unique_ptr<AudioFloatArray> m_tempBuffer2;
     67    std::unique_ptr<UpSampler> m_upSampler;
     68    std::unique_ptr<DownSampler> m_downSampler;
     69    std::unique_ptr<UpSampler> m_upSampler2;
     70    std::unique_ptr<DownSampler> m_downSampler2;
    7171};
    7272
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp

    r161644 r162368  
    3737    : AudioBasicProcessorNode(context, context->sampleRate())
    3838{
    39     m_processor = adoptPtr(new WaveShaperProcessor(context->sampleRate(), 1));
     39    m_processor = std::make_unique<WaveShaperProcessor>(context->sampleRate(), 1);
    4040    setNodeType(NodeTypeWaveShaper);
    4141
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp

    r162296 r162368  
    4545}
    4646
    47 PassOwnPtr<AudioDSPKernel> WaveShaperProcessor::createKernel()
     47std::unique_ptr<AudioDSPKernel> WaveShaperProcessor::createKernel()
    4848{
    49     return adoptPtr(new WaveShaperDSPKernel(this));
     49    return std::make_unique<WaveShaperDSPKernel>(this);
    5050}
    5151
  • trunk/Source/WebCore/Modules/webaudio/WaveShaperProcessor.h

    r162296 r162368  
    2929#include "AudioDSPKernelProcessor.h"
    3030#include "AudioNode.h"
     31#include <memory>
    3132#include <mutex>
    3233#include <runtime/Float32Array.h>
     
    4950    virtual ~WaveShaperProcessor();
    5051
    51     virtual PassOwnPtr<AudioDSPKernel> createKernel() override;
     52    virtual std::unique_ptr<AudioDSPKernel> createKernel() override;
    5253
    5354    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
  • trunk/Source/WebCore/platform/audio/AudioBus.cpp

    r149817 r162368  
    4040#include <assert.h>
    4141#include <math.h>
    42 #include <wtf/OwnPtr.h>
    43 #include <wtf/PassOwnPtr.h>
    4442
    4543namespace WebCore {
     
    6765
    6866    for (unsigned i = 0; i < numberOfChannels; ++i) {
    69         PassOwnPtr<AudioChannel> channel = allocate ? adoptPtr(new AudioChannel(length)) : adoptPtr(new AudioChannel(0, length));
    70         m_channels.append(channel);
     67        auto channel = allocate ? std::make_unique<AudioChannel>(length) : std::make_unique<AudioChannel>(nullptr, length);
     68        m_channels.append(std::move(channel));
    7169    }
    7270
     
    465463    if (framesToDezipper) {
    466464        if (!m_dezipperGainValues.get() || m_dezipperGainValues->size() < framesToDezipper)
    467             m_dezipperGainValues = adoptPtr(new AudioFloatArray(framesToDezipper));
     465            m_dezipperGainValues = std::make_unique<AudioFloatArray>(framesToDezipper);
    468466
    469467        float* gainValues = m_dezipperGainValues->data();
  • trunk/Source/WebCore/platform/audio/AudioBus.h

    r157653 r162368  
    3131
    3232#include "AudioChannel.h"
     33#include <memory>
    3334#include <wtf/Noncopyable.h>
    34 #include <wtf/PassOwnPtr.h>
    3535#include <wtf/ThreadSafeRefCounted.h>
    3636#include <wtf/Vector.h>
     
    159159
    160160    size_t m_length;
    161     Vector<OwnPtr<AudioChannel>> m_channels;
     161    Vector<std::unique_ptr<AudioChannel>> m_channels;
    162162    int m_layout;
    163163    float m_busGain;
    164     OwnPtr<AudioFloatArray> m_dezipperGainValues;
     164    std::unique_ptr<AudioFloatArray> m_dezipperGainValues;
    165165    bool m_isFirstTime;
    166166    float m_sampleRate; // 0.0 if unknown or N/A
  • trunk/Source/WebCore/platform/audio/AudioChannel.cpp

    r131262 r162368  
    3636#include <algorithm>
    3737#include <math.h>
    38 #include <wtf/OwnPtr.h>
    3938
    4039namespace WebCore {
  • trunk/Source/WebCore/platform/audio/AudioChannel.h

    r131262 r162368  
    3131
    3232#include "AudioArray.h"
    33 #include <wtf/PassOwnPtr.h>
     33#include <memory>
    3434
    3535namespace WebCore {
     
    4343
    4444    // Reference an external buffer.
    45     AudioChannel(float* storage, size_t length)
     45    explicit AudioChannel(float* storage, size_t length)
    4646        : m_length(length)
    4747        , m_rawPointer(storage)
     
    5656        , m_silent(true)
    5757    {
    58         m_memBuffer = adoptPtr(new AudioFloatArray(length));
     58        m_memBuffer = std::make_unique<AudioFloatArray>(length);
    5959    }
    6060
     
    7171    void set(float* storage, size_t length)
    7272    {
    73         m_memBuffer.clear(); // cleanup managed storage
     73        m_memBuffer.reset(); // cleanup managed storage
    7474        m_rawPointer = storage;
    7575        m_length = length;
     
    131131
    132132    float* m_rawPointer;
    133     OwnPtr<AudioFloatArray> m_memBuffer;
     133    std::unique_ptr<AudioFloatArray> m_memBuffer;
    134134    bool m_silent;
    135135};
  • trunk/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h

    r162139 r162368  
    3434#include "AudioBus.h"
    3535#include "AudioProcessor.h"
    36 #include <wtf/OwnPtr.h>
    37 #include <wtf/PassOwnPtr.h>
    3836#include <wtf/Vector.h>
    3937
     
    5553    // Subclasses create the appropriate type of processing kernel here.
    5654    // We'll call this to create a kernel for each channel.
    57     virtual PassOwnPtr<AudioDSPKernel> createKernel() = 0;
     55    virtual std::unique_ptr<AudioDSPKernel> createKernel() = 0;
    5856
    5957    // AudioProcessor methods
     
    6967
    7068protected:
    71     Vector<OwnPtr<AudioDSPKernel>> m_kernels;
     69    Vector<std::unique_ptr<AudioDSPKernel>> m_kernels;
    7270    bool m_hasJustReset;
    7371};
  • trunk/Source/WebCore/platform/audio/AudioDestination.h

    r144720 r162368  
    3030#define AudioDestination_h
    3131
    32 #include <wtf/OwnPtr.h>
    33 #include <wtf/PassOwnPtr.h>
     32#include <memory>
    3433#include <wtf/text/WTFString.h>
    3534
     
    4645    // Pass in (numberOfInputChannels > 0) if live/local audio input is desired.
    4746    // Port-specific device identification information for live/local input streams can be passed in the inputDeviceId.
    48     static PassOwnPtr<AudioDestination> create(AudioIOCallback&, const String& inputDeviceId, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate);
     47    static std::unique_ptr<AudioDestination> create(AudioIOCallback&, const String& inputDeviceId, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate);
    4948
    5049    virtual ~AudioDestination() { }
  • trunk/Source/WebCore/platform/audio/AudioResampler.cpp

    r159027 r162368  
    4040    : m_rate(1.0)
    4141{
    42     m_kernels.append(adoptPtr(new AudioResamplerKernel(this)));
     42    m_kernels.append(std::make_unique<AudioResamplerKernel>(this));
    4343    m_sourceBus = AudioBus::create(1, 0, false);
    4444}
     
    4848{
    4949    for (unsigned i = 0; i < numberOfChannels; ++i)
    50         m_kernels.append(adoptPtr(new AudioResamplerKernel(this)));
     50        m_kernels.append(std::make_unique<AudioResamplerKernel>(this));
    5151
    5252    m_sourceBus = AudioBus::create(numberOfChannels, 0, false);
     
    6262    if (numberOfChannels > currentSize) {
    6363        for (unsigned i = currentSize; i < numberOfChannels; ++i)
    64             m_kernels.append(adoptPtr(new AudioResamplerKernel(this)));
     64            m_kernels.append(std::make_unique<AudioResamplerKernel>(this));
    6565    } else
    6666        m_kernels.resize(numberOfChannels);
  • trunk/Source/WebCore/platform/audio/AudioResampler.h

    r157653 r162368  
    2929#include "AudioResamplerKernel.h"
    3030#include "AudioSourceProvider.h"
    31 #include <wtf/OwnPtr.h>
     31#include <memory>
    3232#include <wtf/Vector.h>
    3333
     
    6060private:
    6161    double m_rate;
    62     Vector<OwnPtr<AudioResamplerKernel>> m_kernels;
     62    Vector<std::unique_ptr<AudioResamplerKernel>> m_kernels;
    6363    RefPtr<AudioBus> m_sourceBus;
    6464};
  • trunk/Source/WebCore/platform/audio/AudioSession.h

    r150651 r162368  
    2929#if USE(AUDIO_SESSION)
    3030
     31#include <memory>
    3132#include <wtf/HashSet.h>
    32 #include <wtf/OwnPtr.h>
    3333
    3434namespace WebCore {
     
    7575    ~AudioSession();
    7676
    77     OwnPtr<AudioSessionPrivate> m_private;
     77    std::unique_ptr<AudioSessionPrivate> m_private;
    7878    HashSet<AudioSessionListener*> m_listeners;
    7979};
  • trunk/Source/WebCore/platform/audio/DynamicsCompressor.cpp

    r156015 r162368  
    272272    m_postFilterPacks.clear();
    273273    for (unsigned i = 0; i < numberOfChannels; ++i) {
    274         m_preFilterPacks.append(adoptPtr(new ZeroPoleFilterPack4()));
    275         m_postFilterPacks.append(adoptPtr(new ZeroPoleFilterPack4()));
     274        m_preFilterPacks.append(std::make_unique<ZeroPoleFilterPack4>());
     275        m_postFilterPacks.append(std::make_unique<ZeroPoleFilterPack4>());
    276276    }
    277277
  • trunk/Source/WebCore/platform/audio/DynamicsCompressor.h

    r157653 r162368  
    100100
    101101    // Per-channel emphasis filters.
    102     Vector<OwnPtr<ZeroPoleFilterPack4>> m_preFilterPacks;
    103     Vector<OwnPtr<ZeroPoleFilterPack4>> m_postFilterPacks;
     102    Vector<std::unique_ptr<ZeroPoleFilterPack4>> m_preFilterPacks;
     103    Vector<std::unique_ptr<ZeroPoleFilterPack4>> m_postFilterPacks;
    104104
    105105    std::unique_ptr<const float*[]> m_sourceChannels;
  • trunk/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp

    r159027 r162368  
    7777    m_preDelayBuffers.clear();
    7878    for (unsigned i = 0; i < numberOfChannels; ++i)
    79         m_preDelayBuffers.append(adoptPtr(new AudioFloatArray(MaxPreDelayFrames)));
     79        m_preDelayBuffers.append(std::make_unique<AudioFloatArray>(MaxPreDelayFrames));
    8080}
    8181
  • trunk/Source/WebCore/platform/audio/DynamicsCompressorKernel.h

    r157653 r162368  
    3131
    3232#include "AudioArray.h"
    33 
    34 #include <wtf/OwnPtr.h>
    35 #include <wtf/PassOwnPtr.h>
     33#include <memory>
    3634
    3735namespace WebCore {
     
    8987    void setPreDelayTime(float);
    9088
    91     Vector<OwnPtr<AudioFloatArray>> m_preDelayBuffers;
     89    Vector<std::unique_ptr<AudioFloatArray>> m_preDelayBuffers;
    9290    int m_preDelayReadIndex;
    9391    int m_preDelayWriteIndex;
  • trunk/Source/WebCore/platform/audio/FFTFrame.cpp

    r149970 r162368  
    3636#include <complex>
    3737#include <wtf/MathExtras.h>
    38 #include <wtf/OwnPtr.h>
    3938
    4039#ifndef NDEBUG
     
    5453}
    5554
    56 PassOwnPtr<FFTFrame> FFTFrame::createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x)
    57 {
    58     OwnPtr<FFTFrame> newFrame = adoptPtr(new FFTFrame(frame1.fftSize()));
     55std::unique_ptr<FFTFrame> FFTFrame::createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x)
     56{
     57    auto newFrame = std::make_unique<FFTFrame>(frame1.fftSize());
    5958
    6059    newFrame->interpolateFrequencyComponents(frame1, frame2, x);
     
    6968    newFrame->doFFT(buffer.data());
    7069
    71     return newFrame.release();
     70    return newFrame;
    7271}
    7372
  • trunk/Source/WebCore/platform/audio/FFTFrame.h

    r160045 r162368  
    5050#if USE(WEBAUDIO_GSTREAMER)
    5151#include <glib.h>
    52 #include <memory>
    5352G_BEGIN_DECLS
    5453#include <gst/fft/gstfftf32.h>
     
    7372#endif
    7473
     74#include <memory>
    7575#include <wtf/Forward.h>
    76 #include <wtf/PassOwnPtr.h>
    7776#include <wtf/Threading.h>
    7877
     
    106105
    107106    // Interpolates from frame1 -> frame2 as x goes from 0.0 -> 1.0
    108     static PassOwnPtr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x);
     107    static std::unique_ptr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x);
    109108
    110109    void doPaddedFFT(const float* data, size_t dataSize); // zero-padding with dataSize <= fftSize
  • trunk/Source/WebCore/platform/audio/HRTFDatabase.cpp

    r159027 r162368  
    4444const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor;
    4545
    46 PassOwnPtr<HRTFDatabase> HRTFDatabase::create(float sampleRate)
    47 {
    48     OwnPtr<HRTFDatabase> hrtfDatabase = adoptPtr(new HRTFDatabase(sampleRate));
    49     return hrtfDatabase.release();
    50 }
    51 
    5246HRTFDatabase::HRTFDatabase(float sampleRate)
    5347    : m_elevations(NumberOfTotalElevations)
     
    5650    unsigned elevationIndex = 0;
    5751    for (int elevation = MinElevation; elevation <= MaxElevation; elevation += RawElevationAngleSpacing) {
    58         OwnPtr<HRTFElevation> hrtfElevation = HRTFElevation::createForSubject("Composite", elevation, sampleRate);
     52        std::unique_ptr<HRTFElevation> hrtfElevation = HRTFElevation::createForSubject("Composite", elevation, sampleRate);
    5953        ASSERT(hrtfElevation.get());
    6054        if (!hrtfElevation.get())
    6155            return;
    6256       
    63         m_elevations[elevationIndex] = hrtfElevation.release();
     57        m_elevations[elevationIndex] = std::move(hrtfElevation);
    6458        elevationIndex += InterpolationFactor;
    6559    }
  • trunk/Source/WebCore/platform/audio/HRTFDatabase.h

    r157653 r162368  
    3131
    3232#include "HRTFElevation.h"
     33#include <memory>
    3334#include <wtf/Forward.h>
    3435#include <wtf/Noncopyable.h>
    35 #include <wtf/OwnPtr.h>
    3636#include <wtf/PassRefPtr.h>
    3737#include <wtf/Vector.h>
     
    4444    WTF_MAKE_NONCOPYABLE(HRTFDatabase);
    4545public:
    46     static PassOwnPtr<HRTFDatabase> create(float sampleRate);
     46    explicit HRTFDatabase(float sampleRate);
    4747
    4848    // getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation.
     
    6161
    6262private:
    63     explicit HRTFDatabase(float sampleRate);
    64 
    6563    // Minimum and maximum elevation angles (inclusive) for a HRTFDatabase.
    6664    static const int MinElevation;
     
    7775    static unsigned indexFromElevationAngle(double);
    7876
    79     Vector<OwnPtr<HRTFElevation>> m_elevations;                                           
     77    Vector<std::unique_ptr<HRTFElevation>> m_elevations;
    8078    float m_sampleRate;
    8179};
  • trunk/Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp

    r157806 r162368  
    7878
    7979    waitForLoaderThreadCompletion();
    80     m_hrtfDatabase.clear();
     80    m_hrtfDatabase.reset();
    8181
    8282    // Remove ourself from the map.
     
    9797    if (!m_hrtfDatabase.get()) {
    9898        // Load the default HRTF database.
    99         m_hrtfDatabase = HRTFDatabase::create(m_databaseSampleRate);
     99        m_hrtfDatabase = std::make_unique<HRTFDatabase>(m_databaseSampleRate);
    100100    }
    101101}
  • trunk/Source/WebCore/platform/audio/HRTFDatabaseLoader.h

    r157806 r162368  
    3131
    3232#include "HRTFDatabase.h"
     33#include <memory>
    3334#include <wtf/HashMap.h>
    3435#include <wtf/PassRefPtr.h>
     
    7374    void loadAsynchronously();
    7475
    75     OwnPtr<HRTFDatabase> m_hrtfDatabase;
     76    std::unique_ptr<HRTFDatabase> m_hrtfDatabase;
    7677
    7778    // Holding a m_threadLock is required when accessing m_databaseLoaderThread.
  • trunk/Source/WebCore/platform/audio/HRTFElevation.cpp

    r159027 r162368  
    4141#include <algorithm>
    4242#include <math.h>
    43 #include <wtf/OwnPtr.h>
    44 
    4543
    4644namespace WebCore {
     
    239237};
    240238
    241 PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
     239std::unique_ptr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
    242240{
    243241    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
     
    246244        return nullptr;
    247245       
    248     OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
    249     OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
     246    auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
     247    auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
    250248
    251249    // Load convolution kernels from HRTF files.
     
    276274    }
    277275   
    278     OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), elevation, sampleRate));
    279     return hrtfElevation.release();
    280 }
    281 
    282 PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
     276    return std::make_unique<HRTFElevation>(std::move(kernelListL), std::move(kernelListR), elevation, sampleRate);
     277}
     278
     279std::unique_ptr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
    283280{
    284281    ASSERT(hrtfElevation1 && hrtfElevation2);
     
    288285    ASSERT(x >= 0.0 && x < 1.0);
    289286   
    290     OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
    291     OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
     287    auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
     288    auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
    292289
    293290    HRTFKernelList* kernelListL1 = hrtfElevation1->kernelListL();
     
    305302    double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle();
    306303   
    307     OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), static_cast<int>(angle), sampleRate));
    308     return hrtfElevation.release(); 
     304    return std::make_unique<HRTFElevation>(std::move(kernelListL), std::move(kernelListR), static_cast<int>(angle), sampleRate);
    309305}
    310306
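
The two factory functions above illustrate the core of the migration: locals built with std::make_unique, ownership transferred with std::move instead of OwnPtr::release(), and the result returned as std::unique_ptr (nullptr still signals failure). A self-contained sketch under hypothetical Elevation/KernelList names:

    #include <memory>
    #include <utility>
    #include <vector>

    using KernelList = std::vector<float>; // stand-in for HRTFKernelList

    class Elevation {
    public:
        Elevation(std::unique_ptr<KernelList> left, std::unique_ptr<KernelList> right)
            : m_left(std::move(left))
            , m_right(std::move(right))
        { }

        // Factory: failure is signalled by returning nullptr, as in the hunk above.
        static std::unique_ptr<Elevation> create(int elevation)
        {
            bool isElevationGood = elevation >= -45 && elevation <= 90;
            if (!isElevationGood)
                return nullptr;

            auto left = std::make_unique<KernelList>(360 / 15);
            auto right = std::make_unique<KernelList>(360 / 15);

            // Previously adoptPtr(new ...) plus a trailing release();
            // now ownership moves in a single expression.
            return std::make_unique<Elevation>(std::move(left), std::move(right));
        }

    private:
        std::unique_ptr<KernelList> m_left;
        std::unique_ptr<KernelList> m_right;
    };

    int main()
    {
        auto good = Elevation::create(45);  // non-null
        auto bad = Elevation::create(200);  // nullptr
        return good && !bad ? 0 : 1;
    }
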
  • trunk/Source/WebCore/platform/audio/HRTFElevation.h

    r148921 r162368  
    3131
    3232#include "HRTFKernel.h"
     33#include <memory>
    3334#include <wtf/Noncopyable.h>
    34 #include <wtf/OwnPtr.h>
    35 #include <wtf/PassOwnPtr.h>
    3635#include <wtf/PassRefPtr.h>
    3736#include <wtf/RefCounted.h>
     
    4746    WTF_MAKE_NONCOPYABLE(HRTFElevation);
    4847public:
     48    HRTFElevation(std::unique_ptr<HRTFKernelList> kernelListL, std::unique_ptr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
     49        : m_kernelListL(std::move(kernelListL))
     50        , m_kernelListR(std::move(kernelListR))
     51        , m_elevationAngle(elevation)
     52        , m_sampleRate(sampleRate)
     53    {
     54    }
     55
    4956    // Loads and returns an HRTFElevation with the given HRTF database subject name and elevation from browser (or WebKit.framework) resources.
    5057    // Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names.
    5158    // Interpolated azimuths will be generated based on InterpolationFactor.
    5259    // Valid values for elevation are -45 -> +90 in 15 degree increments.
    53     static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);
     60    static std::unique_ptr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);
    5461
    5562    // Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation.
    56     static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);
     63    static std::unique_ptr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);
    5764
    5865    // Returns the list of left or right ear HRTFKernels for all the azimuths going from 0 to 360 degrees.
     
    94101
    95102private:
    96     HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
    97         : m_kernelListL(kernelListL)
    98         , m_kernelListR(kernelListR)
    99         , m_elevationAngle(elevation)
    100         , m_sampleRate(sampleRate)
    101     {
    102     }
    103 
    104     OwnPtr<HRTFKernelList> m_kernelListL;
    105     OwnPtr<HRTFKernelList> m_kernelListR;
     103    std::unique_ptr<HRTFKernelList> m_kernelListL;
     104    std::unique_ptr<HRTFKernelList> m_kernelListR;
    106105    double m_elevationAngle;
    107106    float m_sampleRate;
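
The header change above moves the constructor from private to public and has it take std::unique_ptr parameters by value, moving them into the members; std::make_unique cannot call a private constructor, so either the constructor becomes public (as here) or the factory wraps new directly. A sketch of both options with hypothetical Widget/Part names:

    #include <memory>
    #include <utility>

    struct Part { };

    class Widget {
    public:
        // Public so std::make_unique<Widget>(...) can construct it.
        explicit Widget(std::unique_ptr<Part> part)
            : m_part(std::move(part))
        { }

        static std::unique_ptr<Widget> create()
        {
            return std::make_unique<Widget>(std::make_unique<Part>());
        }

    private:
        std::unique_ptr<Part> m_part;
    };

    // Alternative keeping the constructor private: the factory owns the `new`.
    class SealedWidget {
    public:
        static std::unique_ptr<SealedWidget> create()
        {
            return std::unique_ptr<SealedWidget>(new SealedWidget);
        }

    private:
        SealedWidget() = default;
    };

    int main()
    {
        auto w = Widget::create();
        auto s = SealedWidget::create();
    }
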
  • trunk/Source/WebCore/platform/audio/HRTFKernel.cpp

    r159027 r162368  
    9393    }
    9494
    95     m_fftFrame = adoptPtr(new FFTFrame(fftSize));
     95    m_fftFrame = std::make_unique<FFTFrame>(fftSize);
    9696    m_fftFrame->doPaddedFFT(impulseResponse, truncatedResponseLength);
    9797}
     
    102102}
    103103
    104 PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
     104std::unique_ptr<AudioChannel> HRTFKernel::createImpulseResponse()
    105105{
    106     OwnPtr<AudioChannel> channel = adoptPtr(new AudioChannel(fftSize()));
     106    auto channel = std::make_unique<AudioChannel>(fftSize());
    107107    FFTFrame fftFrame(*m_fftFrame);
    108108
     
    111111    fftFrame.doInverseFFT(channel->mutableData());
    112112
    113     return channel.release();
     113    return channel;
    114114}
    115115
     
    132132    float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
    133133   
    134     OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
    135     return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
     134    std::unique_ptr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
     135    return HRTFKernel::create(std::move(interpolatedFrame), frameDelay, sampleRate1);
    136136}
    137137
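
createImpulseResponse() above no longer ends with channel.release(): a named local std::unique_ptr is treated as an rvalue in a return statement, so it can be returned directly. A minimal sketch, with std::vector<float> standing in for AudioChannel:

    #include <memory>
    #include <vector>

    using AudioChannelData = std::vector<float>; // stand-in for AudioChannel

    std::unique_ptr<AudioChannelData> createImpulseResponse(size_t fftSize)
    {
        auto channel = std::make_unique<AudioChannelData>(fftSize);
        // ... fill *channel with the inverse FFT result ...
        // With OwnPtr this had to be `return channel.release();`; the local
        // unique_ptr is implicitly moved out of the function here.
        return channel;
    }

    int main()
    {
        auto response = createImpulseResponse(512);
        return response->size() == 512 ? 0 : 1;
    }
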
  • trunk/Source/WebCore/platform/audio/HRTFKernel.h

    r157653 r162368  
    3131
    3232#include "FFTFrame.h"
    33 #include <wtf/OwnPtr.h>
    34 #include <wtf/PassOwnPtr.h>
     33#include <memory>
    3534#include <wtf/PassRefPtr.h>
    3635#include <wtf/RefCounted.h>
     
    5756    }
    5857
    59     static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
     58    static PassRefPtr<HRTFKernel> create(std::unique_ptr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
    6059    {
    61         return adoptRef(new HRTFKernel(fftFrame, frameDelay, sampleRate));
     60        return adoptRef(new HRTFKernel(std::forward<std::unique_ptr<FFTFrame>>(fftFrame), frameDelay, sampleRate));
    6261    }
    6362
     
    7473
    7574    // Converts back into impulse-response form.
    76     PassOwnPtr<AudioChannel> createImpulseResponse();
     75    std::unique_ptr<AudioChannel> createImpulseResponse();
    7776
    7877private:
     
    8079    HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate);
    8180   
    82     HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
    83         : m_fftFrame(fftFrame)
     81    HRTFKernel(std::unique_ptr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
     82        : m_fftFrame(std::move(fftFrame))
    8483        , m_frameDelay(frameDelay)
    8584        , m_sampleRate(sampleRate)
     
    8786    }
    8887   
    89     OwnPtr<FFTFrame> m_fftFrame;
     88    std::unique_ptr<FFTFrame> m_fftFrame;
    9089    float m_frameDelay;
    9190    float m_sampleRate;
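
HRTFKernel::create() above receives a std::unique_ptr<FFTFrame> by value and hands it to the constructor; the committed code uses std::forward<std::unique_ptr<FFTFrame>>(fftFrame), which for a by-value parameter behaves the same as std::move(fftFrame). A rough sketch of the shape, with std::shared_ptr standing in for WTF's RefCounted/adoptRef machinery and hypothetical Kernel/Frame names:

    #include <memory>
    #include <utility>

    struct Frame { };

    class Kernel {
    public:
        static std::shared_ptr<Kernel> create(std::unique_ptr<Frame> frame, float delay)
        {
            // std::move (or the equivalent std::forward cast) transfers the
            // frame into the Kernel being constructed.
            return std::shared_ptr<Kernel>(new Kernel(std::move(frame), delay));
        }

    private:
        Kernel(std::unique_ptr<Frame> frame, float delay)
            : m_frame(std::move(frame))
            , m_delay(delay)
        { }

        std::unique_ptr<Frame> m_frame;
        float m_delay;
    };

    int main()
    {
        auto kernel = Kernel::create(std::make_unique<Frame>(), 0.5f);
    }
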
  • trunk/Source/WebCore/platform/audio/MultiChannelResampler.cpp

    r149817 r162368  
    9898    // Create each channel's resampler.
    9999    for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex)
    100         m_kernels.append(adoptPtr(new SincResampler(scaleFactor)));
     100        m_kernels.append(std::make_unique<SincResampler>(scaleFactor));
    101101}
    102102
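
The loop above appends std::make_unique temporaries straight into the container; being rvalues, they move into the Vector with no adoptPtr/release step. A standalone sketch with std::vector and a hypothetical Resampler type in place of WTF::Vector and SincResampler:

    #include <memory>
    #include <vector>

    struct Resampler {
        explicit Resampler(double scaleFactor) : scale(scaleFactor) { }
        double scale;
    };

    class MultiChannelResampler {
    public:
        MultiChannelResampler(double scaleFactor, unsigned numberOfChannels)
        {
            // Each make_unique result is an rvalue, so push_back moves it
            // directly into the container of owning pointers.
            for (unsigned i = 0; i < numberOfChannels; ++i)
                m_kernels.push_back(std::make_unique<Resampler>(scaleFactor));
        }

    private:
        std::vector<std::unique_ptr<Resampler>> m_kernels;
    };

    int main()
    {
        MultiChannelResampler resampler(0.5, 2);
    }
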
  • trunk/Source/WebCore/platform/audio/MultiChannelResampler.h

    r157653 r162368  
    3131
    3232#include "SincResampler.h"
    33 #include <wtf/OwnPtr.h>
     33#include <memory>
    3434
    3535namespace WebCore {
     
    5050   
    5151    // Each channel will be resampled using a high-quality SincResampler.
    52     Vector<OwnPtr<SincResampler>> m_kernels;
     52    Vector<std::unique_ptr<SincResampler>> m_kernels;
    5353   
    5454    unsigned m_numberOfChannels;
  • trunk/Source/WebCore/platform/audio/Panner.cpp

    r150856 r162368  
    3535#include "EqualPowerPanner.h"
    3636#include "HRTFPanner.h"
    37 #include <wtf/OwnPtr.h>
    3837
    3938namespace WebCore {
    4039
    41 PassOwnPtr<Panner> Panner::create(PanningModel model, float sampleRate, HRTFDatabaseLoader* databaseLoader)
     40std::unique_ptr<Panner> Panner::create(PanningModel model, float sampleRate, HRTFDatabaseLoader* databaseLoader)
    4241{
    43     OwnPtr<Panner> panner;
     42    std::unique_ptr<Panner> panner;
    4443
    4544    switch (model) {
    4645    case PanningModelEqualPower:
    47         panner = adoptPtr(new EqualPowerPanner(sampleRate));
     46        panner = std::make_unique<EqualPowerPanner>(sampleRate);
    4847        break;
    4948
    5049    case PanningModelHRTF:
    51         panner = adoptPtr(new HRTFPanner(sampleRate, databaseLoader));
     50        panner = std::make_unique<HRTFPanner>(sampleRate, databaseLoader);
    5251        break;
    5352
     
    5958    }
    6059
    61     return panner.release();
     60    return panner;
    6261}
    6362
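
Panner::create() above keeps its switch-based factory shape: each branch assigns a std::make_unique result, relying on the implicit std::unique_ptr<Derived> to std::unique_ptr<Base> conversion, and the local is returned without release(). A self-contained sketch with simplified, hypothetical class bodies:

    #include <memory>

    class Panner {
    public:
        virtual ~Panner() = default;
    };

    class EqualPowerPanner : public Panner {
    public:
        explicit EqualPowerPanner(float sampleRate) { (void)sampleRate; }
    };

    class HRTFPanner : public Panner {
    public:
        explicit HRTFPanner(float sampleRate) { (void)sampleRate; }
    };

    enum class PanningModel { EqualPower, HRTF };

    std::unique_ptr<Panner> createPanner(PanningModel model, float sampleRate)
    {
        std::unique_ptr<Panner> panner;
        switch (model) {
        case PanningModel::EqualPower:
            // unique_ptr<EqualPowerPanner> converts implicitly to unique_ptr<Panner>.
            panner = std::make_unique<EqualPowerPanner>(sampleRate);
            break;
        case PanningModel::HRTF:
            panner = std::make_unique<HRTFPanner>(sampleRate);
            break;
        }
        return panner;
    }

    int main()
    {
        auto panner = createPanner(PanningModel::HRTF, 44100);
    }
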
  • trunk/Source/WebCore/platform/audio/Panner.h

    r150856 r162368  
    3030#define Panner_h
    3131
    32 #include <wtf/PassOwnPtr.h>
     32#include <memory>
    3333
    3434namespace WebCore {
     
    4949    typedef unsigned PanningModel;
    5050
    51     static PassOwnPtr<Panner> create(PanningModel, float sampleRate, HRTFDatabaseLoader*);
     51    static std::unique_ptr<Panner> create(PanningModel, float sampleRate, HRTFDatabaseLoader*);
    5252
    5353    virtual ~Panner() { };
  • trunk/Source/WebCore/platform/audio/Reverb.cpp

    r159027 r162368  
    3939#include <math.h>
    4040#include <wtf/MathExtras.h>
    41 #include <wtf/OwnPtr.h>
    42 #include <wtf/PassOwnPtr.h>
    4341
    4442namespace WebCore {
     
    120118        AudioChannel* channel = impulseResponseBuffer->channel(i);
    121119
    122         OwnPtr<ReverbConvolver> convolver = adoptPtr(new ReverbConvolver(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
    123         m_convolvers.append(convolver.release());
     120        m_convolvers.append(std::make_unique<ReverbConvolver>(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
    124121
    125122        convolverRenderPhase += renderSliceSize;
  • trunk/Source/WebCore/platform/audio/Reverb.h

    r157653 r162368  
    5757    size_t m_impulseResponseLength;
    5858
    59     Vector<OwnPtr<ReverbConvolver>> m_convolvers;
     59    Vector<std::unique_ptr<ReverbConvolver>> m_convolvers;
    6060
    6161    // For "True" stereo processing
  • trunk/Source/WebCore/platform/audio/ReverbConvolver.h

    r162291 r162368  
    3737#include "ReverbInputBuffer.h"
    3838#include <condition_variable>
     39#include <memory>
    3940#include <mutex>
    4041#include <wtf/RefCounted.h>
  • trunk/Source/WebCore/platform/audio/ReverbConvolverStage.cpp

    r147684 r162368  
    3838#include "ReverbConvolver.h"
    3939#include "ReverbInputBuffer.h"
    40 #include <wtf/OwnPtr.h>
    41 #include <wtf/PassOwnPtr.h>
    4240
    4341namespace WebCore {
     
    5654
    5755    if (!m_directMode) {
    58         m_fftKernel = adoptPtr(new FFTFrame(fftSize));
     56        m_fftKernel = std::make_unique<FFTFrame>(fftSize);
    5957        m_fftKernel->doPaddedFFT(impulseResponse + stageOffset, stageLength);
    60         m_fftConvolver = adoptPtr(new FFTConvolver(fftSize));
     58        m_fftConvolver = std::make_unique<FFTConvolver>(fftSize);
    6159    } else {
    62         m_directKernel = adoptPtr(new AudioFloatArray(fftSize / 2));
     60        m_directKernel = std::make_unique<AudioFloatArray>(fftSize / 2);
    6361        m_directKernel->copyToRange(impulseResponse + stageOffset, 0, fftSize / 2);
    64         m_directConvolver = adoptPtr(new DirectConvolver(renderSliceSize));
     62        m_directConvolver = std::make_unique<DirectConvolver>(renderSliceSize);
    6563    }
    6664    m_temporaryBuffer.allocate(renderSliceSize);
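
The constructor above populates either the FFT-mode members or the direct-mode members; whichever pair stays null needs no special handling, since a std::unique_ptr member default-constructs to null and cleans up automatically. A minimal sketch with std::vector<float> standing in for the kernel and convolver types:

    #include <memory>
    #include <vector>

    using FloatArray = std::vector<float>; // stand-in for AudioFloatArray / FFT types

    class ConvolverStage {
    public:
        ConvolverStage(bool directMode, size_t fftSize)
            : m_directMode(directMode)
        {
            // Only one member is ever populated; the other remains null.
            if (!m_directMode)
                m_fftKernel = std::make_unique<FloatArray>(fftSize);
            else
                m_directKernel = std::make_unique<FloatArray>(fftSize / 2);
        }

    private:
        bool m_directMode;
        std::unique_ptr<FloatArray> m_fftKernel;
        std::unique_ptr<FloatArray> m_directKernel;
    };

    int main()
    {
        ConvolverStage fftStage(false, 256);
        ConvolverStage directStage(true, 256);
    }
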
  • trunk/Source/WebCore/platform/audio/ReverbConvolverStage.h

    r147684 r162368  
    3131
    3232#include "AudioArray.h"
    33 #include <wtf/OwnPtr.h>
     33#include <memory>
    3434
    3535namespace WebCore {
     
    6161
    6262private:
    63     OwnPtr<FFTFrame> m_fftKernel;
    64     OwnPtr<FFTConvolver> m_fftConvolver;
     63    std::unique_ptr<FFTFrame> m_fftKernel;
     64    std::unique_ptr<FFTConvolver> m_fftConvolver;
    6565
    6666    AudioFloatArray m_preDelayBuffer;
     
    7878
    7979    bool m_directMode;
    80     OwnPtr<AudioFloatArray> m_directKernel;
    81     OwnPtr<DirectConvolver> m_directConvolver;
     80    std::unique_ptr<AudioFloatArray> m_directKernel;
     81    std::unique_ptr<DirectConvolver> m_directConvolver;
    8282};
    8383
  • trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp

    r159730 r162368  
    4343}
    4444
    45 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
     45std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
    4646{
    4747    // FIXME: make use of inputDeviceId as appropriate.
     
    5555        LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
    5656
    57     return adoptPtr(new AudioDestinationGStreamer(callback, sampleRate));
     57    return std::make_unique<AudioDestinationGStreamer>(callback, sampleRate);
    5858}
    5959
  • trunk/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp

    r159730 r162368  
    3131#include <gst/pbutils/pbutils.h>
    3232#include <wtf/Noncopyable.h>
    33 #include <wtf/PassOwnPtr.h>
    3433#include <wtf/gobject/GOwnPtr.h>
    3534#include <wtf/gobject/GRefPtr.h>
  • trunk/Source/WebCore/platform/audio/ios/AudioDestinationIOS.cpp

    r161589 r162368  
    6868
    6969// Factory method: iOS-implementation
    70 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
     70std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
    7171{
    7272    // FIXME: make use of inputDeviceId as appropriate.
     
    8080        LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
    8181
    82     return adoptPtr(new AudioDestinationIOS(callback, sampleRate));
     82    return std::make_unique<AudioDestinationIOS>(callback, sampleRate);
    8383}
    8484
  • trunk/Source/WebCore/platform/audio/ios/AudioSessionIOS.mm

    r161642 r162368  
    3232#import <AVFoundation/AVAudioSession.h>
    3333#import <objc/runtime.h>
    34 #import <wtf/PassOwnPtr.h>
    3534#import <wtf/RetainPtr.h>
    3635
     
    120119
    121120AudioSession::AudioSession()
    122     : m_private(adoptPtr(new AudioSessionPrivate(this)))
     121    : m_private(std::make_unique<AudioSessionPrivate>(this))
    123122{
    124123}
  • trunk/Source/WebCore/platform/audio/mac/AudioDestinationMac.cpp

    r161589 r162368  
    5050
    5151// Factory method: Mac-implementation
    52 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
     52std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
    5353{
    5454    // FIXME: make use of inputDeviceId as appropriate.
     
    6262        LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
    6363
    64     return adoptPtr(new AudioDestinationMac(callback, sampleRate));
     64    return std::make_unique<AudioDestinationMac>(callback, sampleRate);
    6565}
    6666
  • trunk/Source/WebCore/platform/audio/mac/AudioDestinationMac.h

    r162139 r162368  
    3434#include "MediaSession.h"
    3535#include <AudioUnit/AudioUnit.h>
    36 #include <wtf/OwnPtr.h>
    3736#include <wtf/RefPtr.h>
    3837
  • trunk/Source/WebCore/platform/audio/mac/AudioSessionMac.cpp

    r161589 r162368  
    3333#include "NotImplemented.h"
    3434#include <CoreAudio/AudioHardware.h>
    35 #include <wtf/PassOwnPtr.h>
    3635
    3736namespace WebCore {
     
    5756
    5857AudioSession::AudioSession()
    59     : m_private(adoptPtr(new AudioSessionPrivate()))
     58    : m_private(std::make_unique<AudioSessionPrivate>())
    6059{
    6160}
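
The AudioSession constructors above fill a std::unique_ptr member directly from the initializer list with std::make_unique, replacing adoptPtr(new AudioSessionPrivate(...)). A minimal sketch with hypothetical Session/SessionPrivate names:

    #include <memory>

    struct SessionPrivate {
        bool active = false;
    };

    class Session {
    public:
        // Previously: m_private(adoptPtr(new SessionPrivate())); the member is
        // now a std::unique_ptr initialized in the constructor's initializer list.
        Session()
            : m_private(std::make_unique<SessionPrivate>())
        { }

    private:
        std::unique_ptr<SessionPrivate> m_private;
    };

    int main()
    {
        Session session;
    }
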