Changeset 162368 in webkit
- Timestamp:
- Jan 20, 2014 12:36:11 PM (10 years ago)
- Location:
- trunk/Source/WebCore
- Files:
- 84 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/WebCore/ChangeLog
r162366 r162368 1 2014-01-20 Zan Dobersek <zdobersek@igalia.com> 2 3 Move WebAudio source code to std::unique_ptr 4 https://bugs.webkit.org/show_bug.cgi?id=127274 5 6 Reviewed by Eric Carlson. 7 8 Move from using OwnPtr and PassOwnPtr to using std::unique_ptr and move semantics 9 in the WebAudio module and the WebAudio code in the platform layer. 10 11 * Modules/webaudio/AsyncAudioDecoder.cpp: 12 * Modules/webaudio/AsyncAudioDecoder.h: 13 * Modules/webaudio/AudioBasicInspectorNode.cpp: 14 (WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode): 15 * Modules/webaudio/AudioBasicProcessorNode.cpp: 16 (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode): 17 * Modules/webaudio/AudioBasicProcessorNode.h: 18 * Modules/webaudio/AudioBufferSourceNode.cpp: 19 (WebCore::AudioBufferSourceNode::AudioBufferSourceNode): 20 * Modules/webaudio/AudioContext.cpp: 21 * Modules/webaudio/AudioContext.h: 22 * Modules/webaudio/AudioDestinationNode.cpp: 23 (WebCore::AudioDestinationNode::AudioDestinationNode): 24 * Modules/webaudio/AudioNode.cpp: 25 (WebCore::AudioNode::addInput): 26 (WebCore::AudioNode::addOutput): 27 (WebCore::AudioNode::checkNumberOfChannelsForInput): 28 * Modules/webaudio/AudioNode.h: 29 * Modules/webaudio/BiquadFilterNode.cpp: 30 (WebCore::BiquadFilterNode::BiquadFilterNode): 31 * Modules/webaudio/BiquadProcessor.cpp: 32 (WebCore::BiquadProcessor::createKernel): 33 (WebCore::BiquadProcessor::getFrequencyResponse): 34 * Modules/webaudio/BiquadProcessor.h: 35 * Modules/webaudio/ChannelMergerNode.cpp: 36 (WebCore::ChannelMergerNode::ChannelMergerNode): 37 * Modules/webaudio/ChannelSplitterNode.cpp: 38 (WebCore::ChannelSplitterNode::ChannelSplitterNode): 39 * Modules/webaudio/ConvolverNode.cpp: 40 (WebCore::ConvolverNode::ConvolverNode): 41 (WebCore::ConvolverNode::uninitialize): 42 (WebCore::ConvolverNode::setBuffer): 43 * Modules/webaudio/ConvolverNode.h: 44 * Modules/webaudio/DefaultAudioDestinationNode.h: 45 * Modules/webaudio/DelayNode.cpp: 46 
(WebCore::DelayNode::DelayNode): 47 * Modules/webaudio/DelayProcessor.cpp: 48 (WebCore::DelayProcessor::createKernel): 49 * Modules/webaudio/DelayProcessor.h: 50 * Modules/webaudio/DynamicsCompressorNode.cpp: 51 (WebCore::DynamicsCompressorNode::DynamicsCompressorNode): 52 (WebCore::DynamicsCompressorNode::initialize): 53 (WebCore::DynamicsCompressorNode::uninitialize): 54 * Modules/webaudio/DynamicsCompressorNode.h: 55 * Modules/webaudio/GainNode.cpp: 56 (WebCore::GainNode::GainNode): 57 * Modules/webaudio/MediaElementAudioSourceNode.cpp: 58 (WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode): 59 (WebCore::MediaElementAudioSourceNode::setFormat): 60 * Modules/webaudio/MediaElementAudioSourceNode.h: 61 * Modules/webaudio/MediaStreamAudioDestinationNode.h: 62 * Modules/webaudio/MediaStreamAudioSource.cpp: 63 * Modules/webaudio/MediaStreamAudioSourceNode.cpp: 64 (WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode): 65 * Modules/webaudio/MediaStreamAudioSourceNode.h: 66 * Modules/webaudio/OscillatorNode.cpp: 67 (WebCore::OscillatorNode::OscillatorNode): 68 * Modules/webaudio/PannerNode.cpp: 69 (WebCore::PannerNode::PannerNode): 70 (WebCore::PannerNode::uninitialize): 71 (WebCore::PannerNode::setPanningModel): 72 * Modules/webaudio/PannerNode.h: 73 * Modules/webaudio/PeriodicWave.cpp: 74 (WebCore::PeriodicWave::createBandLimitedTables): 75 * Modules/webaudio/PeriodicWave.h: 76 * Modules/webaudio/RealtimeAnalyser.cpp: 77 (WebCore::RealtimeAnalyser::RealtimeAnalyser): 78 (WebCore::RealtimeAnalyser::setFftSize): 79 * Modules/webaudio/RealtimeAnalyser.h: 80 * Modules/webaudio/ScriptProcessorNode.cpp: 81 (WebCore::ScriptProcessorNode::ScriptProcessorNode): 82 * Modules/webaudio/WaveShaperDSPKernel.cpp: 83 (WebCore::WaveShaperDSPKernel::lazyInitializeOversampling): 84 * Modules/webaudio/WaveShaperDSPKernel.h: 85 * Modules/webaudio/WaveShaperNode.cpp: 86 (WebCore::WaveShaperNode::WaveShaperNode): 87 * Modules/webaudio/WaveShaperProcessor.cpp: 88 
(WebCore::WaveShaperProcessor::createKernel): 89 * Modules/webaudio/WaveShaperProcessor.h: 90 * platform/audio/AudioBus.cpp: 91 (WebCore::AudioBus::AudioBus): 92 (WebCore::AudioBus::copyWithGainFrom): 93 * platform/audio/AudioBus.h: 94 * platform/audio/AudioChannel.cpp: 95 * platform/audio/AudioChannel.h: 96 (WebCore::AudioChannel::AudioChannel): 97 (WebCore::AudioChannel::set): 98 * platform/audio/AudioDSPKernelProcessor.h: 99 * platform/audio/AudioDestination.h: 100 * platform/audio/AudioResampler.cpp: 101 (WebCore::AudioResampler::AudioResampler): 102 (WebCore::AudioResampler::configureChannels): 103 * platform/audio/AudioResampler.h: 104 * platform/audio/AudioSession.h: 105 * platform/audio/DynamicsCompressor.cpp: 106 (WebCore::DynamicsCompressor::setNumberOfChannels): 107 * platform/audio/DynamicsCompressor.h: 108 * platform/audio/DynamicsCompressorKernel.cpp: 109 (WebCore::DynamicsCompressorKernel::setNumberOfChannels): 110 * platform/audio/DynamicsCompressorKernel.h: 111 * platform/audio/FFTFrame.cpp: 112 (WebCore::FFTFrame::createInterpolatedFrame): 113 * platform/audio/FFTFrame.h: 114 * platform/audio/HRTFDatabase.cpp: 115 (WebCore::HRTFDatabase::HRTFDatabase): 116 * platform/audio/HRTFDatabase.h: 117 * platform/audio/HRTFDatabaseLoader.cpp: 118 (WebCore::HRTFDatabaseLoader::~HRTFDatabaseLoader): 119 (WebCore::HRTFDatabaseLoader::load): 120 * platform/audio/HRTFDatabaseLoader.h: 121 * platform/audio/HRTFElevation.cpp: 122 (WebCore::HRTFElevation::createForSubject): 123 (WebCore::HRTFElevation::createByInterpolatingSlices): 124 * platform/audio/HRTFElevation.h: 125 (WebCore::HRTFElevation::HRTFElevation): 126 * platform/audio/HRTFKernel.cpp: 127 (WebCore::HRTFKernel::HRTFKernel): 128 (WebCore::HRTFKernel::createImpulseResponse): 129 (WebCore::HRTFKernel::createInterpolatedKernel): 130 * platform/audio/HRTFKernel.h: 131 (WebCore::HRTFKernel::create): 132 (WebCore::HRTFKernel::HRTFKernel): 133 * platform/audio/MultiChannelResampler.cpp: 134 
(WebCore::MultiChannelResampler::MultiChannelResampler): 135 * platform/audio/MultiChannelResampler.h: 136 * platform/audio/Panner.cpp: 137 (WebCore::Panner::create): 138 * platform/audio/Panner.h: 139 * platform/audio/Reverb.cpp: 140 (WebCore::Reverb::initialize): 141 * platform/audio/Reverb.h: 142 * platform/audio/ReverbConvolver.h: 143 * platform/audio/ReverbConvolverStage.cpp: 144 (WebCore::ReverbConvolverStage::ReverbConvolverStage): 145 * platform/audio/ReverbConvolverStage.h: 146 * platform/audio/gstreamer/AudioDestinationGStreamer.cpp: 147 (WebCore::AudioDestination::create): 148 * platform/audio/gstreamer/AudioFileReaderGStreamer.cpp: 149 * platform/audio/ios/AudioDestinationIOS.cpp: 150 (WebCore::AudioDestination::create): 151 * platform/audio/ios/AudioSessionIOS.mm: 152 (WebCore::AudioSession::AudioSession): 153 * platform/audio/mac/AudioDestinationMac.cpp: 154 (WebCore::AudioDestination::create): 155 * platform/audio/mac/AudioDestinationMac.h: 156 * platform/audio/mac/AudioSessionMac.cpp: 157 (WebCore::AudioSession::AudioSession): 158 1 159 2014-01-20 Morten Stenshorne <mstensho@opera.com> 2 160 -
trunk/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
r156972 r162368 33 33 #include <runtime/ArrayBuffer.h> 34 34 #include <wtf/MainThread.h> 35 #include <wtf/OwnPtr.h>36 #include <wtf/PassOwnPtr.h>37 35 38 36 namespace WebCore { -
trunk/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
r156972 r162368 26 26 #define AsyncAudioDecoder_h 27 27 28 #include <memory> 28 29 #include <wtf/Forward.h> 29 30 #include <wtf/MessageQueue.h> 30 #include <wtf/PassOwnPtr.h>31 31 #include <wtf/PassRefPtr.h> 32 32 #include <wtf/RefPtr.h> -
trunk/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
r161644 r162368 39 39 , m_needAutomaticPull(false) 40 40 { 41 addInput( adoptPtr(new AudioNodeInput(this)));42 addOutput( adoptPtr(new AudioNodeOutput(this, outputChannelCount)));41 addInput(std::make_unique<AudioNodeInput>(this)); 42 addOutput(std::make_unique<AudioNodeOutput>(this, outputChannelCount)); 43 43 } 44 44 -
trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
r146102 r162368 40 40 : AudioNode(context, sampleRate) 41 41 { 42 addInput( adoptPtr(new AudioNodeInput(this)));43 addOutput( adoptPtr(new AudioNodeOutput(this, 1)));42 addInput(std::make_unique<AudioNodeInput>(this)); 43 addOutput(std::make_unique<AudioNodeOutput>(this, 1)); 44 44 45 45 // The subclass must create m_processor. -
trunk/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
r162139 r162368 27 27 28 28 #include "AudioNode.h" 29 #include <memory> 29 30 #include <wtf/PassRefPtr.h> 30 31 #include <wtf/RefCounted.h> … … 60 61 61 62 AudioProcessor* processor() { return m_processor.get(); } 62 OwnPtr<AudioProcessor> m_processor;63 std::unique_ptr<AudioProcessor> m_processor; 63 64 }; 64 65 -
trunk/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
r162296 r162368 74 74 75 75 // Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer. 76 addOutput( adoptPtr(new AudioNodeOutput(this, 1)));76 addOutput(std::make_unique<AudioNodeOutput>(this, 1)); 77 77 78 78 initialize(); -
trunk/Source/WebCore/Modules/webaudio/AudioContext.cpp
r161644 r162368 89 89 #include <wtf/Atomics.h> 90 90 #include <wtf/MainThread.h> 91 #include <wtf/OwnPtr.h>92 #include <wtf/PassOwnPtr.h>93 91 #include <wtf/Ref.h> 94 92 #include <wtf/RefCounted.h> -
trunk/Source/WebCore/Modules/webaudio/AudioContext.h
r162158 r162368 35 35 #include <wtf/HashSet.h> 36 36 #include <wtf/MainThread.h> 37 #include <wtf/OwnPtr.h>38 37 #include <wtf/PassRefPtr.h> 39 38 #include <wtf/RefCounted.h> -
trunk/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
r151302 r162368 41 41 , m_currentSampleFrame(0) 42 42 { 43 addInput( adoptPtr(new AudioNodeInput(this)));43 addInput(std::make_unique<AudioNodeInput>(this)); 44 44 45 45 setNodeType(NodeTypeDestination); -
trunk/Source/WebCore/Modules/webaudio/AudioNode.cpp
r161644 r162368 99 99 } 100 100 101 void AudioNode::addInput( PassOwnPtr<AudioNodeInput> input)102 { 103 m_inputs.append( input);104 } 105 106 void AudioNode::addOutput( PassOwnPtr<AudioNodeOutput> output)107 { 108 m_outputs.append( output);101 void AudioNode::addInput(std::unique_ptr<AudioNodeInput> input) 102 { 103 m_inputs.append(std::move(input)); 104 } 105 106 void AudioNode::addOutput(std::unique_ptr<AudioNodeOutput> output) 107 { 108 m_outputs.append(std::move(output)); 109 109 } 110 110 … … 325 325 ASSERT(context()->isAudioThread() && context()->isGraphOwner()); 326 326 327 ASSERT(m_inputs.contains(input)); 328 if (!m_inputs.contains(input)) 329 return; 330 331 input->updateInternalBus(); 327 for (const std::unique_ptr<AudioNodeInput>& savedInput : m_inputs) { 328 if (input == savedInput.get()) { 329 input->updateInternalBus(); 330 return; 331 } 332 } 333 334 ASSERT_NOT_REACHED(); 332 335 } 333 336 -
trunk/Source/WebCore/Modules/webaudio/AudioNode.h
r162158 r162368 28 28 #include "AudioBus.h" 29 29 #include "EventTarget.h" 30 #include <memory> 30 31 #include <wtf/Forward.h> 31 #include <wtf/OwnPtr.h>32 #include <wtf/PassOwnPtr.h>33 32 #include <wtf/RefPtr.h> 34 33 #include <wtf/Vector.h> … … 186 185 protected: 187 186 // Inputs and outputs must be created before the AudioNode is initialized. 188 void addInput( PassOwnPtr<AudioNodeInput>);189 void addOutput( PassOwnPtr<AudioNodeOutput>);187 void addInput(std::unique_ptr<AudioNodeInput>); 188 void addOutput(std::unique_ptr<AudioNodeOutput>); 190 189 191 190 // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process. … … 202 201 RefPtr<AudioContext> m_context; 203 202 float m_sampleRate; 204 Vector< OwnPtr<AudioNodeInput>> m_inputs;205 Vector< OwnPtr<AudioNodeOutput>> m_outputs;203 Vector<std::unique_ptr<AudioNodeInput>> m_inputs; 204 Vector<std::unique_ptr<AudioNodeOutput>> m_outputs; 206 205 207 206 double m_lastProcessingTime; -
trunk/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
r138849 r162368 37 37 { 38 38 // Initially setup as lowpass filter. 39 m_processor = adoptPtr(new BiquadProcessor(context, sampleRate, 1, false));39 m_processor = std::make_unique<BiquadProcessor>(context, sampleRate, 1, false); 40 40 setNodeType(NodeTypeBiquadFilter); 41 41 } -
trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
r135218 r162368 61 61 } 62 62 63 PassOwnPtr<AudioDSPKernel> BiquadProcessor::createKernel()63 std::unique_ptr<AudioDSPKernel> BiquadProcessor::createKernel() 64 64 { 65 return adoptPtr(new BiquadDSPKernel(this));65 return std::make_unique<BiquadDSPKernel>(this); 66 66 } 67 67 … … 129 129 // thread on the main kernels. 130 130 131 OwnPtr<BiquadDSPKernel> responseKernel = adoptPtr(new BiquadDSPKernel(this));131 auto responseKernel = std::make_unique<BiquadDSPKernel>(this); 132 132 133 133 responseKernel->getFrequencyResponse(nFrequencies, frequencyHz, magResponse, phaseResponse); -
trunk/Source/WebCore/Modules/webaudio/BiquadProcessor.h
r162139 r162368 31 31 #include "AudioParam.h" 32 32 #include "Biquad.h" 33 #include <memory> 33 34 #include <wtf/RefPtr.h> 34 35 … … 54 55 virtual ~BiquadProcessor(); 55 56 56 virtual PassOwnPtr<AudioDSPKernel> createKernel() override;57 virtual std::unique_ptr<AudioDSPKernel> createKernel() override; 57 58 58 59 virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override; -
trunk/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
r159442 r162368 55 55 // Create the requested number of inputs. 56 56 for (unsigned i = 0; i < numberOfInputs; ++i) 57 addInput( adoptPtr(new AudioNodeInput(this)));57 addInput(std::make_unique<AudioNodeInput>(this)); 58 58 59 addOutput( adoptPtr(new AudioNodeOutput(this, 1)));59 addOutput(std::make_unique<AudioNodeOutput>(this, 1)); 60 60 61 61 setNodeType(NodeTypeChannelMerger); -
trunk/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
r159442 r162368 46 46 : AudioNode(context, sampleRate) 47 47 { 48 addInput( adoptPtr(new AudioNodeInput(this)));48 addInput(std::make_unique<AudioNodeInput>(this)); 49 49 50 50 // Create a fixed number of outputs (able to handle the maximum number of channels fed to an input). 51 51 for (unsigned i = 0; i < numberOfOutputs; ++i) 52 addOutput( adoptPtr(new AudioNodeOutput(this, 1)));52 addOutput(std::make_unique<AudioNodeOutput>(this, 1)); 53 53 54 54 setNodeType(NodeTypeChannelSplitter); -
trunk/Source/WebCore/Modules/webaudio/ConvolverNode.cpp
r162296 r162368 50 50 , m_normalize(true) 51 51 { 52 addInput( adoptPtr(new AudioNodeInput(this)));53 addOutput( adoptPtr(new AudioNodeOutput(this, 2)));52 addInput(std::make_unique<AudioNodeInput>(this)); 53 addOutput(std::make_unique<AudioNodeOutput>(this, 2)); 54 54 55 55 // Node-specific default mixing rules. … … 112 112 return; 113 113 114 m_reverb. clear();114 m_reverb.reset(); 115 115 AudioNode::uninitialize(); 116 116 } … … 142 142 // Create the reverb with the given impulse response. 143 143 bool useBackgroundThreads = !context()->isOfflineContext(); 144 OwnPtr<Reverb> reverb = adoptPtr(new Reverb(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize));144 auto reverb = std::make_unique<Reverb>(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize); 145 145 146 146 { 147 147 // Synchronize with process(). 148 148 std::lock_guard<std::mutex> lock(m_processMutex); 149 m_reverb = reverb.release();149 m_reverb = std::move(reverb); 150 150 m_buffer = buffer; 151 151 } -
trunk/Source/WebCore/Modules/webaudio/ConvolverNode.h
r162296 r162368 27 27 28 28 #include "AudioNode.h" 29 #include <memory> 29 30 #include <mutex> 30 #include <wtf/OwnPtr.h>31 31 #include <wtf/RefPtr.h> 32 32 … … 64 64 virtual double latencyTime() const override; 65 65 66 OwnPtr<Reverb> m_reverb;66 std::unique_ptr<Reverb> m_reverb; 67 67 RefPtr<AudioBuffer> m_buffer; 68 68 -
trunk/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
r162139 r162368 28 28 #include "AudioDestination.h" 29 29 #include "AudioDestinationNode.h" 30 #include < wtf/OwnPtr.h>30 #include <memory> 31 31 32 32 namespace WebCore { … … 57 57 void createDestination(); 58 58 59 OwnPtr<AudioDestination> m_destination;59 std::unique_ptr<AudioDestination> m_destination; 60 60 String m_inputDeviceId; 61 61 unsigned m_numberOfInputChannels; -
trunk/Source/WebCore/Modules/webaudio/DelayNode.cpp
r134644 r162368 40 40 return; 41 41 } 42 m_processor = adoptPtr(new DelayProcessor(context, sampleRate, 1, maxDelayTime));42 m_processor = std::make_unique<DelayProcessor>(context, sampleRate, 1, maxDelayTime); 43 43 setNodeType(NodeTypeDelay); 44 44 } -
trunk/Source/WebCore/Modules/webaudio/DelayProcessor.cpp
r116485 r162368 46 46 } 47 47 48 PassOwnPtr<AudioDSPKernel> DelayProcessor::createKernel()48 std::unique_ptr<AudioDSPKernel> DelayProcessor::createKernel() 49 49 { 50 return adoptPtr(new DelayDSPKernel(this));50 return std::make_unique<DelayDSPKernel>(this); 51 51 } 52 52 -
trunk/Source/WebCore/Modules/webaudio/DelayProcessor.h
r162139 r162368 28 28 #include "AudioDSPKernelProcessor.h" 29 29 #include "AudioParam.h" 30 31 #include <wtf/PassOwnPtr.h> 30 #include <memory> 32 31 #include <wtf/RefPtr.h> 33 32 … … 41 40 virtual ~DelayProcessor(); 42 41 43 virtual PassOwnPtr<AudioDSPKernel> createKernel() override;42 virtual std::unique_ptr<AudioDSPKernel> createKernel() override; 44 43 45 44 AudioParam* delayTime() const { return m_delayTime.get(); } -
trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp
r116485 r162368 42 42 : AudioNode(context, sampleRate) 43 43 { 44 addInput( adoptPtr(new AudioNodeInput(this)));45 addOutput( adoptPtr(new AudioNodeOutput(this, defaultNumberOfOutputChannels)));44 addInput(std::make_unique<AudioNodeInput>(this)); 45 addOutput(std::make_unique<AudioNodeOutput>(this, defaultNumberOfOutputChannels)); 46 46 47 47 setNodeType(NodeTypeDynamicsCompressor); … … 96 96 97 97 AudioNode::initialize(); 98 m_dynamicsCompressor = adoptPtr(new DynamicsCompressor(sampleRate(), defaultNumberOfOutputChannels));98 m_dynamicsCompressor = std::make_unique<DynamicsCompressor>(sampleRate(), defaultNumberOfOutputChannels); 99 99 } 100 100 … … 104 104 return; 105 105 106 m_dynamicsCompressor. clear();106 m_dynamicsCompressor.reset(); 107 107 AudioNode::uninitialize(); 108 108 } -
trunk/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h
r162139 r162368 28 28 #include "AudioNode.h" 29 29 #include "AudioParam.h" 30 #include < wtf/OwnPtr.h>30 #include <memory> 31 31 32 32 namespace WebCore { … … 65 65 DynamicsCompressorNode(AudioContext*, float sampleRate); 66 66 67 OwnPtr<DynamicsCompressor> m_dynamicsCompressor;67 std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor; 68 68 RefPtr<AudioParam> m_threshold; 69 69 RefPtr<AudioParam> m_knee; -
trunk/Source/WebCore/Modules/webaudio/GainNode.cpp
r146486 r162368 42 42 m_gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0); 43 43 44 addInput( adoptPtr(new AudioNodeInput(this)));45 addOutput( adoptPtr(new AudioNodeOutput(this, 1)));44 addInput(std::make_unique<AudioNodeInput>(this)); 45 addOutput(std::make_unique<AudioNodeOutput>(this, 1)); 46 46 47 47 setNodeType(NodeTypeGain); -
trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
r162296 r162368 53 53 { 54 54 // Default to stereo. This could change depending on what the media element .src is set to. 55 addOutput( adoptPtr(new AudioNodeOutput(this, 2)));55 addOutput(std::make_unique<AudioNodeOutput>(this, 2)); 56 56 57 57 setNodeType(NodeTypeMediaElementAudioSource); … … 85 85 if (sourceSampleRate != sampleRate()) { 86 86 double scaleFactor = sourceSampleRate / sampleRate(); 87 m_multiChannelResampler = adoptPtr(new MultiChannelResampler(scaleFactor, numberOfChannels));87 m_multiChannelResampler = std::make_unique<MultiChannelResampler>(scaleFactor, numberOfChannels); 88 88 } else { 89 89 // Bypass resampling. 90 m_multiChannelResampler. clear();90 m_multiChannelResampler.reset(); 91 91 } 92 92 -
trunk/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h
r162296 r162368 32 32 #include "HTMLMediaElement.h" 33 33 #include "MultiChannelResampler.h" 34 #include <memory> 34 35 #include <mutex> 35 #include <wtf/OwnPtr.h>36 36 #include <wtf/PassRefPtr.h> 37 37 … … 73 73 double m_sourceSampleRate; 74 74 75 OwnPtr<MultiChannelResampler> m_multiChannelResampler;75 std::unique_ptr<MultiChannelResampler> m_multiChannelResampler; 76 76 }; 77 77 -
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h
r162139 r162368 31 31 #include "AudioBus.h" 32 32 #include "MediaStream.h" 33 #include <wtf/OwnPtr.h>34 33 #include <wtf/PassRefPtr.h> 35 34 -
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp
r159442 r162368 32 32 #include "NotImplemented.h" 33 33 #include "UUID.h" 34 #include <wtf/PassOwnPtr.h>35 34 36 35 namespace WebCore { -
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
r162296 r162368 49 49 { 50 50 // Default to stereo. This could change depending on the format of the MediaStream's audio track. 51 addOutput( adoptPtr(new AudioNodeOutput(this, 2)));51 addOutput(std::make_unique<AudioNodeOutput>(this, 2)); 52 52 53 53 setNodeType(NodeTypeMediaStreamAudioSource); -
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h
r162296 r162368 33 33 #include "MediaStream.h" 34 34 #include <mutex> 35 #include <wtf/OwnPtr.h>36 35 #include <wtf/PassRefPtr.h> 37 36 -
trunk/Source/WebCore/Modules/webaudio/OscillatorNode.cpp
r162296 r162368 71 71 72 72 // An oscillator is always mono. 73 addOutput( adoptPtr(new AudioNodeOutput(this, 1)));73 addOutput(std::make_unique<AudioNodeOutput>(this, 1)); 74 74 75 75 initialize(); -
trunk/Source/WebCore/Modules/webaudio/PannerNode.cpp
r162296 r162368 53 53 , m_connectionCount(0) 54 54 { 55 addInput( adoptPtr(new AudioNodeInput(this)));56 addOutput( adoptPtr(new AudioNodeOutput(this, 2)));55 addInput(std::make_unique<AudioNodeInput>(this)); 56 addOutput(std::make_unique<AudioNodeOutput>(this, 2)); 57 57 58 58 // Node-specific default mixing rules. … … 155 155 return; 156 156 157 m_panner. clear();157 m_panner.reset(); 158 158 AudioNode::uninitialize(); 159 159 } … … 200 200 std::lock_guard<std::mutex> lock(m_pannerMutex); 201 201 202 OwnPtr<Panner> newPanner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader()); 203 m_panner = newPanner.release(); 202 m_panner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader()); 204 203 m_panningModel = model; 205 204 } -
trunk/Source/WebCore/Modules/webaudio/PannerNode.h
r162296 r162368 34 34 #include "FloatPoint3D.h" 35 35 #include "Panner.h" 36 #include <memory> 36 37 #include <mutex> 37 #include <wtf/OwnPtr.h>38 38 39 39 namespace WebCore { … … 141 141 void notifyAudioSourcesConnectedToNode(AudioNode*); 142 142 143 OwnPtr<Panner> m_panner;143 std::unique_ptr<Panner> m_panner; 144 144 unsigned m_panningModel; 145 145 -
trunk/Source/WebCore/Modules/webaudio/PeriodicWave.cpp
r159442 r162368 37 37 #include "VectorMath.h" 38 38 #include <algorithm> 39 #include <wtf/OwnPtr.h>40 39 41 40 const unsigned PeriodicWaveSize = 4096; // This must be a power of two. … … 199 198 200 199 // Create the band-limited table. 201 OwnPtr<AudioFloatArray> table = adoptPtr(new AudioFloatArray(m_periodicWaveSize)); 202 m_bandLimitedTables.append(table.release()); 200 m_bandLimitedTables.append(std::make_unique<AudioFloatArray>(m_periodicWaveSize)); 203 201 204 202 // Apply an inverse FFT to generate the time-domain table data. -
trunk/Source/WebCore/Modules/webaudio/PeriodicWave.h
r157653 r162368 31 31 32 32 #include "AudioArray.h" 33 #include <memory> 33 34 #include <runtime/Float32Array.h> 34 #include <wtf/OwnPtr.h>35 35 #include <wtf/PassRefPtr.h> 36 36 #include <wtf/RefCounted.h> … … 90 90 // Creates tables based on numberOfComponents Fourier coefficients. 91 91 void createBandLimitedTables(const float* real, const float* imag, unsigned numberOfComponents); 92 Vector< OwnPtr<AudioFloatArray>> m_bandLimitedTables;92 Vector<std::unique_ptr<AudioFloatArray>> m_bandLimitedTables; 93 93 }; 94 94 -
trunk/Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp
r153728 r162368 61 61 , m_maxDecibels(DefaultMaxDecibels) 62 62 { 63 m_analysisFrame = adoptPtr(new FFTFrame(DefaultFFTSize));63 m_analysisFrame = std::make_unique<FFTFrame>(DefaultFFTSize); 64 64 } 65 65 … … 87 87 88 88 if (m_fftSize != size) { 89 m_analysisFrame = adoptPtr(new FFTFrame(size));89 m_analysisFrame = std::make_unique<FFTFrame>(size); 90 90 // m_magnitudeBuffer has size = fftSize / 2 because it contains floats reduced from complex values in m_analysisFrame. 91 91 m_magnitudeBuffer.allocate(size / 2); -
trunk/Source/WebCore/Modules/webaudio/RealtimeAnalyser.h
r155112 r162368 27 27 28 28 #include "AudioArray.h" 29 #include <memory> 29 30 #include <runtime/Float32Array.h> 30 31 #include <runtime/Uint8Array.h> 31 32 #include <wtf/Forward.h> 32 33 #include <wtf/Noncopyable.h> 33 #include <wtf/OwnPtr.h>34 34 35 35 namespace WebCore { … … 82 82 83 83 size_t m_fftSize; 84 OwnPtr<FFTFrame> m_analysisFrame;84 std::unique_ptr<FFTFrame> m_analysisFrame; 85 85 void doFFTAnalysis(); 86 86 -
trunk/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
r159442 r162368 87 87 ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels()); 88 88 89 addInput( adoptPtr(new AudioNodeInput(this)));90 addOutput( adoptPtr(new AudioNodeOutput(this, numberOfOutputChannels)));89 addInput(std::make_unique<AudioNodeInput>(this)); 90 addOutput(std::make_unique<AudioNodeOutput>(this, numberOfOutputChannels)); 91 91 92 92 setNodeType(NodeTypeJavaScript); -
trunk/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.cpp
r157971 r162368 50 50 51 51 if (!m_tempBuffer) { 52 m_tempBuffer = adoptPtr(new AudioFloatArray(RenderingQuantum * 2));53 m_tempBuffer2 = adoptPtr(new AudioFloatArray(RenderingQuantum * 4));54 m_upSampler = adoptPtr(new UpSampler(RenderingQuantum));55 m_downSampler = adoptPtr(new DownSampler(RenderingQuantum * 2));56 m_upSampler2 = adoptPtr(new UpSampler(RenderingQuantum * 2));57 m_downSampler2 = adoptPtr(new DownSampler(RenderingQuantum * 4));52 m_tempBuffer = std::make_unique<AudioFloatArray>(RenderingQuantum * 2); 53 m_tempBuffer2 = std::make_unique<AudioFloatArray>(RenderingQuantum * 4); 54 m_upSampler = std::make_unique<UpSampler>(RenderingQuantum); 55 m_downSampler = std::make_unique<DownSampler>(RenderingQuantum * 2); 56 m_upSampler2 = std::make_unique<UpSampler>(RenderingQuantum * 2); 57 m_downSampler2 = std::make_unique<DownSampler>(RenderingQuantum * 4); 58 58 } 59 59 } -
trunk/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h
r162139 r162368 31 31 #include "UpSampler.h" 32 32 #include "WaveShaperProcessor.h" 33 #include < wtf/OwnPtr.h>33 #include <memory> 34 34 35 35 namespace WebCore { … … 63 63 64 64 // Oversampling. 65 OwnPtr<AudioFloatArray> m_tempBuffer;66 OwnPtr<AudioFloatArray> m_tempBuffer2;67 OwnPtr<UpSampler> m_upSampler;68 OwnPtr<DownSampler> m_downSampler;69 OwnPtr<UpSampler> m_upSampler2;70 OwnPtr<DownSampler> m_downSampler2;65 std::unique_ptr<AudioFloatArray> m_tempBuffer; 66 std::unique_ptr<AudioFloatArray> m_tempBuffer2; 67 std::unique_ptr<UpSampler> m_upSampler; 68 std::unique_ptr<DownSampler> m_downSampler; 69 std::unique_ptr<UpSampler> m_upSampler2; 70 std::unique_ptr<DownSampler> m_downSampler2; 71 71 }; 72 72 -
trunk/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp
r161644 r162368 37 37 : AudioBasicProcessorNode(context, context->sampleRate()) 38 38 { 39 m_processor = adoptPtr(new WaveShaperProcessor(context->sampleRate(), 1));39 m_processor = std::make_unique<WaveShaperProcessor>(context->sampleRate(), 1); 40 40 setNodeType(NodeTypeWaveShaper); 41 41 -
trunk/Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp
r162296 r162368 45 45 } 46 46 47 PassOwnPtr<AudioDSPKernel> WaveShaperProcessor::createKernel()47 std::unique_ptr<AudioDSPKernel> WaveShaperProcessor::createKernel() 48 48 { 49 return adoptPtr(new WaveShaperDSPKernel(this));49 return std::make_unique<WaveShaperDSPKernel>(this); 50 50 } 51 51 -
trunk/Source/WebCore/Modules/webaudio/WaveShaperProcessor.h
r162296 r162368 29 29 #include "AudioDSPKernelProcessor.h" 30 30 #include "AudioNode.h" 31 #include <memory> 31 32 #include <mutex> 32 33 #include <runtime/Float32Array.h> … … 49 50 virtual ~WaveShaperProcessor(); 50 51 51 virtual PassOwnPtr<AudioDSPKernel> createKernel() override;52 virtual std::unique_ptr<AudioDSPKernel> createKernel() override; 52 53 53 54 virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override; -
trunk/Source/WebCore/platform/audio/AudioBus.cpp
r149817 r162368 40 40 #include <assert.h> 41 41 #include <math.h> 42 #include <wtf/OwnPtr.h>43 #include <wtf/PassOwnPtr.h>44 42 45 43 namespace WebCore { … … 67 65 68 66 for (unsigned i = 0; i < numberOfChannels; ++i) { 69 PassOwnPtr<AudioChannel> channel = allocate ? adoptPtr(new AudioChannel(length)) : adoptPtr(new AudioChannel(0, length));70 m_channels.append( channel);67 auto channel = allocate ? std::make_unique<AudioChannel>(length) : std::make_unique<AudioChannel>(nullptr, length); 68 m_channels.append(std::move(channel)); 71 69 } 72 70 … … 465 463 if (framesToDezipper) { 466 464 if (!m_dezipperGainValues.get() || m_dezipperGainValues->size() < framesToDezipper) 467 m_dezipperGainValues = adoptPtr(new AudioFloatArray(framesToDezipper));465 m_dezipperGainValues = std::make_unique<AudioFloatArray>(framesToDezipper); 468 466 469 467 float* gainValues = m_dezipperGainValues->data(); -
trunk/Source/WebCore/platform/audio/AudioBus.h
r157653 r162368 31 31 32 32 #include "AudioChannel.h" 33 #include <memory> 33 34 #include <wtf/Noncopyable.h> 34 #include <wtf/PassOwnPtr.h>35 35 #include <wtf/ThreadSafeRefCounted.h> 36 36 #include <wtf/Vector.h> … … 159 159 160 160 size_t m_length; 161 Vector< OwnPtr<AudioChannel>> m_channels;161 Vector<std::unique_ptr<AudioChannel>> m_channels; 162 162 int m_layout; 163 163 float m_busGain; 164 OwnPtr<AudioFloatArray> m_dezipperGainValues;164 std::unique_ptr<AudioFloatArray> m_dezipperGainValues; 165 165 bool m_isFirstTime; 166 166 float m_sampleRate; // 0.0 if unknown or N/A -
trunk/Source/WebCore/platform/audio/AudioChannel.cpp
r131262 r162368 36 36 #include <algorithm> 37 37 #include <math.h> 38 #include <wtf/OwnPtr.h>39 38 40 39 namespace WebCore { -
trunk/Source/WebCore/platform/audio/AudioChannel.h
r131262 r162368 31 31 32 32 #include "AudioArray.h" 33 #include < wtf/PassOwnPtr.h>33 #include <memory> 34 34 35 35 namespace WebCore { … … 43 43 44 44 // Reference an external buffer. 45 AudioChannel(float* storage, size_t length)45 explicit AudioChannel(float* storage, size_t length) 46 46 : m_length(length) 47 47 , m_rawPointer(storage) … … 56 56 , m_silent(true) 57 57 { 58 m_memBuffer = adoptPtr(new AudioFloatArray(length));58 m_memBuffer = std::make_unique<AudioFloatArray>(length); 59 59 } 60 60 … … 71 71 void set(float* storage, size_t length) 72 72 { 73 m_memBuffer. clear(); // cleanup managed storage73 m_memBuffer.reset(); // cleanup managed storage 74 74 m_rawPointer = storage; 75 75 m_length = length; … … 131 131 132 132 float* m_rawPointer; 133 OwnPtr<AudioFloatArray> m_memBuffer;133 std::unique_ptr<AudioFloatArray> m_memBuffer; 134 134 bool m_silent; 135 135 }; -
trunk/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
r162139 r162368 34 34 #include "AudioBus.h" 35 35 #include "AudioProcessor.h" 36 #include <wtf/OwnPtr.h>37 #include <wtf/PassOwnPtr.h>38 36 #include <wtf/Vector.h> 39 37 … … 55 53 // Subclasses create the appropriate type of processing kernel here. 56 54 // We'll call this to create a kernel for each channel. 57 virtual PassOwnPtr<AudioDSPKernel> createKernel() = 0;55 virtual std::unique_ptr<AudioDSPKernel> createKernel() = 0; 58 56 59 57 // AudioProcessor methods … … 69 67 70 68 protected: 71 Vector< OwnPtr<AudioDSPKernel>> m_kernels;69 Vector<std::unique_ptr<AudioDSPKernel>> m_kernels; 72 70 bool m_hasJustReset; 73 71 }; -
trunk/Source/WebCore/platform/audio/AudioDestination.h
r144720 r162368 30 30 #define AudioDestination_h 31 31 32 #include <wtf/OwnPtr.h> 33 #include <wtf/PassOwnPtr.h> 32 #include <memory> 34 33 #include <wtf/text/WTFString.h> 35 34 … … 46 45 // Pass in (numberOfInputChannels > 0) if live/local audio input is desired. 47 46 // Port-specific device identification information for live/local input streams can be passed in the inputDeviceId. 48 static PassOwnPtr<AudioDestination> create(AudioIOCallback&, const String& inputDeviceId, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate);47 static std::unique_ptr<AudioDestination> create(AudioIOCallback&, const String& inputDeviceId, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate); 49 48 50 49 virtual ~AudioDestination() { } -
trunk/Source/WebCore/platform/audio/AudioResampler.cpp
r159027 r162368 40 40 : m_rate(1.0) 41 41 { 42 m_kernels.append( adoptPtr(new AudioResamplerKernel(this)));42 m_kernels.append(std::make_unique<AudioResamplerKernel>(this)); 43 43 m_sourceBus = AudioBus::create(1, 0, false); 44 44 } … … 48 48 { 49 49 for (unsigned i = 0; i < numberOfChannels; ++i) 50 m_kernels.append( adoptPtr(new AudioResamplerKernel(this)));50 m_kernels.append(std::make_unique<AudioResamplerKernel>(this)); 51 51 52 52 m_sourceBus = AudioBus::create(numberOfChannels, 0, false); … … 62 62 if (numberOfChannels > currentSize) { 63 63 for (unsigned i = currentSize; i < numberOfChannels; ++i) 64 m_kernels.append( adoptPtr(new AudioResamplerKernel(this)));64 m_kernels.append(std::make_unique<AudioResamplerKernel>(this)); 65 65 } else 66 66 m_kernels.resize(numberOfChannels); -
trunk/Source/WebCore/platform/audio/AudioResampler.h
r157653 r162368 29 29 #include "AudioResamplerKernel.h" 30 30 #include "AudioSourceProvider.h" 31 #include < wtf/OwnPtr.h>31 #include <memory> 32 32 #include <wtf/Vector.h> 33 33 … … 60 60 private: 61 61 double m_rate; 62 Vector< OwnPtr<AudioResamplerKernel>> m_kernels;62 Vector<std::unique_ptr<AudioResamplerKernel>> m_kernels; 63 63 RefPtr<AudioBus> m_sourceBus; 64 64 }; -
trunk/Source/WebCore/platform/audio/AudioSession.h
r150651 r162368 29 29 #if USE(AUDIO_SESSION) 30 30 31 #include <memory> 31 32 #include <wtf/HashSet.h> 32 #include <wtf/OwnPtr.h>33 33 34 34 namespace WebCore { … … 75 75 ~AudioSession(); 76 76 77 OwnPtr<AudioSessionPrivate> m_private;77 std::unique_ptr<AudioSessionPrivate> m_private; 78 78 HashSet<AudioSessionListener*> m_listeners; 79 79 }; -
trunk/Source/WebCore/platform/audio/DynamicsCompressor.cpp
r156015 r162368 272 272 m_postFilterPacks.clear(); 273 273 for (unsigned i = 0; i < numberOfChannels; ++i) { 274 m_preFilterPacks.append( adoptPtr(new ZeroPoleFilterPack4()));275 m_postFilterPacks.append( adoptPtr(new ZeroPoleFilterPack4()));274 m_preFilterPacks.append(std::make_unique<ZeroPoleFilterPack4>()); 275 m_postFilterPacks.append(std::make_unique<ZeroPoleFilterPack4>()); 276 276 } 277 277 -
trunk/Source/WebCore/platform/audio/DynamicsCompressor.h
r157653 r162368 100 100 101 101 // Per-channel emphasis filters. 102 Vector< OwnPtr<ZeroPoleFilterPack4>> m_preFilterPacks;103 Vector< OwnPtr<ZeroPoleFilterPack4>> m_postFilterPacks;102 Vector<std::unique_ptr<ZeroPoleFilterPack4>> m_preFilterPacks; 103 Vector<std::unique_ptr<ZeroPoleFilterPack4>> m_postFilterPacks; 104 104 105 105 std::unique_ptr<const float*[]> m_sourceChannels; -
trunk/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
r159027 r162368 77 77 m_preDelayBuffers.clear(); 78 78 for (unsigned i = 0; i < numberOfChannels; ++i) 79 m_preDelayBuffers.append( adoptPtr(new AudioFloatArray(MaxPreDelayFrames)));79 m_preDelayBuffers.append(std::make_unique<AudioFloatArray>(MaxPreDelayFrames)); 80 80 } 81 81 -
trunk/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
r157653 r162368 31 31 32 32 #include "AudioArray.h" 33 34 #include <wtf/OwnPtr.h> 35 #include <wtf/PassOwnPtr.h> 33 #include <memory> 36 34 37 35 namespace WebCore { … … 89 87 void setPreDelayTime(float); 90 88 91 Vector< OwnPtr<AudioFloatArray>> m_preDelayBuffers;89 Vector<std::unique_ptr<AudioFloatArray>> m_preDelayBuffers; 92 90 int m_preDelayReadIndex; 93 91 int m_preDelayWriteIndex; -
trunk/Source/WebCore/platform/audio/FFTFrame.cpp
r149970 r162368 36 36 #include <complex> 37 37 #include <wtf/MathExtras.h> 38 #include <wtf/OwnPtr.h>39 38 40 39 #ifndef NDEBUG … … 54 53 } 55 54 56 PassOwnPtr<FFTFrame> FFTFrame::createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x)57 { 58 OwnPtr<FFTFrame> newFrame = adoptPtr(new FFTFrame(frame1.fftSize()));55 std::unique_ptr<FFTFrame> FFTFrame::createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x) 56 { 57 auto newFrame = std::make_unique<FFTFrame>(frame1.fftSize()); 59 58 60 59 newFrame->interpolateFrequencyComponents(frame1, frame2, x); … … 69 68 newFrame->doFFT(buffer.data()); 70 69 71 return newFrame .release();70 return newFrame; 72 71 } 73 72 -
trunk/Source/WebCore/platform/audio/FFTFrame.h
r160045 r162368 50 50 #if USE(WEBAUDIO_GSTREAMER) 51 51 #include <glib.h> 52 #include <memory>53 52 G_BEGIN_DECLS 54 53 #include <gst/fft/gstfftf32.h> … … 73 72 #endif 74 73 74 #include <memory> 75 75 #include <wtf/Forward.h> 76 #include <wtf/PassOwnPtr.h>77 76 #include <wtf/Threading.h> 78 77 … … 106 105 107 106 // Interpolates from frame1 -> frame2 as x goes from 0.0 -> 1.0 108 static PassOwnPtr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x);107 static std::unique_ptr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x); 109 108 110 109 void doPaddedFFT(const float* data, size_t dataSize); // zero-padding with dataSize <= fftSize -
trunk/Source/WebCore/platform/audio/HRTFDatabase.cpp
r159027 r162368 44 44 const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor; 45 45 46 PassOwnPtr<HRTFDatabase> HRTFDatabase::create(float sampleRate)47 {48 OwnPtr<HRTFDatabase> hrtfDatabase = adoptPtr(new HRTFDatabase(sampleRate));49 return hrtfDatabase.release();50 }51 52 46 HRTFDatabase::HRTFDatabase(float sampleRate) 53 47 : m_elevations(NumberOfTotalElevations) … … 56 50 unsigned elevationIndex = 0; 57 51 for (int elevation = MinElevation; elevation <= MaxElevation; elevation += RawElevationAngleSpacing) { 58 OwnPtr<HRTFElevation> hrtfElevation = HRTFElevation::createForSubject("Composite", elevation, sampleRate);52 std::unique_ptr<HRTFElevation> hrtfElevation = HRTFElevation::createForSubject("Composite", elevation, sampleRate); 59 53 ASSERT(hrtfElevation.get()); 60 54 if (!hrtfElevation.get()) 61 55 return; 62 56 63 m_elevations[elevationIndex] = hrtfElevation.release();57 m_elevations[elevationIndex] = std::move(hrtfElevation); 64 58 elevationIndex += InterpolationFactor; 65 59 } -
trunk/Source/WebCore/platform/audio/HRTFDatabase.h
r157653 r162368 31 31 32 32 #include "HRTFElevation.h" 33 #include <memory> 33 34 #include <wtf/Forward.h> 34 35 #include <wtf/Noncopyable.h> 35 #include <wtf/OwnPtr.h>36 36 #include <wtf/PassRefPtr.h> 37 37 #include <wtf/Vector.h> … … 44 44 WTF_MAKE_NONCOPYABLE(HRTFDatabase); 45 45 public: 46 static PassOwnPtr<HRTFDatabase> create(float sampleRate);46 explicit HRTFDatabase(float sampleRate); 47 47 48 48 // getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation. … … 61 61 62 62 private: 63 explicit HRTFDatabase(float sampleRate);64 65 63 // Minimum and maximum elevation angles (inclusive) for a HRTFDatabase. 66 64 static const int MinElevation; … … 77 75 static unsigned indexFromElevationAngle(double); 78 76 79 Vector< OwnPtr<HRTFElevation>> m_elevations;77 Vector<std::unique_ptr<HRTFElevation>> m_elevations; 80 78 float m_sampleRate; 81 79 }; -
trunk/Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp
r157806 r162368 78 78 79 79 waitForLoaderThreadCompletion(); 80 m_hrtfDatabase. clear();80 m_hrtfDatabase.reset(); 81 81 82 82 // Remove ourself from the map. … … 97 97 if (!m_hrtfDatabase.get()) { 98 98 // Load the default HRTF database. 99 m_hrtfDatabase = HRTFDatabase::create(m_databaseSampleRate);99 m_hrtfDatabase = std::make_unique<HRTFDatabase>(m_databaseSampleRate); 100 100 } 101 101 } -
trunk/Source/WebCore/platform/audio/HRTFDatabaseLoader.h
r157806 r162368 31 31 32 32 #include "HRTFDatabase.h" 33 #include <memory> 33 34 #include <wtf/HashMap.h> 34 35 #include <wtf/PassRefPtr.h> … … 73 74 void loadAsynchronously(); 74 75 75 OwnPtr<HRTFDatabase> m_hrtfDatabase;76 std::unique_ptr<HRTFDatabase> m_hrtfDatabase; 76 77 77 78 // Holding a m_threadLock is required when accessing m_databaseLoaderThread. -
trunk/Source/WebCore/platform/audio/HRTFElevation.cpp
r159027 r162368 41 41 #include <algorithm> 42 42 #include <math.h> 43 #include <wtf/OwnPtr.h>44 45 43 46 44 namespace WebCore { … … 239 237 }; 240 238 241 PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)239 std::unique_ptr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate) 242 240 { 243 241 bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation; … … 246 244 return nullptr; 247 245 248 OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));249 OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));246 auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths); 247 auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths); 250 248 251 249 // Load convolution kernels from HRTF files. … … 276 274 } 277 275 278 OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), elevation, sampleRate)); 279 return hrtfElevation.release(); 280 } 281 282 PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate) 276 return std::make_unique<HRTFElevation>(std::move(kernelListL), std::move(kernelListR), elevation, sampleRate); 277 } 278 279 std::unique_ptr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate) 283 280 { 284 281 ASSERT(hrtfElevation1 && hrtfElevation2); … … 288 285 ASSERT(x >= 0.0 && x < 1.0); 289 286 290 OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));291 OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));287 auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths); 288 
auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths); 292 289 293 290 HRTFKernelList* kernelListL1 = hrtfElevation1->kernelListL(); … … 305 302 double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle(); 306 303 307 OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), static_cast<int>(angle), sampleRate)); 308 return hrtfElevation.release(); 304 return std::make_unique<HRTFElevation>(std::move(kernelListL), std::move(kernelListR), static_cast<int>(angle), sampleRate); 309 305 } 310 306 -
trunk/Source/WebCore/platform/audio/HRTFElevation.h
r148921 r162368 31 31 32 32 #include "HRTFKernel.h" 33 #include <memory> 33 34 #include <wtf/Noncopyable.h> 34 #include <wtf/OwnPtr.h>35 #include <wtf/PassOwnPtr.h>36 35 #include <wtf/PassRefPtr.h> 37 36 #include <wtf/RefCounted.h> … … 47 46 WTF_MAKE_NONCOPYABLE(HRTFElevation); 48 47 public: 48 HRTFElevation(std::unique_ptr<HRTFKernelList> kernelListL, std::unique_ptr<HRTFKernelList> kernelListR, int elevation, float sampleRate) 49 : m_kernelListL(std::move(kernelListL)) 50 , m_kernelListR(std::move(kernelListR)) 51 , m_elevationAngle(elevation) 52 , m_sampleRate(sampleRate) 53 { 54 } 55 49 56 // Loads and returns an HRTFElevation with the given HRTF database subject name and elevation from browser (or WebKit.framework) resources. 50 57 // Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names. 51 58 // Interpolated azimuths will be generated based on InterpolationFactor. 52 59 // Valid values for elevation are -45 -> +90 in 15 degree increments. 53 static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);60 static std::unique_ptr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate); 54 61 55 62 // Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation. 56 static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);63 static std::unique_ptr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate); 57 64 58 65 // Returns the list of left or right ear HRTFKernels for all the azimuths going from 0 to 360 degrees. 
… … 94 101 95 102 private: 96 HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate) 97 : m_kernelListL(kernelListL) 98 , m_kernelListR(kernelListR) 99 , m_elevationAngle(elevation) 100 , m_sampleRate(sampleRate) 101 { 102 } 103 104 OwnPtr<HRTFKernelList> m_kernelListL; 105 OwnPtr<HRTFKernelList> m_kernelListR; 103 std::unique_ptr<HRTFKernelList> m_kernelListL; 104 std::unique_ptr<HRTFKernelList> m_kernelListR; 106 105 double m_elevationAngle; 107 106 float m_sampleRate; -
trunk/Source/WebCore/platform/audio/HRTFKernel.cpp
r159027 r162368 93 93 } 94 94 95 m_fftFrame = adoptPtr(new FFTFrame(fftSize));95 m_fftFrame = std::make_unique<FFTFrame>(fftSize); 96 96 m_fftFrame->doPaddedFFT(impulseResponse, truncatedResponseLength); 97 97 } … … 102 102 } 103 103 104 PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()104 std::unique_ptr<AudioChannel> HRTFKernel::createImpulseResponse() 105 105 { 106 OwnPtr<AudioChannel> channel = adoptPtr(new AudioChannel(fftSize()));106 auto channel = std::make_unique<AudioChannel>(fftSize()); 107 107 FFTFrame fftFrame(*m_fftFrame); 108 108 … … 111 111 fftFrame.doInverseFFT(channel->mutableData()); 112 112 113 return channel .release();113 return channel; 114 114 } 115 115 … … 132 132 float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay(); 133 133 134 OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);135 return HRTFKernel::create( interpolatedFrame.release(), frameDelay, sampleRate1);134 std::unique_ptr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x); 135 return HRTFKernel::create(std::move(interpolatedFrame), frameDelay, sampleRate1); 136 136 } 137 137 -
trunk/Source/WebCore/platform/audio/HRTFKernel.h
r157653 r162368 31 31 32 32 #include "FFTFrame.h" 33 #include <wtf/OwnPtr.h> 34 #include <wtf/PassOwnPtr.h> 33 #include <memory> 35 34 #include <wtf/PassRefPtr.h> 36 35 #include <wtf/RefCounted.h> … … 57 56 } 58 57 59 static PassRefPtr<HRTFKernel> create( PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)58 static PassRefPtr<HRTFKernel> create(std::unique_ptr<FFTFrame> fftFrame, float frameDelay, float sampleRate) 60 59 { 61 return adoptRef(new HRTFKernel( fftFrame, frameDelay, sampleRate));60 return adoptRef(new HRTFKernel(std::forward<std::unique_ptr<FFTFrame>>(fftFrame), frameDelay, sampleRate)); 62 61 } 63 62 … … 74 73 75 74 // Converts back into impulse-response form. 76 PassOwnPtr<AudioChannel> createImpulseResponse();75 std::unique_ptr<AudioChannel> createImpulseResponse(); 77 76 78 77 private: … … 80 79 HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate); 81 80 82 HRTFKernel( PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)83 : m_fftFrame( fftFrame)81 HRTFKernel(std::unique_ptr<FFTFrame> fftFrame, float frameDelay, float sampleRate) 82 : m_fftFrame(std::move(fftFrame)) 84 83 , m_frameDelay(frameDelay) 85 84 , m_sampleRate(sampleRate) … … 87 86 } 88 87 89 OwnPtr<FFTFrame> m_fftFrame;88 std::unique_ptr<FFTFrame> m_fftFrame; 90 89 float m_frameDelay; 91 90 float m_sampleRate; -
trunk/Source/WebCore/platform/audio/MultiChannelResampler.cpp
r149817 r162368 98 98 // Create each channel's resampler. 99 99 for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) 100 m_kernels.append( adoptPtr(new SincResampler(scaleFactor)));100 m_kernels.append(std::make_unique<SincResampler>(scaleFactor)); 101 101 } 102 102 -
trunk/Source/WebCore/platform/audio/MultiChannelResampler.h
r157653 r162368 31 31 32 32 #include "SincResampler.h" 33 #include < wtf/OwnPtr.h>33 #include <memory> 34 34 35 35 namespace WebCore { … … 50 50 51 51 // Each channel will be resampled using a high-quality SincResampler. 52 Vector< OwnPtr<SincResampler>> m_kernels;52 Vector<std::unique_ptr<SincResampler>> m_kernels; 53 53 54 54 unsigned m_numberOfChannels; -
trunk/Source/WebCore/platform/audio/Panner.cpp
r150856 r162368 35 35 #include "EqualPowerPanner.h" 36 36 #include "HRTFPanner.h" 37 #include <wtf/OwnPtr.h>38 37 39 38 namespace WebCore { 40 39 41 PassOwnPtr<Panner> Panner::create(PanningModel model, float sampleRate, HRTFDatabaseLoader* databaseLoader)40 std::unique_ptr<Panner> Panner::create(PanningModel model, float sampleRate, HRTFDatabaseLoader* databaseLoader) 42 41 { 43 OwnPtr<Panner> panner;42 std::unique_ptr<Panner> panner; 44 43 45 44 switch (model) { 46 45 case PanningModelEqualPower: 47 panner = adoptPtr(new EqualPowerPanner(sampleRate));46 panner = std::make_unique<EqualPowerPanner>(sampleRate); 48 47 break; 49 48 50 49 case PanningModelHRTF: 51 panner = adoptPtr(new HRTFPanner(sampleRate, databaseLoader));50 panner = std::make_unique<HRTFPanner>(sampleRate, databaseLoader); 52 51 break; 53 52 … … 59 58 } 60 59 61 return panner .release();60 return panner; 62 61 } 63 62 -
trunk/Source/WebCore/platform/audio/Panner.h
r150856 r162368 30 30 #define Panner_h 31 31 32 #include < wtf/PassOwnPtr.h>32 #include <memory> 33 33 34 34 namespace WebCore { … … 49 49 typedef unsigned PanningModel; 50 50 51 static PassOwnPtr<Panner> create(PanningModel, float sampleRate, HRTFDatabaseLoader*);51 static std::unique_ptr<Panner> create(PanningModel, float sampleRate, HRTFDatabaseLoader*); 52 52 53 53 virtual ~Panner() { }; -
trunk/Source/WebCore/platform/audio/Reverb.cpp
r159027 r162368 39 39 #include <math.h> 40 40 #include <wtf/MathExtras.h> 41 #include <wtf/OwnPtr.h>42 #include <wtf/PassOwnPtr.h>43 41 44 42 namespace WebCore { … … 120 118 AudioChannel* channel = impulseResponseBuffer->channel(i); 121 119 122 OwnPtr<ReverbConvolver> convolver = adoptPtr(new ReverbConvolver(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads)); 123 m_convolvers.append(convolver.release()); 120 m_convolvers.append(std::make_unique<ReverbConvolver>(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads)); 124 121 125 122 convolverRenderPhase += renderSliceSize; -
trunk/Source/WebCore/platform/audio/Reverb.h
r157653 r162368 57 57 size_t m_impulseResponseLength; 58 58 59 Vector< OwnPtr<ReverbConvolver>> m_convolvers;59 Vector<std::unique_ptr<ReverbConvolver>> m_convolvers; 60 60 61 61 // For "True" stereo processing -
trunk/Source/WebCore/platform/audio/ReverbConvolver.h
r162291 r162368 37 37 #include "ReverbInputBuffer.h" 38 38 #include <condition_variable> 39 #include <memory> 39 40 #include <mutex> 40 41 #include <wtf/RefCounted.h> -
trunk/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
r147684 r162368 38 38 #include "ReverbConvolver.h" 39 39 #include "ReverbInputBuffer.h" 40 #include <wtf/OwnPtr.h>41 #include <wtf/PassOwnPtr.h>42 40 43 41 namespace WebCore { … … 56 54 57 55 if (!m_directMode) { 58 m_fftKernel = adoptPtr(new FFTFrame(fftSize));56 m_fftKernel = std::make_unique<FFTFrame>(fftSize); 59 57 m_fftKernel->doPaddedFFT(impulseResponse + stageOffset, stageLength); 60 m_fftConvolver = adoptPtr(new FFTConvolver(fftSize));58 m_fftConvolver = std::make_unique<FFTConvolver>(fftSize); 61 59 } else { 62 m_directKernel = adoptPtr(new AudioFloatArray(fftSize / 2));60 m_directKernel = std::make_unique<AudioFloatArray>(fftSize / 2); 63 61 m_directKernel->copyToRange(impulseResponse + stageOffset, 0, fftSize / 2); 64 m_directConvolver = adoptPtr(new DirectConvolver(renderSliceSize));62 m_directConvolver = std::make_unique<DirectConvolver>(renderSliceSize); 65 63 } 66 64 m_temporaryBuffer.allocate(renderSliceSize); -
trunk/Source/WebCore/platform/audio/ReverbConvolverStage.h
r147684 r162368 31 31 32 32 #include "AudioArray.h" 33 #include < wtf/OwnPtr.h>33 #include <memory> 34 34 35 35 namespace WebCore { … … 61 61 62 62 private: 63 OwnPtr<FFTFrame> m_fftKernel;64 OwnPtr<FFTConvolver> m_fftConvolver;63 std::unique_ptr<FFTFrame> m_fftKernel; 64 std::unique_ptr<FFTConvolver> m_fftConvolver; 65 65 66 66 AudioFloatArray m_preDelayBuffer; … … 78 78 79 79 bool m_directMode; 80 OwnPtr<AudioFloatArray> m_directKernel;81 OwnPtr<DirectConvolver> m_directConvolver;80 std::unique_ptr<AudioFloatArray> m_directKernel; 81 std::unique_ptr<DirectConvolver> m_directConvolver; 82 82 }; 83 83 -
trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
r159730 r162368 43 43 } 44 44 45 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)45 std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate) 46 46 { 47 47 // FIXME: make use of inputDeviceId as appropriate. … … 55 55 LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate); 56 56 57 return adoptPtr(new AudioDestinationGStreamer(callback, sampleRate));57 return std::make_unique<AudioDestinationGStreamer>(callback, sampleRate); 58 58 } 59 59 -
trunk/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp
r159730 r162368 31 31 #include <gst/pbutils/pbutils.h> 32 32 #include <wtf/Noncopyable.h> 33 #include <wtf/PassOwnPtr.h>34 33 #include <wtf/gobject/GOwnPtr.h> 35 34 #include <wtf/gobject/GRefPtr.h> -
trunk/Source/WebCore/platform/audio/ios/AudioDestinationIOS.cpp
r161589 r162368 68 68 69 69 // Factory method: iOS-implementation 70 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)70 std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate) 71 71 { 72 72 // FIXME: make use of inputDeviceId as appropriate. … … 80 80 LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate); 81 81 82 return adoptPtr(new AudioDestinationIOS(callback, sampleRate));82 return std::make_unique<AudioDestinationIOS>(callback, sampleRate); 83 83 } 84 84 -
trunk/Source/WebCore/platform/audio/ios/AudioSessionIOS.mm
r161642 r162368 32 32 #import <AVFoundation/AVAudioSession.h> 33 33 #import <objc/runtime.h> 34 #import <wtf/PassOwnPtr.h>35 34 #import <wtf/RetainPtr.h> 36 35 … … 120 119 121 120 AudioSession::AudioSession() 122 : m_private( adoptPtr(new AudioSessionPrivate(this)))121 : m_private(std::make_unique<AudioSessionPrivate>(this)) 123 122 { 124 123 } -
trunk/Source/WebCore/platform/audio/mac/AudioDestinationMac.cpp
r161589 r162368 50 50 51 51 // Factory method: Mac-implementation 52 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)52 std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate) 53 53 { 54 54 // FIXME: make use of inputDeviceId as appropriate. … … 62 62 LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate); 63 63 64 return adoptPtr(new AudioDestinationMac(callback, sampleRate));64 return std::make_unique<AudioDestinationMac>(callback, sampleRate); 65 65 } 66 66 -
trunk/Source/WebCore/platform/audio/mac/AudioDestinationMac.h
r162139 r162368 34 34 #include "MediaSession.h" 35 35 #include <AudioUnit/AudioUnit.h> 36 #include <wtf/OwnPtr.h>37 36 #include <wtf/RefPtr.h> 38 37 -
trunk/Source/WebCore/platform/audio/mac/AudioSessionMac.cpp
r161589 r162368 33 33 #include "NotImplemented.h" 34 34 #include <CoreAudio/AudioHardware.h> 35 #include <wtf/PassOwnPtr.h>36 35 37 36 namespace WebCore { … … 57 56 58 57 AudioSession::AudioSession() 59 : m_private( adoptPtr(new AudioSessionPrivate()))58 : m_private(std::make_unique<AudioSessionPrivate>()) 60 59 { 61 60 }
Note: See TracChangeset
for help on using the changeset viewer.