Changeset 269849 in webkit
- Timestamp:
- Nov 16, 2020 4:46:38 AM (3 years ago)
- Location:
- trunk/Source/WebCore
- Files:
-
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/WebCore/ChangeLog
r269848 r269849 1 2020-11-16 Philippe Normand <pnormand@igalia.com> 2 3 [GStreamer] Clean-up Audio{Data,StreamDescription} implementations 4 https://bugs.webkit.org/show_bug.cgi?id=218957 5 6 Reviewed by Xabier Rodriguez-Calvar. 7 8 Refactor the GStreamerAudioData and GStreamerStreamDescription implementations in order to 9 avoid unnecessary copies. The call-sites were adapted accordingly. Some usage of the latter 10 class was removed because it was simpler to use the GstAudioInfo API directly. 11 12 * Modules/webaudio/MediaStreamAudioSourceGStreamer.cpp: 13 (WebCore::MediaStreamAudioSource::consumeAudio): 14 * platform/audio/gstreamer/GStreamerAudioData.h: 15 * platform/audio/gstreamer/GStreamerAudioStreamDescription.h: 16 * platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp: 17 (WebCore::GStreamerAudioCaptureSource::newSampleCallback): 18 * platform/mediastream/gstreamer/GStreamerMediaStreamSource.cpp: 19 (webkitMediaStreamSrcPushAudioSample): 20 * platform/mediastream/gstreamer/MockRealtimeAudioSourceGStreamer.cpp: 21 (WebCore::MockRealtimeAudioSourceGStreamer::render): 22 * platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp: 23 (WebCore::RealtimeIncomingAudioSourceLibWebRTC::OnData): 24 * platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp: 25 (WebCore::libwebrtcAudioFormat): 26 (WebCore::RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable): 27 (WebCore::RealtimeOutgoingAudioSourceLibWebRTC::pullAudioData): 28 * platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.h: 29 1 30 2020-11-16 Andres Gonzalez <andresg_22@apple.com> 2 31 -
trunk/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceGStreamer.cpp
r269827 r269849 53 53 } 54 54 55 auto mediaTime = MediaTime((m_numberOfFrames * G_USEC_PER_SEC) / m_currentSettings.sampleRate(), G_USEC_PER_SEC);55 MediaTime mediaTime((m_numberOfFrames * G_USEC_PER_SEC) / m_currentSettings.sampleRate(), G_USEC_PER_SEC); 56 56 m_numberOfFrames += numberOfFrames; 57 57 … … 65 65 copyBusData(bus, buffer.get(), muted()); 66 66 auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr)); 67 m_audioBuffer = makeUnique<GStreamerAudioData>(WTFMove(sample), info); 68 69 GStreamerAudioStreamDescription description(info); 70 audioSamplesAvailable(mediaTime, *m_audioBuffer, description, numberOfFrames); 67 GStreamerAudioData audioBuffer(WTFMove(sample), info); 68 GStreamerAudioStreamDescription description(&info); 69 audioSamplesAvailable(mediaTime, audioBuffer, description, numberOfFrames); 71 70 } 72 71 -
trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioData.h
r269827 r269849 33 33 class GStreamerAudioData final : public PlatformAudioData { 34 34 public: 35 GStreamerAudioData(GRefPtr<GstSample>&& sample, GstAudioInfo info) 35 GStreamerAudioData(GRefPtr<GstSample>&& sample, GstAudioInfo&& info) 36 : m_sample(WTFMove(sample)) 37 , m_audioInfo(WTFMove(info)) 38 { 39 } 40 41 GStreamerAudioData(GRefPtr<GstSample>&& sample, const GstAudioInfo& info) 36 42 : m_sample(WTFMove(sample)) 37 43 , m_audioInfo(info) … … 46 52 47 53 void setSample(GRefPtr<GstSample>&& sample) { m_sample = WTFMove(sample); } 48 GstSample* getSample() { return m_sample.get(); }49 GstAudioInfo getAudioInfo(){ return m_audioInfo; }54 const GRefPtr<GstSample>& getSample() const { return m_sample; } 55 const GstAudioInfo& getAudioInfo() const { return m_audioInfo; } 50 56 uint32_t channelCount() const { return GST_AUDIO_INFO_CHANNELS(&m_audioInfo); } 51 57 -
trunk/Source/WebCore/platform/audio/gstreamer/GStreamerAudioStreamDescription.h
r269827 r269849 31 31 class GStreamerAudioStreamDescription final: public AudioStreamDescription { 32 32 public: 33 GStreamerAudioStreamDescription(GstAudioInfo info) 33 GStreamerAudioStreamDescription(GstAudioInfo&& info) 34 : m_info(WTFMove(info)) 35 , m_caps(adoptGRef(gst_audio_info_to_caps(&m_info))) 36 { 37 } 38 39 GStreamerAudioStreamDescription(const GstAudioInfo& info) 34 40 : m_info(info) 35 41 , m_caps(adoptGRef(gst_audio_info_to_caps(&m_info))) … … 37 43 } 38 44 39 GStreamerAudioStreamDescription(GstAudioInfo *info)45 GStreamerAudioStreamDescription(GstAudioInfo* info) 40 46 : m_info(*info) 41 47 , m_caps(adoptGRef(gst_audio_info_to_caps(&m_info))) … … 94 100 bool operator!=(const GStreamerAudioStreamDescription& other) { return !operator == (other); } 95 101 96 GstCaps* caps() { return m_caps.get(); }97 GstAudioInfo* getInfo() { return &m_info; }102 const GRefPtr<GstCaps>& caps() const { return m_caps; } 103 const GstAudioInfo& getInfo() const { return m_info; } 98 104 99 105 private: -
trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp
r267934 r269849 125 125 126 126 // FIXME - figure out a way to avoid copying (on write) the data. 127 GstBuffer* buf= gst_sample_get_buffer(sample.get());128 auto frames(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample))));129 auto streamDesc(std::unique_ptr<GStreamerAudioStreamDescription>(new GStreamerAudioStreamDescription(frames->getAudioInfo())));127 auto* buffer = gst_sample_get_buffer(sample.get()); 128 GStreamerAudioData frames(WTFMove(sample)); 129 GStreamerAudioStreamDescription description(frames.getAudioInfo()); 130 130 131 131 source->audioSamplesAvailable( 132 MediaTime(GST_TIME_AS_USECONDS(GST_BUFFER_PTS(buf )), G_USEC_PER_SEC),133 *frames, *streamDesc, gst_buffer_get_size(buf) / frames->getAudioInfo().bpf);132 MediaTime(GST_TIME_AS_USECONDS(GST_BUFFER_PTS(buffer)), G_USEC_PER_SEC), 133 frames, description, gst_buffer_get_size(buffer) / description.getInfo().bpf); 134 134 135 135 return GST_FLOW_OK; -
trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerMediaStreamSource.cpp
r264648 r269849 41 41 42 42 static void webkitMediaStreamSrcPushVideoSample(WebKitMediaStreamSrc*, GstSample*); 43 static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc*, GstSample*);43 static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc*, const GRefPtr<GstSample>&); 44 44 static void webkitMediaStreamSrcTrackEnded(WebKitMediaStreamSrc*, MediaStreamTrackPrivate&); 45 45 static void webkitMediaStreamSrcRemoveTrackByType(WebKitMediaStreamSrc*, RealtimeMediaSource::Type); … … 592 592 } 593 593 594 static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc* self, GstSample*sample)594 static void webkitMediaStreamSrcPushAudioSample(WebKitMediaStreamSrc* self, const GRefPtr<GstSample>& sample) 595 595 { 596 596 if (self->priv->audioSrc) 597 self->priv->audioSrc->pushSample(sample );597 self->priv->audioSrc->pushSample(sample.get()); 598 598 } 599 599 -
trunk/Source/WebCore/platform/mediastream/gstreamer/MockRealtimeAudioSourceGStreamer.cpp
r267934 r269849 82 82 83 83 ASSERT(m_streamFormat); 84 GstAudioInfo*info = m_streamFormat->getInfo();84 const auto& info = m_streamFormat->getInfo(); 85 85 GRefPtr<GstBuffer> buffer = adoptGRef(gst_buffer_new_allocate(nullptr, bipBopCount * m_streamFormat->bytesPerFrame(), nullptr)); 86 86 { … … 88 88 89 89 if (muted()) 90 gst_audio_format_fill_silence(info ->finfo, map.data(), map.size());90 gst_audio_format_fill_silence(info.finfo, map.data(), map.size()); 91 91 else { 92 92 memcpy(map.data(), &m_bipBopBuffer[bipBopStart], sizeof(float) * bipBopCount); … … 99 99 frameCount = std::min(totalFrameCount, m_maximiumFrameCount); 100 100 101 GRefPtr<GstCaps> caps = adoptGRef(gst_audio_info_to_caps(info));101 auto caps = adoptGRef(gst_audio_info_to_caps(&info)); 102 102 auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr)); 103 auto data(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample), *info)));104 auto mediaTime = MediaTime((m_samplesRendered * G_USEC_PER_SEC) / sampleRate(), G_USEC_PER_SEC);105 audioSamplesAvailable(mediaTime, *data.get(), *m_streamFormat, bipBopCount);103 GStreamerAudioData data(WTFMove(sample), info); 104 MediaTime mediaTime((m_samplesRendered * G_USEC_PER_SEC) / sampleRate(), G_USEC_PER_SEC); 105 audioSamplesAvailable(mediaTime, data, *m_streamFormat, bipBopCount); 106 106 } 107 107 } -
trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeIncomingAudioSourceLibWebRTC.cpp
r243163 r269849 73 73 74 74 auto buffer = adoptGRef(gst_buffer_new_wrapped(bufferData, bufferSize)); 75 GRefPtr<GstCaps>caps = adoptGRef(gst_audio_info_to_caps(&info));75 auto caps = adoptGRef(gst_audio_info_to_caps(&info)); 76 76 auto sample = adoptGRef(gst_sample_new(buffer.get(), caps.get(), nullptr, nullptr)); 77 auto data(std::unique_ptr<GStreamerAudioData>(new GStreamerAudioData(WTFMove(sample), info))); 78 79 auto mediaTime = MediaTime((m_numberOfFrames * G_USEC_PER_SEC) / sampleRate, G_USEC_PER_SEC); 80 audioSamplesAvailable(mediaTime, *data.get(), GStreamerAudioStreamDescription(info), numberOfFrames); 77 GStreamerAudioData data(WTFMove(sample), info); 78 MediaTime mediaTime((m_numberOfFrames * G_USEC_PER_SEC) / sampleRate, G_USEC_PER_SEC); 79 audioSamplesAvailable(mediaTime, data, GStreamerAudioStreamDescription(info), numberOfFrames); 81 80 82 81 m_numberOfFrames += numberOfFrames; -
trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.cpp
r269827 r269849 23 23 #include "RealtimeOutgoingAudioSourceLibWebRTC.h" 24 24 25 #include "GStreamerAudioData.h" 26 #include "GStreamerAudioStreamDescription.h" 25 27 #include "LibWebRTCAudioFormat.h" 26 28 #include "LibWebRTCProvider.h" 27 29 #include "NotImplemented.h" 28 #include "gstreamer/GStreamerAudioData.h"29 30 30 31 namespace WebCore { … … 47 48 } 48 49 49 static inline std::unique_ptr<GStreamerAudioStreamDescription> libwebrtcAudioFormat(int sampleRate, 50 size_t channelCount) 50 static inline GstAudioInfo libwebrtcAudioFormat(int sampleRate, size_t channelCount) 51 51 { 52 52 GstAudioFormat format = gst_audio_format_build_integer( … … 60 60 size_t libWebRTCChannelCount = channelCount >= 2 ? 2 : channelCount; 61 61 gst_audio_info_set_format(&info, format, sampleRate, libWebRTCChannelCount, nullptr); 62 63 return std::unique_ptr<GStreamerAudioStreamDescription>(new GStreamerAudioStreamDescription(info)); 62 return info; 64 63 } 65 64 66 void RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable(const MediaTime&, 67 const PlatformAudioData& audioData, const AudioStreamDescription& streamDescription, 68 size_t /* sampleCount */) 65 void RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable(const MediaTime&, const PlatformAudioData& audioData, const AudioStreamDescription& streamDescription, size_t /* sampleCount */) 69 66 { 70 67 auto data = static_cast<const GStreamerAudioData&>(audioData); 71 68 auto desc = static_cast<const GStreamerAudioStreamDescription&>(streamDescription); 72 69 73 if (m_sampleConverter && !gst_audio_info_is_equal( m_inputStreamDescription->getInfo(),desc.getInfo())) {70 if (m_sampleConverter && !gst_audio_info_is_equal(&m_inputStreamDescription, &desc.getInfo())) { 74 71 GST_ERROR_OBJECT(this, "FIXME - Audio format renegotiation is not possible yet!"); 75 72 m_sampleConverter = nullptr; … … 77 74 78 75 if (!m_sampleConverter) { 79 m_inputStreamDescription = std::unique_ptr<GStreamerAudioStreamDescription>(new 
GStreamerAudioStreamDescription(desc.getInfo())); 80 m_outputStreamDescription = libwebrtcAudioFormat(LibWebRTCAudioFormat::sampleRate, streamDescription.numberOfChannels()); 81 m_sampleConverter.reset(gst_audio_converter_new(GST_AUDIO_CONVERTER_FLAG_IN_WRITABLE, 82 m_inputStreamDescription->getInfo(), 83 m_outputStreamDescription->getInfo(), 84 nullptr)); 76 m_inputStreamDescription = desc.getInfo(); 77 m_outputStreamDescription = libwebrtcAudioFormat(LibWebRTCAudioFormat::sampleRate, desc.numberOfChannels()); 78 m_sampleConverter.reset(gst_audio_converter_new(GST_AUDIO_CONVERTER_FLAG_IN_WRITABLE, &m_inputStreamDescription, 79 &m_outputStreamDescription, nullptr)); 85 80 } 86 81 87 82 { 88 83 LockHolder locker(m_adapterMutex); 89 auto* buffer = gst_sample_get_buffer(data.getSample()); 84 const auto& sample = data.getSample(); 85 auto* buffer = gst_sample_get_buffer(sample.get()); 90 86 gst_adapter_push(m_adapter.get(), gst_buffer_ref(buffer)); 91 87 } … … 97 93 void RealtimeOutgoingAudioSourceLibWebRTC::pullAudioData() 98 94 { 99 if (! 
m_inputStreamDescription || !m_outputStreamDescription) {95 if (!GST_AUDIO_INFO_IS_VALID(&m_inputStreamDescription) || !GST_AUDIO_INFO_IS_VALID(&m_outputStreamDescription)) { 100 96 GST_INFO("No stream description set yet."); 101 102 97 return; 103 98 } 104 99 105 100 size_t outChunkSampleCount = LibWebRTCAudioFormat::chunkSampleCount; 106 size_t outBufferSize = outChunkSampleCount * m_outputStreamDescription ->getInfo()->bpf;101 size_t outBufferSize = outChunkSampleCount * m_outputStreamDescription.bpf; 107 102 108 103 LockHolder locker(m_adapterMutex); 109 104 size_t inChunkSampleCount = gst_audio_converter_get_in_frames(m_sampleConverter.get(), outChunkSampleCount); 110 size_t inBufferSize = inChunkSampleCount * m_inputStreamDescription ->getInfo()->bpf;105 size_t inBufferSize = inChunkSampleCount * m_inputStreamDescription.bpf; 111 106 112 107 while (gst_adapter_available(m_adapter.get()) > inBufferSize) { … … 114 109 m_audioBuffer.grow(outBufferSize); 115 110 if (isSilenced()) 116 gst_audio_format_fill_silence(m_outputStreamDescription ->getInfo()->finfo, m_audioBuffer.data(), outBufferSize);111 gst_audio_format_fill_silence(m_outputStreamDescription.finfo, m_audioBuffer.data(), outBufferSize); 117 112 else { 118 113 GstMappedBuffer inMap(inBuffer.get(), GST_MAP_READ); … … 127 122 } 128 123 129 sendAudioFrames(m_audioBuffer.data(), LibWebRTCAudioFormat::sampleSize, static_cast<int>(m_outputStreamDescription->sampleRate()),130 static_cast<int>(m_outputStreamDescription->numberOfChannels()), outChunkSampleCount);124 sendAudioFrames(m_audioBuffer.data(), LibWebRTCAudioFormat::sampleSize, GST_AUDIO_INFO_RATE(&m_outputStreamDescription), 125 GST_AUDIO_INFO_CHANNELS(&m_outputStreamDescription), outChunkSampleCount); 131 126 } 132 127 } -
trunk/Source/WebCore/platform/mediastream/gstreamer/RealtimeOutgoingAudioSourceLibWebRTC.h
r248278 r269849 22 22 #if USE(LIBWEBRTC) 23 23 24 #include "GStreamerAudioStreamDescription.h"25 24 #include "GStreamerCommon.h" 26 25 #include "RealtimeOutgoingAudioSource.h" … … 50 49 51 50 GUniquePtr<GstAudioConverter> m_sampleConverter; 52 std::unique_ptr<GStreamerAudioStreamDescription>m_inputStreamDescription;53 std::unique_ptr<GStreamerAudioStreamDescription>m_outputStreamDescription;51 GstAudioInfo m_inputStreamDescription; 52 GstAudioInfo m_outputStreamDescription; 54 53 55 54 Lock m_adapterMutex;
Note: See TracChangeset
for help on using the changeset viewer.