Changeset 176943 in webkit


Timestamp: Dec 8, 2014 1:34:25 AM
Author: commit-queue@webkit.org
Message:

[GStreamer] Major cleanup of AudioDestination implementation
https://bugs.webkit.org/show_bug.cgi?id=139370

Patch by Sebastian Dröge <sebastian@centricular.com> on 2014-12-08
Reviewed by Philippe Normand.

  • platform/audio/gstreamer/AudioDestinationGStreamer.cpp:

(WebCore::AudioDestinationGStreamer::AudioDestinationGStreamer):
Add an audioresample element before the audio sink. The audio sink
might not be able to handle our sampling rate.

(WebCore::AudioDestinationGStreamer::AudioDestinationGStreamer):
(WebCore::AudioDestinationGStreamer::~AudioDestinationGStreamer):
(WebCore::AudioDestinationGStreamer::stop):
(WebCore::AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady): Deleted.
Don't use a wavparse element; link the raw audio from the source
directly to the audio sink.
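
For illustration, the two notes above boil down to the following pipeline
shape. This is a sketch rather than the actual WebKit code: error checks
are omitted and audiotestsrc merely stands in for the internal WebAudio
source element.

    #include <gst/gst.h>

    // Illustrative sketch (not WebKit code) of the simplified destination
    // pipeline: source ! audioconvert ! audioresample ! autoaudiosink.
    static GstElement* buildSketchPipeline()
    {
        GstElement* pipeline = gst_pipeline_new("webaudio-destination-sketch");
        GstElement* source = gst_element_factory_make("audiotestsrc", nullptr);
        GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
        GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
        GstElement* audioSink = gst_element_factory_make("autoaudiosink", nullptr);

        gst_bin_add_many(GST_BIN(pipeline), source, audioConvert, audioResample, audioSink, NULL);
        // audioresample sits right before the sink so a sink that cannot
        // consume our sampling rate still receives audio it can handle.
        gst_element_link_many(source, audioConvert, audioResample, audioSink, NULL);
        return pipeline;
    }

The patch itself links the individual pads with gst_element_link_pads_full()
and GST_PAD_LINK_CHECK_NOTHING, as the diff below shows.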

(WebCore::AudioDestinationGStreamer::start):
Catch errors early when going to PLAYING, since we might not get an
error message.
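
In sketch form (pipeline as built above), the early check mirrors the new
start() hunk below: inspect the state-change return instead of waiting for
a bus error that may never arrive.

    // Sketch only: return false when the pipeline refuses to go to PLAYING.
    static bool startSketch(GstElement* pipeline)
    {
        if (gst_element_set_state(pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
            g_warning("Error: Failed to set pipeline to playing");
            return false;
        }
        return true;
    }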

  • platform/audio/gstreamer/AudioDestinationGStreamer.h:
  • platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp:

(getGStreamerMonoAudioCaps):
(webKitWebAudioSrcConstructed):
(webKitWebAudioSrcChangeState):
Don't use a WAV encoder; output raw audio directly. Also don't
include an unneeded audioconvert element before the interleave.
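
The caps for each channel now describe raw float audio in native byte
order; the snippet below mirrors getGStreamerMonoAudioCaps() in the hunk
further down (gst/audio/audio.h provides GST_AUDIO_NE).

    #include <gst/audio/audio.h> // GST_AUDIO_NE(F32) expands to "F32LE" or "F32BE"

    // Raw, interleaved, single-channel float caps at the context sample rate.
    static GstCaps* getGStreamerMonoAudioCaps(float sampleRate)
    {
        return gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(sampleRate),
            "channels", G_TYPE_INT, 1,
            "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
            "layout", G_TYPE_STRING, "interleaved", nullptr);
    }

Each channel branch is reduced to queue ! capsfilter, linked straight to an
interleave request pad ("sink_%u"); the audioconvert that used to sit in
between is gone.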

(webKitWebAudioSrcLoop):
Add timestamps and durations to the output buffers, map them in
READWRITE mode, and keep them mapped until we're sure nothing is
still writing into them.

(webKitWebAudioSrcLoop):
Pause the task on errors instead of continuously calling it again
immediately.
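
Taken together, the two webKitWebAudioSrcLoop() notes amount to the
following per-channel pattern. This is a simplified sketch: the priv->
indirection, the AudioSrcBuffer list bookkeeping and the downstream event
handling are omitted, and pushChannelBlock is a made-up name used only for
illustration.

    #include <gst/gst.h>
    #include "AudioBus.h" // WebCore::AudioBus::setChannelMemory()

    // Sketch: in the real code all channel buffers stay mapped in a list
    // until rendering is finished, and only then are unmapped and pushed.
    static void pushChannelBlock(GstPad* pad, GstTask* task, WebCore::AudioBus* bus,
        unsigned channelIndex, guint framesToPull, float sampleRate, guint64& numberOfSamples)
    {
        // Derive this block's timestamp and duration from the running sample count.
        GstClockTime timestamp = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate);
        numberOfSamples += framesToPull;
        GstClockTime duration = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate) - timestamp;

        GstBuffer* channelBuffer = gst_buffer_new_and_alloc(framesToPull * sizeof(float));
        GST_BUFFER_TIMESTAMP(channelBuffer) = timestamp;
        GST_BUFFER_DURATION(channelBuffer) = duration;

        // Map READWRITE and keep the mapping alive while the bus renders into it.
        GstMapInfo info;
        gst_buffer_map(channelBuffer, &info, GST_MAP_READWRITE);
        bus->setChannelMemory(channelIndex, reinterpret_cast<float*>(info.data), framesToPull);
        // ... the provider renders audio into the mapped memory here ...
        gst_buffer_unmap(channelBuffer, &info);

        // Push downstream; on failure pause the task instead of re-running it immediately.
        if (gst_pad_chain(pad, channelBuffer) != GST_FLOW_OK)
            gst_task_pause(task);
    }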

Location: trunk/Source/WebCore
Files: 4 edited

Legend:

  unchanged (context)
+ added
- removed
  • trunk/Source/WebCore/ChangeLog

(The new ChangeLog entry repeats the commit message shown above.)
  • trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp

r163797 → r176943

 /*
  *  Copyright (C) 2011, 2012 Igalia S.L
+ *  Copyright (C) 2014 Sebastian Dröge <sebastian@centricular.com>
  *
  *  This library is free software; you can redistribute it and/or
…
                                                                 "frames", framesToPull, NULL));

-    GstElement* wavParser = gst_element_factory_make("wavparse", 0);
-
-    m_wavParserAvailable = wavParser;
-    ASSERT_WITH_MESSAGE(m_wavParserAvailable, "Failed to create GStreamer wavparse element");
-    if (!m_wavParserAvailable)
-        return;
-
-    gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, wavParser, NULL);
-    gst_element_link_pads_full(webkitAudioSrc, "src", wavParser, "sink", GST_PAD_LINK_CHECK_NOTHING);
-
-    GRefPtr<GstPad> srcPad = adoptGRef(gst_element_get_static_pad(wavParser, "src"));
-    finishBuildingPipelineAfterWavParserPadReady(srcPad.get());
-}
-
-AudioDestinationGStreamer::~AudioDestinationGStreamer()
-{
-    GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
-    ASSERT(bus);
-    g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
-    gst_bus_remove_signal_watch(bus.get());
-
-    gst_element_set_state(m_pipeline, GST_STATE_NULL);
-    gst_object_unref(m_pipeline);
-}
-
-void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(GstPad* pad)
-{
-    ASSERT(m_wavParserAvailable);
+    GRefPtr<GstPad> srcPad = adoptGRef(gst_element_get_static_pad(webkitAudioSrc, "src"));

     GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", 0);
     m_audioSinkAvailable = audioSink;
-
     if (!audioSink) {
         LOG_ERROR("Failed to create GStreamer autoaudiosink element");
…

     GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
-    gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioSink.get(), NULL);
+    GstElement* audioResample = gst_element_factory_make("audioresample", 0);
+    gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, audioConvert, audioResample, audioSink.get(), NULL);

     // Link wavparse's src pad to audioconvert sink pad.
     GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink"));
-    gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
+    gst_pad_link_full(srcPad.get(), sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

     // Link audioconvert to audiosink and roll states.
-    gst_element_link_pads_full(audioConvert, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
-    gst_element_sync_state_with_parent(audioConvert);
-    gst_element_sync_state_with_parent(audioSink.leakRef());
+    gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
+    gst_element_link_pads_full(audioResample, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
+}
+
+AudioDestinationGStreamer::~AudioDestinationGStreamer()
+{
+    GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
+    ASSERT(bus);
+    g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
+    gst_bus_remove_signal_watch(bus.get());
+
+    gst_element_set_state(m_pipeline, GST_STATE_NULL);
+    gst_object_unref(m_pipeline);
 }

…
 void AudioDestinationGStreamer::start()
 {
-    ASSERT(m_wavParserAvailable);
-    if (!m_wavParserAvailable)
+    ASSERT(m_audioSinkAvailable);
+    if (!m_audioSinkAvailable)
         return;

-    gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
+    if (gst_element_set_state(m_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
+        g_warning("Error: Failed to set pipeline to playing");
+        m_isPlaying = false;
+        return;
+    }
+
     m_isPlaying = true;
 }
…
 void AudioDestinationGStreamer::stop()
 {
-    ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
-    if (!m_wavParserAvailable || !m_audioSinkAvailable)
+    ASSERT(m_audioSinkAvailable);
+    if (!m_audioSinkAvailable)
         return;

  • trunk/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h

r149817 → r176943

     AudioIOCallback& callback() const { return m_callback; }

-    void finishBuildingPipelineAfterWavParserPadReady(GstPad*);
     gboolean handleMessage(GstMessage*);

…
     float m_sampleRate;
     bool m_isPlaying;
-    bool m_wavParserAvailable;
     bool m_audioSinkAvailable;
     GstElement* m_pipeline;
  • trunk/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp

r172338 → r176943

 /*
  *  Copyright (C) 2011, 2012 Igalia S.L
+ *  Copyright (C) 2014 Sebastian Dröge <sebastian@centricular.com>
  *
  *  This library is free software; you can redistribute it and/or
…

     GRefPtr<GstElement> interleave;
-    GRefPtr<GstElement> wavEncoder;

     GRefPtr<GstTask> task;
…
     bool newStreamEventPending;
     GstSegment segment;
+    guint64 numberOfSamples;
 };

…
 };

+typedef struct {
+    GstBuffer* buffer;
+    GstMapInfo info;
+} AudioSrcBuffer;
+
 static GstStaticPadTemplate srcTemplate = GST_STATIC_PAD_TEMPLATE("src",
-                                                                  GST_PAD_SRC,
-                                                                  GST_PAD_ALWAYS,
-                                                                  GST_STATIC_CAPS("audio/x-wav"));
+    GST_PAD_SRC,
+    GST_PAD_ALWAYS,
+    GST_STATIC_CAPS(GST_AUDIO_CAPS_MAKE(GST_AUDIO_NE(F32))));

 GST_DEBUG_CATEGORY_STATIC(webkit_web_audio_src_debug);
…
     return gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(sampleRate),
         "channels", G_TYPE_INT, 1,
-        "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32),
+        "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
         "layout", G_TYPE_STRING, "interleaved", nullptr);
 }
…

     priv->interleave = gst_element_factory_make("interleave", 0);
-    priv->wavEncoder = gst_element_factory_make("wavenc", 0);

     if (!priv->interleave) {
…
     }

-    if (!priv->wavEncoder) {
-        GST_ERROR_OBJECT(src, "Failed to create wavenc");
-        return;
-    }
-
-    gst_bin_add_many(GST_BIN(src), priv->interleave.get(), priv->wavEncoder.get(), NULL);
-    gst_element_link_pads_full(priv->interleave.get(), "src", priv->wavEncoder.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
+    gst_bin_add(GST_BIN(src), priv->interleave.get());

     // For each channel of the bus create a new upstream branch for interleave, like:
-    // queue ! capsfilter ! audioconvert. which is plugged to a new interleave request sinkpad.
+    // queue ! capsfilter. which is plugged to a new interleave request sinkpad.
     for (unsigned channelIndex = 0; channelIndex < priv->bus->numberOfChannels(); channelIndex++) {
         GUniquePtr<gchar> queueName(g_strdup_printf("webaudioQueue%u", channelIndex));
         GstElement* queue = gst_element_factory_make("queue", queueName.get());
         GstElement* capsfilter = gst_element_factory_make("capsfilter", 0);
-        GstElement* audioconvert = gst_element_factory_make("audioconvert", 0);

         GRefPtr<GstCaps> monoCaps = adoptGRef(getGStreamerMonoAudioCaps(priv->sampleRate));
…
         priv->pads = g_slist_prepend(priv->pads, pad);

-        gst_bin_add_many(GST_BIN(src), queue, capsfilter, audioconvert, NULL);
+        gst_bin_add_many(GST_BIN(src), queue, capsfilter, NULL);
         gst_element_link_pads_full(queue, "src", capsfilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
-        gst_element_link_pads_full(capsfilter, "src", audioconvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
-        gst_element_link_pads_full(audioconvert, "src", priv->interleave.get(), 0, GST_PAD_LINK_CHECK_NOTHING);
+        gst_element_link_pads_full(capsfilter, "src", priv->interleave.get(), "sink_%u", GST_PAD_LINK_CHECK_NOTHING);

     }
…

     // wavenc's src pad is the only visible pad of our element.
-    GRefPtr<GstPad> targetPad = adoptGRef(gst_element_get_static_pad(priv->wavEncoder.get(), "src"));
+    GRefPtr<GstPad> targetPad = adoptGRef(gst_element_get_static_pad(priv->interleave.get(), "src"));
     gst_ghost_pad_set_target(GST_GHOST_PAD(priv->sourcePad), targetPad.get());
 }
…
     ASSERT(priv->bus);
     ASSERT(priv->provider);
-    if (!priv->provider || !priv->bus)
+    if (!priv->provider || !priv->bus) {
+        gst_task_pause(src->priv->task.get());
         return;
+    }
+
+    GstClockTime timestamp = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate);
+    priv->numberOfSamples += priv->framesToPull;
+    GstClockTime duration = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate) - timestamp;

     GSList* channelBufferList = 0;
…
     unsigned bufferSize = priv->framesToPull * sizeof(float);
     for (i = g_slist_length(priv->pads) - 1; i >= 0; i--) {
+        AudioSrcBuffer* buffer = g_new(AudioSrcBuffer, 1);
         GstBuffer* channelBuffer = gst_buffer_new_and_alloc(bufferSize);
         ASSERT(channelBuffer);
-        channelBufferList = g_slist_prepend(channelBufferList, channelBuffer);
-        GstMapInfo info;
-        gst_buffer_map(channelBuffer, &info, GST_MAP_READ);
-        priv->bus->setChannelMemory(i, reinterpret_cast<float*>(info.data), priv->framesToPull);
-        gst_buffer_unmap(channelBuffer, &info);
+        buffer->buffer = channelBuffer;
+        GST_BUFFER_TIMESTAMP(channelBuffer) = timestamp;
+        GST_BUFFER_DURATION(channelBuffer) = duration;
+        gst_buffer_map(channelBuffer, &buffer->info, (GstMapFlags) GST_MAP_READWRITE);
+        priv->bus->setChannelMemory(i, reinterpret_cast<float*>(buffer->info.data), priv->framesToPull);
+        channelBufferList = g_slist_prepend(channelBufferList, buffer);
     }

…
     for (i = 0; padsIt && buffersIt; padsIt = g_slist_next(padsIt), buffersIt = g_slist_next(buffersIt), ++i) {
         GstPad* pad = static_cast<GstPad*>(padsIt->data);
-        GstBuffer* channelBuffer = static_cast<GstBuffer*>(buffersIt->data);
+        AudioSrcBuffer* buffer = static_cast<AudioSrcBuffer*>(buffersIt->data);
+        GstBuffer* channelBuffer = buffer->buffer;
+
+        // Unmap before passing on the buffer.
+        gst_buffer_unmap(channelBuffer, &buffer->info);
+        g_free(buffer);

         // Send stream-start, segment and caps events downstream, along with the first buffer.
…

         GstFlowReturn ret = gst_pad_chain(pad, channelBuffer);
-        if (ret != GST_FLOW_OK)
+        if (ret != GST_FLOW_OK) {
             GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to push buffer on %s:%s flow: %s", GST_DEBUG_PAD_NAME(pad), gst_flow_get_name(ret)));
+            gst_task_pause(src->priv->task.get());
+        }
     }

…
             return GST_STATE_CHANGE_FAILURE;
         }
-        if (!src->priv->wavEncoder) {
-            gst_element_post_message(element, gst_missing_element_message_new(element, "wavenc"));
-            GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (0), ("no wavenc"));
-            return GST_STATE_CHANGE_FAILURE;
-        }
+        src->priv->numberOfSamples = 0;
         break;
     default: