root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 10851

Revision 10851, 10.0 kB (checked in by robert, 5 years ago)

From Julen Garcia: "I've lately been working with the ffmpeg plugin as well, and I implemented pause(), seek() and getReferenceTime(). I think that I have solved the internal clock issues (maybe not in the most elegant way)."

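The pause() support added in this revision comes down to a flag that the decode thread polls inside decodeLoop(). A minimal, self-contained sketch of that polled-pause pattern, using C++11 std::atomic and std::thread in place of the plugin's OpenThreads primitives (ToyDecoder and all of its members are hypothetical names, not part of the plugin):

#include <atomic>
#include <chrono>
#include <thread>

struct ToyDecoder
{
    std::atomic<bool> m_paused{true};
    std::atomic<bool> m_exit{false};

    // Body of the decoder thread, mirroring the shape of decodeLoop() below.
    void loop()
    {
        while (!m_exit)
        {
            // ... decode and publish one packet's worth of frames ...

            // While paused, sleep in short slices so m_exit stays responsive.
            while (m_paused && !m_exit)
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }
};

Pausing is then a single write of the flag from the controlling thread; in this sketch no lock is needed because the flag is only ever read or written atomically.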
#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>
#include <osg/Timer>

#include <stdexcept>
#include <string.h>

namespace osgFFmpeg {

FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_writeBuffer(0),
    m_user_data(0),
    m_publish_func(0),
    m_paused(true),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)
#endif
{
}

FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    osg::notify(osg::INFO)<<"Destructing FFmpegDecoderVideo..."<<std::endl;

    if (isRunning())
    {
        m_exit = true;
#if 0
        while(isRunning()) { OpenThreads::YieldCurrentThread(); }
#else
        join();
#endif
    }

#ifdef USE_SWSCALE
    if (m_swscale_ctx)
    {
        sws_freeContext(m_swscale_ctx);
        m_swscale_ctx = 0;
    }
#endif

    osg::notify(osg::INFO)<<"Destructed FFmpegDecoderVideo"<<std::endl;
}

void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support the alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the frame rate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open the codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate the decoded video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate the converted RGB frame and its double buffer
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from the codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}

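// Ask the decode thread to exit; optionally spin until it has actually stopped.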
void FFmpegDecoderVideo::close(bool waitForThreadToExit)
{
    m_exit = true;

    if (isRunning() && waitForThreadToExit)
    {
        while(isRunning()) { OpenThreads::Thread::YieldCurrentThread(); }
    }
}

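// Pausing just raises or clears a flag; the decode thread polls it in decodeLoop().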
void FFmpegDecoderVideo::pause(bool pause)
{
    m_paused = pause;
}

void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }
    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }
    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}

void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save the global PTS so that getBuffer() can stash it in m_frame

            m_packet_pts = packet.packet.pts;

            // Decode the video frame

            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame's PTS: use the PTS stashed by getBuffer() when the
            // packet carries no DTS, otherwise fall back to the packet DTS, then to zero.

            if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
                m_frame->opaque != 0 &&
                *reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay, m_clocks.audioDisabled());
            }
        }

        // While paused, sleep in short slices so that m_exit stays responsive
        while (m_paused && !m_exit)
        {
            microSleep(10000);
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}

void FFmpegDecoderVideo::findAspectRatio()
{
    float ratio = 0.0f;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = float(av_q2d(m_context->sample_aspect_ratio));

    if (ratio <= 0.0f)
        ratio = 1.0f;

    m_pixel_aspect_ratio = ratio;
}

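// Convert between pixel formats, through libswscale when USE_SWSCALE is defined
// and through the deprecated img_convert() otherwise; the conversion time is
// reported at INFO severity.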
int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
            int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
    if (m_swscale_ctx==0)
    {
        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
                                       src_width, src_height, (PixelFormat) dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
    }

    osg::notify(osg::INFO)<<"Using sws_scale ";

    int result = sws_scale(m_swscale_ctx,
                           (src->data), (src->linesize), 0, src_height,
                           (dst->data), (dst->linesize));
#else

    osg::notify(osg::INFO)<<"Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    osg::notify(osg::INFO)<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    return result;
}

void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
{
    // If there is no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

#if 1
    // New code from Jean-Sebastien Guay - needs testing as we're unclear on the best solution.
    // If the display delay is too small, we had better skip the frame.
    if (!audio_disabled && delay < -0.010)
        return;
#else
    // Original solution, which hung on video streamed over the web.
    // If the display delay is too small, we had better skip the frame.
    if (delay < -0.010)
        return;
#endif

    AVPicture * const src = (AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Assign appropriate parts of the current write buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());

    // Convert YUVA420P (i.e. YUV420P plus alpha channel) using our own routine

    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    // Swap the double buffer and hand the finished frame to the publish callback
    m_writeBuffer = 1 - m_writeBuffer;

    m_publish_func(* this, m_user_data);
}

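// Convert a YUVA420P picture to RGBA: convert the YUV planes to RGB32 first,
// then copy the source alpha plane into every fourth byte of the destination.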
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    uint8_t * a_dst = dst->data[0] + 3;

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}

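// Replacement for avcodec_default_get_buffer(): allocates the buffer as usual,
// then stores the current packet PTS in picture->opaque so that decodeLoop()
// can recover the timestamp of each decoded frame.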
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}

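// Counterpart of getBuffer(): frees the PTS stored in picture->opaque before
// releasing the buffer itself.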
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}

} // namespace osgFFmpeg