root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 11177

Revision 11177, 10.8 kB (checked in by robert, 4 years ago)

Added extra ffmpeg version check

#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>
#include <osg/Timer>

#include <stdexcept>
#include <string.h>

namespace osgFFmpeg {

static int decode_video(AVCodecContext *avctx, AVFrame *picture,
                        int *got_picture_ptr,
                        const uint8_t *buf, int buf_size)
{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=32)
    // The following segment is copied from the ffmpeg avcodec_decode_video()
    // implementation, to avoid warnings about deprecated function usage.
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = const_cast<uint8_t *>(buf);
    avpkt.size = buf_size;
    // HACK for CorePNG to decode as normal PNG by default
    avpkt.flags = AV_PKT_FLAG_KEY;

    return avcodec_decode_video2(avctx, picture, got_picture_ptr, &avpkt);
#else
    // Fallback for older versions of ffmpeg that don't provide avcodec_decode_video2().
    return avcodec_decode_video(avctx, picture, got_picture_ptr, buf, buf_size);
#endif
}
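
// Note on the guard above: AVPacket-based decoding via avcodec_decode_video2()
// was introduced partway through the libavcodec 52.x series, and the old
// avcodec_decode_video() entry point was deprecated and later removed, so the
// compile-time check selects whichever API the installed ffmpeg provides.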

FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_writeBuffer(0),
    m_user_data(0),
    m_publish_func(0),
    m_paused(true),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)
#endif
{
}

FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    osg::notify(osg::INFO)<<"Destructing FFmpegDecoderVideo..."<<std::endl;

    if (isRunning())
    {
        m_exit = true;
#if 0
        while(isRunning()) { OpenThreads::YieldCurrentThread(); }
#else
        join();
#endif
    }

#ifdef USE_SWSCALE
    if (m_swscale_ctx)
    {
        sws_freeContext(m_swscale_ctx);
        m_swscale_ctx = 0;
    }
#endif

    osg::notify(osg::INFO)<<"Destructed FFmpegDecoderVideo"<<std::endl;
}
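
// join() is preferred over the spin-wait kept under "#if 0": it blocks until
// the decode thread has genuinely exited instead of repeatedly yielding, so
// the thread is guaranteed to be gone before the members it uses are destroyed.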


void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open() seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support the alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open the codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate the video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate the converted RGBA frame and its two backing buffers
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() on the codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}
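
// A hypothetical caller (not part of this file) would typically locate the
// video stream in an already-opened AVFormatContext and hand it to open(),
// along the lines of:
//
//     for (unsigned int i = 0; i < format_context->nb_streams; ++i)
//     {
//         if (format_context->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
//         {
//             decoder_video.open(format_context->streams[i]);  // may throw std::runtime_error
//             break;
//         }
//     }
//
// (CODEC_TYPE_VIDEO is the constant of this ffmpeg vintage; newer releases
// rename it AVMEDIA_TYPE_VIDEO.)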


void FFmpegDecoderVideo::close(bool waitForThreadToExit)
{
    m_exit = true;

    if (isRunning() && waitForThreadToExit)
    {
        while(isRunning()) { OpenThreads::Thread::YieldCurrentThread(); }
    }
}
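
// Passing waitForThreadToExit = false merely raises the exit flag and returns
// immediately; the decode loop notices the flag and winds down on its own.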

void FFmpegDecoderVideo::pause(bool pause)
{
    m_paused = pause;
}

void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }
    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }
    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}


void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it
        while (m_bytes_remaining > 0)
        {
            // Save the global PTS to be stored in m_frame via getBuffer()
            m_packet_pts = packet.packet.pts;

            // Decode the video frame
            int frame_finished = 0;

            const int bytes_decoded = decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame PTS: prefer the value captured by getBuffer(),
            // fall back to the packet DTS, and finally to zero.
            if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
                m_frame->opaque != 0 &&
                *reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay, m_clocks.audioDisabled());
            }
        }

        while(m_paused && !m_exit)
        {
            microSleep(10000);
        }

        // Get the next packet
        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
            }
        }
    }
}
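
// Worked example of the time-base conversion above: with a stream time_base of
// 1/90000 (typical for MPEG transport streams), a raw pts of 135000 becomes
// 135000 * (1/90000) = 1.5 seconds of presentation time.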


void FFmpegDecoderVideo::findAspectRatio()
{
    float ratio = 0.0f;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = float(av_q2d(m_context->sample_aspect_ratio));

    if (ratio <= 0.0f)
        ratio = 1.0f;

    m_pixel_aspect_ratio = ratio;
}
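
// sample_aspect_ratio describes pixel shape, not frame shape: for example,
// anamorphic PAL DV stores 720x576 pixels with a sample aspect ratio of 16:15,
// giving a display ratio of (16/15) * (720/576) = 4:3. A numerator of 0 means
// "unknown", which is why the ratio is clamped to 1.0 (square pixels) above.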

int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
                                int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
    if (m_swscale_ctx==0)
    {
        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
                                       src_width, src_height, (PixelFormat) dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
    }

    osg::notify(osg::INFO)<<"Using sws_scale ";

    int result = sws_scale(m_swscale_ctx,
                           (src->data), (src->linesize), 0, src_height,
                           (dst->data), (dst->linesize));
#else

    osg::notify(osg::INFO)<<"Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    osg::notify(osg::INFO)<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    return result;
}
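
// The swscale context is created once and then reused, which is safe here
// because the source format and size never change mid-stream. Newer libswscale
// versions also offer sws_getCachedContext(), which rebuilds the context only
// when its parameters change; that would be a reasonable alternative if the
// input dimensions could vary.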


void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
{
    // If there is no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

#if 1
    // New code from Jean-Sebastien Guay - needs testing as we're unclear on the best solution.
    // If the display delay is too small, we had better skip the frame.
    if (!audio_disabled && delay < -0.010)
        return;
#else
    // Original solution, which hung on video streamed over the web.
    // If the display delay is too small, we had better skip the frame.
    if (delay < -0.010)
        return;
#endif

    AVPicture * const src = (AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Assign appropriate parts of the current write buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());

    // Convert YUVA420P (i.e. YUV420P plus alpha channel) using our own routine
    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    m_writeBuffer = 1 - m_writeBuffer;

    m_publish_func(* this, m_user_data);
}
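
// m_buffer_rgba is double-buffered: the frame is converted into
// m_buffer_rgba[m_writeBuffer] while the consumer may still be reading the
// other buffer, and the index is flipped only after the presentation delay
// has elapsed, just before the publish callback fires.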


void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    uint8_t * a_dst = dst->data[0] + 3;

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}
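
// convert() fills in the RGB values; the loop above then copies the alpha
// plane (src->data[3]) into every fourth byte of the packed RGBA output. The
// destination pointer walk assumes a tightly packed buffer (linesize[0] ==
// 4 * width), which holds for the avpicture_fill()ed buffers used here but
// would break on row-padded destinations.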


int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}


void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}
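
// getBuffer()/releaseBuffer() implement the usual PTS-smuggling trick: when the
// codec requests a frame buffer, the PTS of the packet currently being decoded
// (m_packet_pts) is heap-allocated and attached to the frame via
// picture->opaque; decodeLoop() reads it back when the frame emerges from the
// decoder (possibly several packets later, due to codec delay), and
// releaseBuffer() frees it. context->opaque, set in open(), carries the
// FFmpegDecoderVideo instance into these static callbacks.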


} // namespace osgFFmpeg