root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 9860

Revision 9860, 9.5 kB (checked in by robert, 5 years ago)

Introduced double buffering of video stream to avoid tearing of image.

Removed swapBuffers call and image y inversion.

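In outline, the double buffering described above works like this: the decoder converts each frame into one of two RGBA buffers while readers see the other, and the write index flips only once a complete frame has been written. A minimal sketch of the idea, with hypothetical names (the actual implementation is spread across open(), publishFrame() and m_writeBuffer in the source below):

#include <vector>

// Sketch only -- not part of the plugin. Two complete RGBA frames plus an
// index recording which one is currently being written.
struct DoubleBufferSketch
{
    std::vector<unsigned char> buffers[2];
    int writeIndex;

    explicit DoubleBufferSketch(size_t frameBytes) : writeIndex(0)
    {
        buffers[0].resize(frameBytes);
        buffers[1].resize(frameBytes);
    }

    // The decoder converts the next frame into this buffer.
    unsigned char * writePtr() { return &buffers[writeIndex][0]; }

    // Readers always see the last completed frame, never a partial one.
    const unsigned char * readPtr() const { return &buffers[1 - writeIndex][0]; }

    // Flip only after a full frame has been written; this is what prevents
    // a reader from observing a half-converted image (tearing).
    void flip() { writeIndex = 1 - writeIndex; }
};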
#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>
#include <osg/Timer>

#include <stdexcept>
#include <string.h>

namespace osgFFmpeg {

FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_writeBuffer(0),       // index of the RGBA buffer currently being written (see publishFrame())
    m_user_data(0),
    m_publish_func(0),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)
#endif
{

}


FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    // Stop the decoding thread before tearing down the state it uses.
    if (isRunning())
    {
        m_exit = true;
        join();
    }

#ifdef USE_SWSCALE
    if (m_swscale_ctx)
    {
        sws_freeContext(m_swscale_ctx);
        m_swscale_ctx = 0;
    }
#endif
}


void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open() seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support the alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate the converted RGB frame and both destination buffers used for
    // double buffering
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

    // Assign appropriate parts of the first buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from the codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}


void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}


void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save the global PTS to be stored in m_frame via getBuffer()

            m_packet_pts = packet.packet.pts;

            // Decode video frame

            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame PTS: prefer the PTS stashed in the frame by
            // getBuffer(), fall back on the packet DTS, and finally on zero.

            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay);
            }
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}


void FFmpegDecoderVideo::findAspectRatio()
{
    double ratio = 0.0;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = (av_q2d(m_context->sample_aspect_ratio) * m_width) / m_height;

    if (ratio <= 0.0)
        ratio = double(m_width) / double(m_height);

    m_aspect_ratio = ratio;
}

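// convert() below selects between the two pixel-format conversion paths
// available to this version of FFmpeg: sws_scale() from libswscale when the
// plugin is built with USE_SWSCALE, and the older img_convert() otherwise.
// The osg::Timer / NOTICE output around both paths is timing instrumentation
// left in by this revision.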
int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, const AVPicture *src,
            int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
    if (m_swscale_ctx==0)
    {
        m_swscale_ctx = sws_getContext(src_width, src_height, src_pix_fmt,
                                       src_width, src_height, dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
    }

    osg::notify(osg::NOTICE)<<"Using sws_scale ";

    int result = sws_scale(m_swscale_ctx,
                           src->data, src->linesize, 0, src_height,
                           dst->data, dst->linesize);
#else

    osg::notify(osg::NOTICE)<<"Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    osg::notify(osg::NOTICE)<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    return result;
}

void FFmpegDecoderVideo::publishFrame(const double delay)
{
    // If no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

    // If the display delay is too small, we had better skip the frame.
    if (delay < -0.010)
        return;

    const AVPicture * const src = (const AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    osg::Timer_t startTick = osg::Timer::instance()->tick();

    // Assign appropriate parts of the current write buffer to image planes
    // in m_frame_rgba, so the conversion below fills the buffer that readers
    // are not currently looking at.
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());

    // Convert YUVA420P (i.e. YUV420P plus alpha channel) using our own routine

    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // The swapBuffers() call and its image y inversion have been replaced by
    // the double buffering above.
    // swapBuffers();

    osg::Timer_t endTick = osg::Timer::instance()->tick();
    osg::notify(osg::NOTICE)<<" time of convert = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    // Flip the write index so that the frame just converted becomes the read
    // buffer, then hand it to the publisher.
    m_writeBuffer = 1 - m_writeBuffer;

    m_publish_func(* this, m_user_data);
}


// No longer called from publishFrame(): copies the write buffer into the
// other buffer, inverting the image vertically in the process.
void FFmpegDecoderVideo::swapBuffers()
{
    for (int h = 0; h < height(); ++h)
        memcpy(&(m_buffer_rgba[1-m_writeBuffer])[(height() - h - 1) * width() * 4], &(m_buffer_rgba[m_writeBuffer])[h * width() * 4], width() * 4);
}


void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, const AVPicture * const src, int width, int height)
{
    // Convert the YUV planes as usual, then overwrite the alpha channel of
    // the RGBA output with the alpha plane of the source picture.
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    uint8_t * a_dst = dst->data[0] + 3;

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}


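// getBuffer()/releaseBuffer() below implement the usual FFmpeg trick for
// tracking presentation timestamps across frame reordering: getBuffer()
// stashes the PTS of the packet being decoded (m_packet_pts) in the frame's
// opaque pointer, and decodeLoop() reads it back when the decoder actually
// emits the frame, which may happen several packets later.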
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}


void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}



} // namespace osgFFmpeg