root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 13041

Revision 13041, 10.6 kB (checked in by robert, 2 years ago)

Ran script to remove trailing spaces and tabs

#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>
#include <osg/Timer>

#include <stdexcept>
#include <string.h>

namespace osgFFmpeg {

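// Thin wrapper around the libavcodec decode call: newer versions of
// libavcodec (>= 52.32) deprecate avcodec_decode_video() in favour of
// avcodec_decode_video2(), so the appropriate call is selected at compile
// time and the rest of the decoder stays agnostic of the API version.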
static int decode_video(AVCodecContext *avctx, AVFrame *picture,
                        int *got_picture_ptr,
                        const uint8_t *buf, int buf_size)
{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=32)
    // The following code segment is copied from the ffmpeg avcodec_decode_video()
    // implementation to avoid warnings about deprecated function usage.
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = const_cast<uint8_t *>(buf);
    avpkt.size = buf_size;
    // HACK for CorePNG to decode as normal PNG by default
    avpkt.flags = AV_PKT_FLAG_KEY;

    return avcodec_decode_video2(avctx, picture, got_picture_ptr, &avpkt);
#else
    // Fallback for older versions of ffmpeg that don't have avcodec_decode_video2().
    return avcodec_decode_video(avctx, picture, got_picture_ptr, buf, buf_size);
#endif
}


FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_writeBuffer(0),
    m_user_data(0),
    m_publish_func(0),
    m_paused(true),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)
#endif
{

}


FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    OSG_INFO<<"Destructing FFmpegDecoderVideo..."<<std::endl;

    this->close(true);

#ifdef USE_SWSCALE
    if (m_swscale_ctx)
    {
        sws_freeContext(m_swscale_ctx);
        m_swscale_ctx = 0;
    }
#endif

    if (m_context)
    {
        avcodec_close(m_context);
    }

    OSG_INFO<<"Destructed FFmpegDecoderVideo"<<std::endl;
}


void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support the alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate converted RGB frame
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() on the codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}


void FFmpegDecoderVideo::close(bool waitForThreadToExit)
{
    if (isRunning())
    {
        m_exit = true;
        if (waitForThreadToExit)
            join();
    }
}

void FFmpegDecoderVideo::pause(bool pause)
{
    m_paused = pause;
}

void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        OSG_WARN << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        OSG_WARN << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}


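// Decode thread main loop: drain the current packet through decode_video(),
// publishing each completed frame with the delay computed by the clocks,
// honour the pause/exit flags, then pop the next packet from the queue
// (flushing the codec's buffers when a PACKET_FLUSH marker is received).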
void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save global PTS to be stored in m_frame via getBuffer()

            m_packet_pts = packet.packet.pts;

            // Decode video frame

            int frame_finished = 0;

            const int bytes_decoded = decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame pts

            if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
                m_frame->opaque != 0 &&
                *reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay, m_clocks.audioDisabled());
            }
        }

        while (m_paused && !m_exit)
        {
            microSleep(10000);
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
            }
        }
    }
}


void FFmpegDecoderVideo::findAspectRatio()
{
    float ratio = 0.0f;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = float(av_q2d(m_context->sample_aspect_ratio));

    if (ratio <= 0.0f)
        ratio = 1.0f;

    m_pixel_aspect_ratio = ratio;
}

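// Pixel-format conversion helper. With USE_SWSCALE the swscale context is
// created lazily on first use and cached in m_swscale_ctx (which assumes the
// source dimensions and formats do not change mid-stream); otherwise the
// older img_convert() API is used.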
int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
            int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
    if (m_swscale_ctx==0)
    {
        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
                                       src_width, src_height, (PixelFormat) dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
    }

    OSG_INFO<<"Using sws_scale ";

    int result = sws_scale(m_swscale_ctx,
                           (src->data), (src->linesize), 0, src_height,
                           (dst->data), (dst->linesize));
#else

    OSG_INFO<<"Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    OSG_INFO<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    return result;
}


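// Convert the decoded frame to RGBA and hand it to the publish callback.
// The two RGBA buffers are used as a double buffer: the conversion writes
// into m_buffer_rgba[m_writeBuffer] while the previously published buffer
// may still be in use, and m_writeBuffer is flipped just before publishing.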
void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
{
    // If no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

#if 1
    // New code from Jean-Sebastien Guay - needs testing as we're unclear on the best solution.
    // If the display delay is too small, we better skip the frame.
    if (!audio_disabled && delay < -0.010)
        return;
#else
    // Original solution, which hung on video streamed over the web.
    // If the display delay is too small, we better skip the frame.
    if (delay < -0.010)
        return;
#endif

    AVPicture * const src = (AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());

    // Convert YUVA420P (i.e. YUV420P plus alpha channel) using our own routine

    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    m_writeBuffer = 1-m_writeBuffer;

    m_publish_func(* this, m_user_data);
}


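// Convert a YUVA420P frame to RGBA. The generic convert() path is used for
// the Y/U/V planes, but it does not fill the alpha channel, so the alpha
// plane (src->data[3]) is copied by hand into the fourth byte of each
// output pixel.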
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    uint8_t * a_dst = dst->data[0] + 3;

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}


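// Codec-context get_buffer() callback. Codecs with B-frames output frames in
// display order rather than in packet order, so the PTS of the packet being
// decoded (m_packet_pts) is attached to the frame via its opaque pointer
// here, and read back in decodeLoop() when the frame is finally returned.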
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}


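// Matching release_buffer() callback: free the PTS storage allocated in
// getBuffer() before handing the frame back to the default deallocator.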
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}



} // namespace osgFFmpeg