root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 9965

Revision 9965, 9.4 kB (checked in by robert, 6 years ago)

From Ulrich Hertlein, "I got the following type error from gcc 4.0.1 on OS X 10.5.6:

/Users/uli/Projects/osg/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp: In member function 'int osgFFmpeg::FFmpegDecoderVideo::convert(AVPicture*, int, AVPicture*, int, int, int)':
/Users/uli/Projects/osg/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp:245: error: invalid conversion from 'int' to 'PixelFormat'
/Users/uli/Projects/osg/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp:245: error: initializing argument 3 of 'SwsContext* sws_getContext(int, int, PixelFormat, int, int, PixelFormat, int, SwsFilter*, SwsFilter*, double*)'

It expects 'src_pix_fmt' and 'dst_pix_fmt' to be of type 'PixelFormat' rather than int. The attached cast fixes this (for me).

I've also added Matroska video to the list of supported extensions."
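
Concretely, the fix is the pair of (PixelFormat) casts at the sws_getContext() call in convert() below:

    m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
                                   src_width, src_height, (PixelFormat) dst_pix_fmt,
                                   /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);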

#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>
#include <osg/Timer>

#include <stdexcept>
#include <string.h>

namespace osgFFmpeg {

FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_writeBuffer(0),
    m_user_data(0),
    m_publish_func(0),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)
#endif
{
}


FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    osg::notify(osg::INFO) << "Destructing FFmpegDecoderVideo..." << std::endl;

    if (isRunning())
    {
        m_exit = true;
        join();
    }

#ifdef USE_SWSCALE
    if (m_swscale_ctx)
    {
        sws_freeContext(m_swscale_ctx);
        m_swscale_ctx = 0;
    }
#endif

    osg::notify(osg::INFO) << "Destructed FFmpegDecoderVideo" << std::endl;
}


void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support the alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open the codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate the video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate the converted RGBA frame and its double buffer
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from the codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}


void FFmpegDecoderVideo::close(bool waitForThreadToExit)
{
    m_exit = true;

    if (isRunning() && waitForThreadToExit)
    {
        while (isRunning()) { OpenThreads::Thread::YieldCurrentThread(); }
    }
}


void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }
    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }
    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}


void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it
        while (m_bytes_remaining > 0)
        {
            // Save the global PTS so that getBuffer() can store it in m_frame
            m_packet_pts = packet.packet.pts;

            // Decode the video frame
            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame PTS
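            // (prefer the PTS stashed in the frame's opaque field by getBuffer();
            // fall back to the packet DTS, or zero if neither is available)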
            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay);
            }
        }

        // Get the next packet
        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}


void FFmpegDecoderVideo::findAspectRatio()
{
    float ratio = 0.0f;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = float(av_q2d(m_context->sample_aspect_ratio));

    if (ratio <= 0.0f)
        ratio = 1.0f;

    m_pixel_aspect_ratio = ratio;
}


int FFmpegDecoderVideo::convert(AVPicture * dst, int dst_pix_fmt, AVPicture * src,
                                int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();

#ifdef USE_SWSCALE
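    // Lazily create the libswscale context on first use and cache it; note the
    // (PixelFormat) casts that sws_getContext() requires for the format arguments.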
    if (m_swscale_ctx == 0)
    {
        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
                                       src_width, src_height, (PixelFormat) dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
    }

    osg::notify(osg::INFO) << "Using sws_scale ";

    int result = sws_scale(m_swscale_ctx,
                           src->data, src->linesize, 0, src_height,
                           dst->data, dst->linesize);
#else

    osg::notify(osg::INFO) << "Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    osg::notify(osg::INFO) << " time = " << osg::Timer::instance()->delta_m(startTick, endTick) << "ms" << std::endl;

    return result;
}


void FFmpegDecoderVideo::publishFrame(const double delay)
{
    // If there is no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

    // If the display delay is too small, we'd better skip the frame.
    if (delay < -0.010)
        return;

    AVPicture * const src = (AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());

    // Convert YUVA420P (i.e. YUV420P plus alpha channel) using our own routine
    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

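    // Swap the double buffer: the consumer reads the buffer we just filled
    // while the next frame is decoded into the other one.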
    m_writeBuffer = 1 - m_writeBuffer;

    m_publish_func(*this, m_user_data);
}


void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
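    // First convert the YUV planes to RGBA as usual, then overwrite the alpha
    // byte of each output pixel with the value from the source alpha plane.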
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    for (int h = 0; h < height; ++h) {

        uint8_t * a_dst = dst->data[0] + h * dst->linesize[0] + 3;
        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}


int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

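    // Allocate the frame via the default policy, then attach the current packet
    // PTS so decodeLoop() can recover it; the storage is freed in releaseBuffer().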
    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}


void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
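    // Free the PTS storage attached by getBuffer()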
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}


} // namespace osgFFmpeg