root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 9816

Revision 9816, 7.6 kB (checked in by robert, 6 years ago)

From Tanguy Fautre (Aris Technologies), ffmpeg plugin

RevLine 
[9816]1
#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>

#include <algorithm>
#include <stdexcept>
#include <string.h>
8
9
10
11namespace osgFFmpeg {
12
13
14
15FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
16    m_packets(packets),
17    m_clocks(clocks),
18    m_stream(0),
19    m_context(0),
20    m_codec(0),
21    m_packet_data(0),
22    m_bytes_remaining(0),
23    m_packet_pts(AV_NOPTS_VALUE),
24    m_user_data(0),
25    m_publish_func(0),
26    m_exit(false)
27{
28
29}
30
31
32
33FFmpegDecoderVideo::~FFmpegDecoderVideo()
34{
35    if (isRunning())
36    {
37        m_exit = true;
38        join();
39    }
40}
41
42
43
44void FFmpegDecoderVideo::open(AVStream * const stream)
45{
46    m_stream = stream;
47    m_context = stream->codec;
48
49    // Trust the video size given at this point
50    // (avcodec_open seems to sometimes return a 0x0 size)
51    m_width = m_context->width;
52    m_height = m_context->height;
53    findAspectRatio();
54
55    // Find out whether we support Alpha channel
56    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);
57
58    // Find out the framerate
59    m_frame_rate = av_q2d(stream->r_frame_rate);
60
61    // Find the decoder for the video stream
62    m_codec = avcodec_find_decoder(m_context->codec_id);
63
64    if (m_codec == 0)
65        throw std::runtime_error("avcodec_find_decoder() failed");
66
67    // Inform the codec that we can handle truncated bitstreams
68    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
69    //    m_context->flags |= CODEC_FLAG_TRUNCATED;
70
71    // Open codec
72    if (avcodec_open(m_context, m_codec) < 0)
73        throw std::runtime_error("avcodec_open() failed");
74
75    // Allocate video frame
76    m_frame.reset(avcodec_alloc_frame(), av_free);
77
78    // Allocate converted RGB frame
79    m_frame_rgba.reset(avcodec_alloc_frame(), av_free);
80    m_buffer_rgba.resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
81    m_buffer_rgba_public.resize(m_buffer_rgba.size());
82
83    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
84    avpicture_fill((AVPicture *) m_frame_rgba.get(), &m_buffer_rgba[0], PIX_FMT_RGB32, width(), height());
85
86    // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
87    m_context->opaque = this;
88    m_context->get_buffer = getBuffer;
89    m_context->release_buffer = releaseBuffer;
90}
91
92
93
94void FFmpegDecoderVideo::run()
95{
96    try
97    {
98        decodeLoop();
99    }
100
101    catch (const std::exception & error)
102    {
103        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
104    }
105
106    catch (...)
107    {
108        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
109    }
110}
111
112
113
114void FFmpegDecoderVideo::decodeLoop()
115{
116    FFmpegPacket packet;
117    double pts;
118
119    while (! m_exit)
120    {
121        // Work on the current packet until we have decoded all of it
122
123        while (m_bytes_remaining > 0)
124        {
125            // Save global PTS to be stored in m_frame via getBuffer()
126
127            m_packet_pts = packet.packet.pts;
128
129            // Decode video frame
130
131            int frame_finished = 0;
132
133            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);
134
135            if (bytes_decoded < 0)
136                throw std::runtime_error("avcodec_decode_video failed()");
137
138            m_bytes_remaining -= bytes_decoded;
139            m_packet_data += bytes_decoded;
140
141            // Find out the frame pts
142
143            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
144            {
145                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
146            }
147            else if (packet.packet.dts != AV_NOPTS_VALUE)
148            {
149                pts = packet.packet.dts;
150            }
151            else
152            {
153                pts = 0;
154            }
155
156            pts *= av_q2d(m_stream->time_base);
157
158            // Publish the frame if we have decoded a complete frame
159            if (frame_finished)
160            {
161                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
162                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);
163
164                publishFrame(frame_delay);
165            }
166        }
167
168        // Get the next packet
169
170        pts = 0;
171
172        if (packet.valid())
173            packet.clear();
174
175        bool is_empty = true;
176        packet = m_packets.timedPop(is_empty, 10);
177
178        if (! is_empty)
179        {
180            if (packet.type == FFmpegPacket::PACKET_DATA)
181            {
182                m_bytes_remaining = packet.packet.size;
183                m_packet_data = packet.packet.data;
184            }
185            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
186            {
187                avcodec_flush_buffers(m_context);
188                m_clocks.rewindVideo();
189            }
190        }
191    }
192}
193
194
195
196void FFmpegDecoderVideo::findAspectRatio()
197{
198    double ratio = 0.0;
199
200    if (m_context->sample_aspect_ratio.num != 0)
201        ratio = (av_q2d(m_context->sample_aspect_ratio) * m_width) / m_height;
202
203    if (ratio <= 0.0)
204        ratio = double(m_width) / double(m_height);
205
206    m_aspect_ratio = ratio;
207}
208
209
210
211void FFmpegDecoderVideo::publishFrame(const double delay)
212{
213    // If no publishing function, just ignore the frame
214    if (m_publish_func == 0)
215        return;
216
217    // If the display delay is too small, we better skip the frame.
218    if (delay < -0.010)
219        return;
220
221    const AVPicture * const src = (const AVPicture *) m_frame.get();
222    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
223
224    // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
225
226    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
227        yuva420pToRgba(dst, src, width(), height());
228    else
229        img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());
230
231    // Flip and swap buffer
232    swapBuffers();
233
234    // Wait 'delay' seconds before publishing the picture.
235    int i_delay = static_cast<int>(delay * 1000000 + 0.5);
236
237    while (i_delay > 1000)
238    {
239        // Avoid infinite/very long loops
240        if (m_exit)
241            return;
242
243        const int micro_delay = (std::min)(1000000, i_delay);
244
245        OpenThreads::Thread::microSleep(micro_delay);
246
247        i_delay -= micro_delay;
248    }
249
250    m_publish_func(* this, m_user_data);
251}
252
253
254
255void FFmpegDecoderVideo::swapBuffers()
256{
257    for (int h = 0; h < height(); ++h)
258        memcpy(&m_buffer_rgba_public[(height() - h - 1) * width() * 4], &m_buffer_rgba[h * width() * 4], width() * 4);
259}
260
261
262
263void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, const AVPicture * const src, int width, int height)
264{
265    img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);
266
267    const size_t bpp = 4;
268
269    uint8_t * a_dst = dst->data[0] + 3;
270
271    for (int h = 0; h < height; ++h) {
272
273        const uint8_t * a_src = src->data[3] + h * src->linesize[3];
274
275        for (int w = 0; w < width; ++w) {
276            *a_dst = *a_src;
277            a_dst += bpp;
278            a_src += 1;
279        }
280    }
281}
282
283
284
285int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
286{
287    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);
288
289    const int result = avcodec_default_get_buffer(context, picture);
290    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );
291
292    *p_pts = this_->m_packet_pts;
293    picture->opaque = p_pts;
294
295    return result;
296}
297
298
299
300void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
301{
302    if (picture != 0)
303        av_freep(&picture->opaque);
304
305    avcodec_default_release_buffer(context, picture);
306}
307
308
309
310} // namespace osgFFmpeg
Note: See TracBrowser for help on using the browser.