root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 9826

Revision 9826, 7.6 kB (checked in by robert, 6 years ago)

Ported across from using Boost pointers, and prepared for integration of the audio interface into core OSG

#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>

#include <stdexcept>
#include <string.h>



namespace osgFFmpeg {


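// Construction only wires up the shared packet queue and clocks; the codec
// members stay null until open() binds the decoder to a stream. A rough
// usage sketch (assuming the class runs as an OpenThreads::Thread, which
// the isRunning()/join() calls below suggest):
//
//     FFmpegDecoderVideo video(packets, clocks);
//     video.open(video_stream);
//     video.start();   // spawns the thread that executes run()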
FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_user_data(0),
    m_publish_func(0),
    m_exit(false)
{

}


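// Ask the decode thread to exit, then block until it has joined.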
FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    if (isRunning())
    {
        m_exit = true;
        join();
    }
}


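// Bind the decoder to a video stream: cache its dimensions, pixel format
// and frame rate, open the matching codec, and allocate the native and
// RGBA frames used for conversion.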
void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support the alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");
    // Inform the codec that we can handle truncated bitstreams (currently disabled)
    //if (m_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate converted RGB frame
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba.resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba_public.resize(m_buffer_rgba.size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &m_buffer_rgba[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}


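// Thread entry point: run the decode loop, trapping any exception so a
// decoder failure is logged rather than propagated out of the thread.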
void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}


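// Main decode loop: feed each packet through avcodec_decode_video(),
// reconstruct a best-effort PTS for every completed frame, synchronise it
// against the shared clocks and publish it. Between packets, the queue is
// polled with a timeout so m_exit is honoured promptly.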
void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save global PTS to be stored in m_frame via getBuffer()

            m_packet_pts = packet.packet.pts;

            // Decode video frame

            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame pts

            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay);
            }
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}


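// Derive the display aspect ratio from the codec's sample aspect ratio
// when one is set, falling back to the plain width/height ratio.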
void FFmpegDecoderVideo::findAspectRatio()
{
    double ratio = 0.0;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = (av_q2d(m_context->sample_aspect_ratio) * m_width) / m_height;

    if (ratio <= 0.0)
        ratio = double(m_width) / double(m_height);

    m_aspect_ratio = ratio;
}


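// Convert the decoded frame to RGBA, flip it into the public buffer, wait
// out the scheduled display delay, then hand the frame to the client's
// publish callback.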
void FFmpegDecoderVideo::publishFrame(const double delay)
{
    // If no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

    // If the display delay is too small, we'd better skip the frame.
    if (delay < -0.010)
        return;

    const AVPicture * const src = (const AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine

    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Flip and swap buffer
    swapBuffers();

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    m_publish_func(* this, m_user_data);
}


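// Copy the RGBA buffer into the public buffer row by row, flipping the
// image vertically in the process.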
void FFmpegDecoderVideo::swapBuffers()
{
    for (int h = 0; h < height(); ++h)
        memcpy(&m_buffer_rgba_public[(height() - h - 1) * width() * 4], &m_buffer_rgba[h * width() * 4], width() * 4);
}


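// img_convert() does not carry the alpha plane across, so convert the
// colour planes normally and then weave the source alpha plane into every
// fourth byte of the RGBA output.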
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, const AVPicture * const src, int width, int height)
{
    img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    uint8_t * a_dst = dst->data[0] + 3;

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}


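// Replacement for the codec's default get_buffer(): allocate the frame as
// usual, then stash the PTS of the packet currently being decoded in the
// frame's opaque pointer so decodeLoop() can recover it later.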
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}


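// Free the PTS value stored by getBuffer() before releasing the frame.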
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}



} // namespace osgFFmpeg