root/OpenSceneGraph/trunk/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp @ 9837

Revision 9837, 7.8 kB (checked in by robert, 6 years ago)

Added extern for img_convert to get around the disappearance of the declaration from the headers.

#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>

#include <stdexcept>
#include <string.h>

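// img_convert() has disappeared from the public FFmpeg headers, so declare
// it here ourselves (see the log message for this revision).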
extern "C"
{
    int img_convert(AVPicture *dst, int dst_pix_fmt, const AVPicture *src,
                    int src_pix_fmt, int src_width, int src_height);
}

namespace osgFFmpeg {



FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),
    m_user_data(0),
    m_publish_func(0),
    m_exit(false)
{

}



FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
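    // Ask the decoding thread to exit, then wait for it to finish.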
    if (isRunning())
    {
        m_exit = true;
        join();
    }
}



void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open() seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether the video has an alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the frame rate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open the codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate the video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate the converted RGBA frame
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba.resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba_public.resize(m_buffer_rgba.size());
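    // m_buffer_rgba is the conversion target; m_buffer_rgba_public holds the
    // vertically flipped copy handed out to the publish callback (see
    // swapBuffers() below).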

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) m_frame_rgba.get(), &m_buffer_rgba[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}



void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() << std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled exception" << std::endl;
    }
}



void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save the global PTS to be stored in m_frame via getBuffer()

            m_packet_pts = packet.packet.pts;

            // Decode the video frame

            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video() failed");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame PTS

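            // Prefer the PTS that getBuffer() attached to the frame through
            // m_frame->opaque when the packet carries no DTS; otherwise fall
            // back to the packet DTS, and finally to 0. The value is converted
            // from stream time_base units to seconds just below.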
            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay);
            }
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

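        // timedPop() blocks only briefly (the 10 presumably being
        // milliseconds), so the loop stays responsive to m_exit even when
        // the packet queue is empty.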
        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}



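// Compute the display aspect ratio, preferring the codec's sample (pixel)
// aspect ratio when one is set and falling back to the plain width/height
// ratio otherwise.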
void FFmpegDecoderVideo::findAspectRatio()
{
    double ratio = 0.0;

    if (m_context->sample_aspect_ratio.num != 0)
        ratio = (av_q2d(m_context->sample_aspect_ratio) * m_width) / m_height;

    if (ratio <= 0.0)
        ratio = double(m_width) / double(m_height);

    m_aspect_ratio = ratio;
}



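// publishFrame() hands the finished picture to the client through
// m_publish_func. A minimal sketch of such a callback, assuming only the
// signature implied by the call at the end of this function (the name
// my_publish is hypothetical):
//
//     void my_publish(FFmpegDecoderVideo & decoder, void * user_data)
//     {
//         // Read the decoder's public RGBA buffer here (it has just been
//         // filled and flipped by swapBuffers()).
//     }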
void FFmpegDecoderVideo::publishFrame(const double delay)
{
    // If there is no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

    // If the display delay is too small, we had better skip the frame.
    if (delay < -0.010)
        return;

    const AVPicture * const src = (const AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Convert YUVA420P (i.e. YUV420P plus an alpha channel) using our own routine

    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Flip the image vertically and swap the buffers
    swapBuffers();

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);

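    // Sleep in slices of at most one second so that m_exit can cut a long
    // wait short; delays of one millisecond or less are not worth sleeping for.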
    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    m_publish_func(* this, m_user_data);
}



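// Copy the RGBA image into the public buffer one row at a time, in reverse
// row order, which flips the picture vertically (4 bytes per pixel).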
void FFmpegDecoderVideo::swapBuffers()
{
    for (int h = 0; h < height(); ++h)
        memcpy(&m_buffer_rgba_public[(height() - h - 1) * width() * 4], &m_buffer_rgba[h * width() * 4], width() * 4);
}



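// Convert the YUV planes to RGB32 with img_convert(), then copy the source
// alpha plane (src->data[3]) into every fourth byte of the packed
// destination, i.e. into the alpha component of each pixel.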
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, const AVPicture * const src, int width, int height)
{
    img_convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bpp = 4;

    uint8_t * a_dst = dst->data[0] + 3;

    for (int h = 0; h < height; ++h) {

        const uint8_t * a_src = src->data[3] + h * src->linesize[3];

        for (int w = 0; w < width; ++w) {
            *a_dst = *a_src;
            a_dst += bpp;
            a_src += 1;
        }
    }
}



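// Replacement for the codec's default get_buffer(): allocate the frame as
// usual, then attach the PTS of the packet currently being decoded to the
// frame through picture->opaque. decodeLoop() reads it back and
// releaseBuffer() frees the allocation.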
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}



void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
        av_freep(&picture->opaque);

    avcodec_default_release_buffer(context, picture);
}



} // namespace osgFFmpeg