Try to improve refetching of last frame and seek backwards.
[dcpomatic.git] / src / lib / ffmpeg_decoder.cc
/*
    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */
#include <stdexcept>
#include <vector>
#include <sstream>
#include <iomanip>
#include <iostream>
#include <stdint.h>
#include <boost/lexical_cast.hpp>
#include <sndfile.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include "film.h"
#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "ffmpeg_decoder.h"
#include "filter_graph.h"
#include "audio_buffers.h"

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::stringstream;
using std::list;
using std::min;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
using libdcp::Size;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio)
        : Decoder (f)
        , VideoDecoder (f)
        , AudioDecoder (f)
        , SubtitleDecoder (f)
        , FFmpeg (c)
        , _subtitle_codec_context (0)
        , _subtitle_codec (0)
        , _decode_video (video)
        , _decode_audio (audio)
        , _pts_offset (0)
        , _just_sought (false)
{
        setup_subtitle ();

        if (video && audio && c->audio_stream() && c->first_video() && c->audio_stream()->first_audio) {
                _pts_offset = compute_pts_offset (c->first_video().get(), c->audio_stream()->first_audio.get(), c->video_frame_rate());
        }
}

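/** Compute an offset to apply to the content's PTS values so that the first video frame
 *  lands exactly on a frame boundary and any start delay common to both video and audio
 *  is removed.
 *
 *  Worked example: with first_video = 0.045s, first_audio = 0.02s and 25fps video, the
 *  first video time is pushed up to 0.08s (the next frame boundary), so the offset is
 *  (0.08 - 0.045) - min (0.08, 0.02) = 0.015s.
 */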
double
FFmpegDecoder::compute_pts_offset (double first_video, double first_audio, float video_frame_rate)
{
        double const old_first_video = first_video;

        /* Round the first video time up to a frame boundary */
        if (fabs (rint (first_video * video_frame_rate) - first_video * video_frame_rate) > 1e-6) {
                first_video = ceil (first_video * video_frame_rate) / video_frame_rate;
        }

        /* Compute the required offset (also removing any common start delay) */
        return first_video - old_first_video - min (first_video, first_audio);
}

FFmpegDecoder::~FFmpegDecoder ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (_subtitle_codec_context) {
                avcodec_close (_subtitle_codec_context);
        }
}

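/** Read the next packet from the file and pass it to the appropriate decoder.  At
 *  end-of-file the video and audio decoders are flushed and our positions are set to
 *  the content lengths so that done() will return true.
 */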
void
FFmpegDecoder::pass ()
{
        int r = av_read_frame (_format_context, &_packet);

        if (r < 0) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        shared_ptr<const Film> film = _film.lock ();
                        assert (film);
                        film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
                }

                /* Get any remaining frames */

                _packet.data = 0;
                _packet.size = 0;

                /* XXX: should we reset _packet.data and size after each *_decode_* call? */

                if (_decode_video) {
                        while (decode_video_packet ()) {}
                }

                if (_ffmpeg_content->audio_stream() && _decode_audio) {
                        decode_audio_packet ();
                }

                /* Stop us being asked for any more data */
                _video_position = _ffmpeg_content->video_length ();
                _audio_position = _ffmpeg_content->audio_length ();
                return;
        }

        avcodec_get_frame_defaults (_frame);

        if (_packet.stream_index == _video_stream && _decode_video) {
                decode_video_packet ();
        } else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
                decode_audio_packet ();
        } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id) {
                decode_subtitle_packet ();
        }

        av_free_packet (&_packet);
}

/** @param data pointer to array of pointers to buffers.
 *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 *  @param size Total number of bytes in the buffers pointed to by `data'.
 *  @return Audio data converted to floating-point samples in the range [-1, 1).
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
{
        assert (_ffmpeg_content->audio_channels());
        assert (bytes_per_audio_sample());

        /* Deinterleave and convert to float */

        assert ((size % (bytes_per_audio_sample() * _ffmpeg_content->audio_channels())) == 0);

        int const total_samples = size / bytes_per_audio_sample();
        int const frames = total_samples / _ffmpeg_content->audio_channels();
        shared_ptr<AudioBuffers> audio (new AudioBuffers (_ffmpeg_content->audio_channels(), frames));

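        /* Interleaved formats (S16, S32, FLT) carry their samples as c0 c1 c2 c0 c1 c2 ... in
           data[0]; planar formats (S16P, FLTP) have one buffer per channel.  Integer samples
           are scaled down so that full scale maps to the range [-1, 1).
        */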
        switch (audio_sample_format()) {
        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        audio->data(channel)[sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == _ffmpeg_content->audio_channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (data);
                for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
                        for (int j = 0; j < frames; ++j) {
                                audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* Divide by 2^31 as a float constant; (1 << 31) overflows int */
                        audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648.0f;

                        ++channel;
                        if (channel == _ffmpeg_content->audio_channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        audio->data(channel)[sample] = *p++;

                        ++channel;
                        if (channel == _ffmpeg_content->audio_channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (data);
                for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
                        memcpy (audio->data(i), p[i], frames * sizeof(float));
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format())));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format () const
{
        if (!_ffmpeg_content->audio_stream()) {
                return (AVSampleFormat) 0;
        }

        return audio_codec_context()->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample () const
{
        return av_get_bytes_per_sample (audio_sample_format ());
}

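/** Seek the content so that decoding will continue from the given video frame.
 *  @param frame Video frame index to seek to.
 *  @param accurate true to step-decode from a slightly earlier position until the requested
 *  frame is reached; false to accept whatever the nearest preceding keyframe gives us.
 */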
void
FFmpegDecoder::seek (VideoContent::Frame frame, bool accurate)
{
        double const time_base = av_q2d (_format_context->streams[_video_stream]->time_base);

        /* If we are doing an accurate seek, our initial shot will be 5 frames (5 being
           a number plucked from the air) earlier than we want to end up.  The loop below
           will hopefully then step through to where we want to be.
        */
        int initial = frame;
        if (accurate) {
                initial -= 5;
        }

        /* Initial seek time in the stream's timebase */
        int64_t const initial_vt = initial / (_ffmpeg_content->video_frame_rate() * time_base);
        /* Wanted final seek time in the stream's timebase */
        int64_t const final_vt = frame / (_ffmpeg_content->video_frame_rate() * time_base);

        av_seek_frame (_format_context, _video_stream, initial_vt, AVSEEK_FLAG_BACKWARD);

        avcodec_flush_buffers (video_codec_context());
        if (_subtitle_codec_context) {
                avcodec_flush_buffers (_subtitle_codec_context);
        }

        _just_sought = true;

        if (frame == 0) {
                /* We're already there; from here on we can only seek non-zero amounts */
                return;
        }

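        /* For an accurate seek, decode forward from the (earlier) seek point until the
           best-effort timestamp of a decoded frame reaches the frame we actually want.
        */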
        if (accurate) {
                while (1) {
                        int r = av_read_frame (_format_context, &_packet);
                        if (r < 0) {
                                return;
                        }

                        avcodec_get_frame_defaults (_frame);

                        if (_packet.stream_index == _video_stream) {
                                int finished = 0;
                                int const r = avcodec_decode_video2 (video_codec_context(), _frame, &finished, &_packet);
                                if (r >= 0 && finished) {
                                        int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
                                        if (bet >= final_vt) {
                                                _video_position = rint (
                                                        (bet * time_base + _pts_offset) * _ffmpeg_content->video_frame_rate()
                                                        );
                                                av_free_packet (&_packet);
                                                break;
                                        }
                                }
                        }

                        av_free_packet (&_packet);
                }
        }
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;

        while (copy_packet.size > 0) {

                int frame_finished;
                int const decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* Decode error; give up on this packet rather than looping on it forever */
                        return;
                }

                if (frame_finished) {

                        if (_audio_position == 0) {
                                /* Where we are in the source, in seconds */
                                double const pts = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
                                        * av_frame_get_best_effort_timestamp(_frame) - _pts_offset;

                                if (pts > 0) {
                                        /* The first audio is later than the start of the content;
                                           emit some silence to cover the gap.
                                        */
                                        shared_ptr<AudioBuffers> silence (
                                                new AudioBuffers (
                                                        _ffmpeg_content->audio_channels(),
                                                        pts * _ffmpeg_content->content_audio_frame_rate()
                                                        )
                                                );

                                        silence->make_silent ();
                                        audio (silence, _audio_position);
                                }
                        }

                        /* Emit the decoded frame's audio */
                        int const data_size = av_samples_get_buffer_size (
                                0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format(), 1
                                );

                        audio (deinterleave_audio (_frame->data, data_size), _audio_position);
                }

                /* Advance past the data we consumed, whether or not it produced a frame */
                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<FilterGraph> graph;

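        /* Re-use an existing filter graph which can handle this frame's size and pixel format,
           or make a new one if this is the first time we have seen this combination.
        */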
        list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                shared_ptr<const Film> film = _film.lock ();
                assert (film);

                graph.reset (new FilterGraph (_ffmpeg_content, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
                _filter_graphs.push_back (graph);

                film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
        } else {
                graph = *i;
        }

        list<shared_ptr<Image> > images = graph->process (_frame);

        string post_process = Filter::ffmpeg_strings (_ffmpeg_content->filters()).second;

        for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = *i;
                if (!post_process.empty ()) {
                        image = image->post_process (post_process, true);
                }

                int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
                if (bet != AV_NOPTS_VALUE) {

                        double const pts = bet * av_q2d (_format_context->streams[_video_stream]->time_base) - _pts_offset;

                        if (_just_sought) {
                                /* We just did a seek, so disable any attempts to correct for where we
                                   are / should be.
                                */
                                _video_position = rint (pts * _ffmpeg_content->video_frame_rate ());
                                _just_sought = false;
                        }

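                        /* Compare this frame's PTS with where _video_position says we should be:
                           pad with black frames if the source has jumped forward, and skip frames
                           which are more than a frame behind where we expect to be.
                        */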
                        double const next = _video_position / _ffmpeg_content->video_frame_rate();
                        double const one_frame = 1 / _ffmpeg_content->video_frame_rate ();
                        double delta = pts - next;

                        while (delta > one_frame) {
                                /* This PTS is more than one frame forward in time of where we think we should be; emit
                                   a black frame.
                                */
                                boost::shared_ptr<Image> black (
                                        new Image (
                                                static_cast<AVPixelFormat> (_frame->format),
                                                libdcp::Size (video_codec_context()->width, video_codec_context()->height),
                                                true
                                                )
                                        );

                                black->make_black ();
                                video (black, false, _video_position);
                                delta -= one_frame;
                        }

                        if (delta > -one_frame) {
                                /* This PTS is within a frame of being right; emit this (otherwise it will be dropped) */
                                video (image, false, _video_position);
                        }

                } else {
                        shared_ptr<const Film> film = _film.lock ();
                        assert (film);
                        film->log()->log ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::setup_subtitle ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (!_ffmpeg_content->subtitle_stream() || _ffmpeg_content->subtitle_stream()->id >= int (_format_context->nb_streams)) {
                return;
        }

        _subtitle_codec_context = _format_context->streams[_ffmpeg_content->subtitle_stream()->id]->codec;
        _subtitle_codec = avcodec_find_decoder (_subtitle_codec_context->codec_id);

        if (_subtitle_codec == 0) {
                throw DecodeError (_("could not find subtitle decoder"));
        }

        if (avcodec_open2 (_subtitle_codec_context, _subtitle_codec, 0) < 0) {
                throw DecodeError (N_("could not open subtitle decoder"));
        }
}

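/** @return true when all the content we have been asked to decode (video and/or audio)
 *  has been passed on, so pass() need not be called again.
 */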
bool
FFmpegDecoder::done () const
{
        bool const vd = !_decode_video || (_video_position >= _ffmpeg_content->video_length());
        bool const ad = !_decode_audio || !_ffmpeg_content->audio_stream() || (_audio_position >= _ffmpeg_content->audio_length());
        return vd && ad;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (_subtitle_codec_context, &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
           indicate that the previous subtitle should stop.
        */
        if (sub.num_rects <= 0) {
                subtitle (shared_ptr<Image> (), dcpomatic::Rect<double> (), 0, 0);
                return;
        } else if (sub.num_rects > 1) {
                throw DecodeError (_("multi-part subtitles not yet supported"));
        }

        /* Subtitle PTS in seconds (within the source, not taking into account any of the
           source that we may have chopped off for the DCP)
        */
        double const packet_time = static_cast<double> (sub.pts) / AV_TIME_BASE;

        /* hence start and end times for this sub */
        Time const from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
        Time const to = (packet_time + (double (sub.end_display_time) / 1e3)) * TIME_HZ;

        AVSubtitleRect const * rect = sub.rects[0];

        if (rect->type != SUBTITLE_BITMAP) {
                throw DecodeError (_("non-bitmap subtitles not yet supported"));
        }

        shared_ptr<Image> image (new Image (PIX_FMT_RGBA, libdcp::Size (rect->w, rect->h), true));

        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into an RGB palette which is here */
        uint32_t const * palette = (uint32_t *) rect->pict.data[1];
        /* Start of the output data */
        uint32_t* out_p = (uint32_t *) image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint32_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        *out_line_p++ = palette[*sub_line_p++];
                }
                sub_p += rect->pict.linesize[0];
                out_p += image->stride()[0] / sizeof (uint32_t);
        }

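        /* Express the subtitle rectangle as a proportion of the video frame size so that
           it can be positioned and scaled correctly later on.
        */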
        libdcp::Size const vs = _ffmpeg_content->video_size ();

        subtitle (
                image,
                dcpomatic::Rect<double> (
                        static_cast<double> (rect->x) / vs.width,
                        static_cast<double> (rect->y) / vs.height,
                        static_cast<double> (rect->w) / vs.width,
                        static_cast<double> (rect->h) / vs.height
                        ),
                from,
                to
                );

        avsubtitle_free (&sub);
}