Partial work on using a no-video FFmpeg file.
[dcpomatic.git] / src / lib / ffmpeg_decoder.cc
/*
    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "film.h"
#include "md5_digester.h"
#include "compose.hpp"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using dcp::Size;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
        : VideoDecoder (c, log)
        , AudioDecoder (c->audio, fast, log)
        , SubtitleDecoder (c->subtitle)
        , FFmpeg (c)
        , _log (log)
{
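        /* If the content has video we work out a PTS offset from the first video frame;
           with no video there is nothing to rebase timestamps against, so leave the
           offset at zero.
        */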
        if (c->video) {
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
        } else {
                _pts_offset = ContentTime ();
        }
}

void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

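        /* Passing a null packet to the decode calls asks the codecs to drain
           anything they still have buffered.
        */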
        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (decode_video_packet ()) {}

        decode_audio_packet ();
        AudioDecoder::flush ();
}

bool
FFmpegDecoder::pass (PassReason reason, bool accurate)
{
        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

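        /* Send the packet to the decoder appropriate for its stream.  If this pass was
           requested only to find subtitles, and we are not doing an accurate decode,
           video and audio packets can be skipped altogether.
        */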
        if (_video_stream && si == _video_stream.get() && !_ignore_video && (accurate || reason != PASS_REASON_SUBTITLE)) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
                decode_subtitle_packet ();
        } else if (accurate || reason != PASS_REASON_SUBTITLE) {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave the audio data in _frame for the given stream and convert it to float.
 *  For non-planar (interleaved) formats only the first buffer in _frame->data is used;
 *  for planar formats there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const frames = total_samples / stream->channels();
        shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));

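        /* Each case below converts one FFmpeg sample format to our float representation:
           interleaved formats are walked sample-by-sample across the channels, while
           planar formats are copied (or scaled) one channel plane at a time.
        */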
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned, with 128 representing silence */
                        audio->data(channel)[sample] = (float(*p++) - 128) / 128;

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        audio->data(channel)[sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < stream->channels(); ++i) {
                        for (int j = 0; j < frames; ++j) {
                                audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* Divide by 2^31; note that (1 << 31) would overflow a 32-bit int */
                        audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648.0f;

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < stream->channels(); ++i) {
                        for (int j = 0; j < frames; ++j) {
                                audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648.0f;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        audio->data(channel)[sample] = *p++;

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (audio->data(i), p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < stream->channels(); ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return stream->stream (_format_context)->codec->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        VideoDecoder::seek (time, accurate);
        AudioDecoder::seek (time, accurate);
        SubtitleDecoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        DCPOMATIC_ASSERT (_video_stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
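        /* Convert the target time from seconds into the video stream's time_base units,
           then seek to the nearest preceding keyframe.
        */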
        av_seek_frame (
                _format_context,
                _video_stream.get(),
                u.seconds() / av_q2d (_format_context->streams[_video_stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        avcodec_flush_buffers (video_codec_context());

        /* XXX: should we be flushing audio buffers too? */

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

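        /* avcodec_decode_audio4 reports how many bytes of the packet it consumed, so keep
           offering it the remainder of the packet until nothing is left.
        */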
        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        ContentTime ct = ContentTime::from_seconds (
                                av_frame_get_best_effort_timestamp (_frame) *
                                av_q2d ((*stream)->stream (_format_context)->time_base))
                                + _pts_offset;

                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (remove, 0, data->frames() - remove);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (data->frames() > 0) {
                                audio (*stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

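        /* Find a filter graph that matches this frame's size and pixel format, creating
           (and caching) a new one if none exists yet.
        */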
        shared_ptr<VideoFilterGraph> graph;

        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
                        video (
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint (pts * _ffmpeg_content->active_video_frame_rate ())
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        if (sub.num_rects <= 0) {
                /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
                   indicate that the previous subtitle should stop.  We can ignore it here.
                */
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTimePeriod period;
        period.from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                /* We already know the subtitle period `to' time */
                period.to = sub_period.to.get() + _pts_offset;
        } else {
                /* We have to look up the `to' time in the stream's records */
                period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
        }

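        /* A subtitle may contain several rects; each one is either bitmap or text-based,
           so handle each according to its type.
        */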
        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, period);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, period);
                        break;
                }
        }

        avsubtitle_free (&sub);
}

list<ContentTimePeriod>
FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
{
        return _ffmpeg_content->image_subtitles_during (p, starting);
}

list<ContentTimePeriod>
FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
{
        return _ffmpeg_content->text_subtitles_during (p, starting);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period)
{
        /* Note RGBA is expressed little-endian, so the first byte in the word is R, second
           G, third B, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is here
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint32_t const * palette = (uint32_t *) rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is here
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint32_t const * palette = (uint32_t *) rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
        }

        /* Start of the output data */
        uint32_t* out_p = (uint32_t *) image->data()[0];

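        /* Expand the palettised (colour-indexed) subtitle bitmap into full RGBA in the
           output image, one line at a time.
        */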
        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint32_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        /* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */
                        *out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0] / sizeof (uint32_t);
        }

        dcp::Size const vs = _ffmpeg_content->video->size ();
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / vs.width,
                static_cast<double> (rect->y) / vs.height,
                static_cast<double> (rect->w) / vs.width,
                static_cast<double> (rect->h) / vs.height
                );

        image_subtitle (period, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

        vector<string> bits;
        split (bits, ass, is_any_of (","));
        if (bits.size() < 10) {
                return;
        }

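        /* In the Dialogue: format assumed here the comma-separated fields are Layer, Start,
           End, Style, Name, MarginL, MarginR, MarginV, Effect and finally Text, so bits[9]
           is the start of the subtitle text itself.
        */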
        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
        list<sub::Subtitle> subs = sub::collect<list<sub::Subtitle> > (raw);

        /* XXX: lots of this is copied from TextSubtitle; there should probably be some sharing */

        /* Highest line index in this subtitle */
        int highest = 0;
        BOOST_FOREACH (sub::Subtitle i, subs) {
                BOOST_FOREACH (sub::Line j, i.lines) {
                        DCPOMATIC_ASSERT (j.vertical_position.reference && j.vertical_position.reference.get() == sub::TOP_OF_SUBTITLE);
                        DCPOMATIC_ASSERT (j.vertical_position.line);
                        highest = max (highest, j.vertical_position.line.get());
                }
        }

        list<dcp::SubtitleString> ss;

        BOOST_FOREACH (sub::Subtitle i, subs) {
                BOOST_FOREACH (sub::Line j, i.lines) {
                        BOOST_FOREACH (sub::Block k, j.blocks) {
                                ss.push_back (
                                        dcp::SubtitleString (
                                                boost::optional<string> (),
                                                k.italic,
                                                k.bold,
                                                dcp::Colour (255, 255, 255),
                                                /* 48pt is 1/22nd of the screen height */
                                                48,
                                                1,
                                                dcp::Time (i.from.seconds(), 1000),
                                                dcp::Time (i.to.seconds(), 1000),
                                                0,
                                                dcp::HALIGN_CENTER,
                                                /* This 1.015 is an arbitrary value to lift the bottom sub off the bottom
                                                   of the screen a bit to a pleasing degree.
                                                */
                                                1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22),
                                                dcp::VALIGN_TOP,
                                                dcp::DIRECTION_LTR,
                                                k.text,
                                                static_cast<dcp::Effect> (0),
                                                dcp::Colour (255, 255, 255),
                                                dcp::Time (),
                                                dcp::Time ()
                                                )
                                        );
                        }
                }
        }

        text_subtitle (period, ss);
}