Use optional<> for _video_stream.
dcpomatic.git: src/lib/ffmpeg_decoder.cc
/*
    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

/** @file  src/lib/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */
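/* Note: as per the change "Use optional<> for _video_stream", _video_stream is an
   optional<int> (presumably boost::optional) holding the index of the video stream,
   if there is one.  It is declared in a base class rather than in this file, so the
   code here checks it and calls get() before using it as a stream index.
*/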

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "film.h"
#include "md5_digester.h"
#include "compose.hpp"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using dcp::Size;

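/* A note on _pts_offset: it is added to every timestamp we decode (and subtracted
   again when seeking), in effect mapping the container's timestamps onto
   DCP-o-matic's zero-based ContentTime.  It is computed by pts_offset(), which is
   defined elsewhere.
*/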
FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
        : VideoDecoder (c, log)
        , AudioDecoder (c->audio, fast, log)
        , SubtitleDecoder (c->subtitle)
        , FFmpeg (c)
        , _log (log)
        , _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate()))
{
}

void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

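        /* Passing an empty packet (data == 0, size == 0) asks the decoders to drain
           any frames that they are still holding internally, e.g. because of frame
           reordering.
        */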
        while (decode_video_packet ()) {}

        decode_audio_packet ();
        AudioDecoder::flush ();
}

bool
FFmpegDecoder::pass (PassReason reason, bool accurate)
{
        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

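        /* Dispatch the packet according to its stream.  Video and audio packets are
           only decoded when the result is actually wanted (a non-accurate pass made
           just to fetch subtitles skips them); subtitle packets are always decoded.
        */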
        if (_video_stream && si == _video_stream.get() && !_ignore_video && (accurate || reason != PASS_REASON_SUBTITLE)) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
                decode_subtitle_packet ();
        } else if (accurate || reason != PASS_REASON_SUBTITLE) {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave the audio in _frame and convert it to float.
 *  _frame->data is an array of pointers to buffers; only the first buffer is used for
 *  non-planar (interleaved) data, otherwise there is one per channel.
 *  @param stream Stream that the audio came from (used for its channel count and sample format).
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const frames = total_samples / stream->channels();
        shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));

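        /* Interleaved formats keep all channels in data[0], so we walk one buffer and
           cycle through the channels; planar (P) formats have one buffer per channel.
           Integer samples are scaled to floats in the range [-1, 1).
        */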
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* Unsigned 8-bit samples are centred on 128 */
                        audio->data(channel)[sample] = (*p++ - 128) / 128.0f;

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        audio->data(channel)[sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < stream->channels(); ++i) {
                        for (int j = 0; j < frames; ++j) {
                                audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* Divide by 2^31 written as a float; (1 << 31) would overflow a signed int */
                        audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648.0f;

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < stream->channels(); ++i) {
                        for (int j = 0; j < frames; ++j) {
                                audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648.0f;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        audio->data(channel)[sample] = *p++;

                        ++channel;
                        if (channel == stream->channels()) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (audio->data(i), p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < stream->channels(); ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return stream->stream (_format_context)->codec->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        VideoDecoder::seek (time, accurate);
        AudioDecoder::seek (time, accurate);
        SubtitleDecoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        DCPOMATIC_ASSERT (_video_stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
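        /* av_seek_frame() takes its target in the video stream's time_base units, and
           AVSEEK_FLAG_BACKWARD asks for the nearest seek point at or before that target
           so that we can decode forwards from there.
        */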
        av_seek_frame (
                _format_context,
                _video_stream.get(),
                u.seconds() / av_q2d (_format_context->streams[_video_stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        avcodec_flush_buffers (video_codec_context());

        /* XXX: should we be flushing audio buffers too? */

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

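        /* avcodec_decode_audio4() reports how many bytes of the packet it consumed, so
           advance our copy of the packet by that much each time round and keep going
           until it has all been used.
        */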
        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        ContentTime ct = ContentTime::from_seconds (
                                av_frame_get_best_effort_timestamp (_frame) *
                                av_q2d ((*stream)->stream (_format_context)->time_base))
                                + _pts_offset;

                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (remove, 0, data->frames() - remove);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (data->frames() > 0) {
                                audio (*stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

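        /* Find a filter graph that can already handle this frame's size and pixel format;
           if there isn't one, make a new one and keep it for later frames.
        */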
        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
                        video (
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint (pts * _ffmpeg_content->active_video_frame_rate ())
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        if (sub.num_rects <= 0) {
                /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
                   indicate that the previous subtitle should stop.  We can ignore it here.
                */
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTimePeriod period;
        period.from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                /* We already know the subtitle period `to' time */
                period.to = sub_period.to.get() + _pts_offset;
        } else {
                /* We have to look up the `to' time in the stream's records */
                period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
        }

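        /* Handle each rect according to its type: bitmap subtitles (e.g. from DVD or
           Blu-ray sources) are rendered as images, ASS subtitles are parsed as text;
           plain SUBTITLE_TEXT rects are not handled yet.
        */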
        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, period);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, period);
                        break;
                }
        }

        avsubtitle_free (&sub);
}

list<ContentTimePeriod>
FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
{
        return _ffmpeg_content->image_subtitles_during (p, starting);
}

list<ContentTimePeriod>
FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
{
        return _ffmpeg_content->text_subtitles_during (p, starting);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period)
{
        /* Note RGBA is expressed little-endian, so the first byte in the word is R, second
           G, third B, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is here
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint32_t const * palette = (uint32_t *) rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is here
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint32_t const * palette = (uint32_t *) rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
        }

        /* Start of the output data */
        uint32_t* out_p = (uint32_t *) image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint32_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        /* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */
                        *out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0] / sizeof (uint32_t);
        }

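        /* Express the subtitle rectangle as proportions of the video frame size, so that
           its placement does not depend on the video resolution.
        */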
        dcp::Size const vs = _ffmpeg_content->video->size ();
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / vs.width,
                static_cast<double> (rect->y) / vs.height,
                static_cast<double> (rect->w) / vs.width,
                static_cast<double> (rect->h) / vs.height
                );

        image_subtitle (period, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

        vector<string> bits;
        split (bits, ass, is_any_of (","));
        if (bits.size() < 10) {
                return;
        }

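        /* Given the assumption above, bits[9] (the tenth comma-separated field of the
           Dialogue: line) is the subtitle text itself; the earlier fields (layer, times,
           style and so on) are not used here.
        */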
        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
        list<sub::Subtitle> subs = sub::collect<list<sub::Subtitle> > (raw);

        /* XXX: lots of this is copied from TextSubtitle; there should probably be some sharing */

        /* Highest line index in this subtitle */
        int highest = 0;
        BOOST_FOREACH (sub::Subtitle i, subs) {
                BOOST_FOREACH (sub::Line j, i.lines) {
                        DCPOMATIC_ASSERT (j.vertical_position.reference && j.vertical_position.reference.get() == sub::TOP_OF_SUBTITLE);
                        DCPOMATIC_ASSERT (j.vertical_position.line);
                        highest = max (highest, j.vertical_position.line.get());
                }
        }

        list<dcp::SubtitleString> ss;

        BOOST_FOREACH (sub::Subtitle i, subs) {
                BOOST_FOREACH (sub::Line j, i.lines) {
                        BOOST_FOREACH (sub::Block k, j.blocks) {
                                ss.push_back (
                                        dcp::SubtitleString (
                                                boost::optional<string> (),
                                                k.italic,
                                                k.bold,
                                                dcp::Colour (255, 255, 255),
                                                /* 48pt is 1/22nd of the screen height */
                                                48,
                                                1,
                                                dcp::Time (i.from.seconds(), 1000),
                                                dcp::Time (i.to.seconds(), 1000),
                                                0,
                                                dcp::HALIGN_CENTER,
                                                /* This 1.015 is an arbitrary value to lift the bottom sub off the bottom
                                                   of the screen a bit to a pleasing degree.
                                                */
                                                1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22),
                                                dcp::VALIGN_TOP,
                                                dcp::DIRECTION_LTR,
                                                k.text,
                                                static_cast<dcp::Effect> (0),
                                                dcp::Colour (255, 255, 255),
                                                dcp::Time (),
                                                dcp::Time ()
                                                )
                                        );
                        }
                }
        }

        text_subtitle (period, ss);
}