/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using boost::optional;
using boost::dynamic_pointer_cast;
using dcp::Size;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _have_current_subtitle (false)
{
        if (c->video && c->video->use()) {
                video.reset (new VideoDecoder (this, c));
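                /* _pts_offset is the amount added to each PTS from this content
                   so that its timestamps start at (roughly) zero.
                */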
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is, it just needs to be black */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
                _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }

        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, fast));
        }

        if (c->only_text()) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
                text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
        }

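        /* _next_time[i] is our best guess at the timestamp of the next frame to
           arrive on stream i; it is used when a frame comes through with no PTS
           of its own (i.e. AV_NOPTS_VALUE).
        */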
        _next_time.resize (_format_context->nb_streams);
}

void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (video && decode_video_packet()) {}

        if (audio) {
                decode_audio_packet ();
        }

        /* Make sure all streams are the same length and round up to the next video frame */

        FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                Frame const f = full_length.frames_round (vfr);
                Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                while (v < f) {
                        video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
                        ++v;
                }
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(film(), i);
                /* Unfortunately if a is 0 that really means that we don't know the stream position since
                   there has been no data on it since the last seek.  In this case we'll just do nothing
                   here.  I'm not sure if that's the right idea.
                */
                if (a > ContentTime()) {
                        while (a < full_length) {
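                                /* Pad with silence in blocks of at most 0.1s, presumably
                                   to bound the size of each AudioBuffers.
                                */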
                                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a);
                                a += to_do;
                        }
                }
        }

        if (audio) {
                audio->flush ();
        }
}

bool
FFmpegDecoder::pass ()
{
#ifdef DCPOMATIC_VARIANT_SWAROOP
        if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
                return true;
        }
#endif

        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

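        /* Packets that are not from the video or subtitle stream are handed to
           decode_audio_packet(), which quietly ignores anything that is not audio.
        */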
        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_subtitle_packet ();
        } else {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave the audio in _frame and convert it to float.
 *  _frame->data is an array of pointers to buffers; only the first buffer is used
 *  for non-planar (interleaved) data, otherwise there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

DCPOMATIC_DISABLE_WARNINGS
        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );
DCPOMATIC_ENABLE_WARNINGS

        /* XXX: can't we just use _frame->nb_samples directly here? */
        /* XXX: can't we use swr_convert() to do the format conversion? */

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const channels = stream->channels();
        int const frames = total_samples / channels;
        shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
        float** data = audio->data();

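        /* The conversions below scale each integer sample by 2^(N-1) for an N-bit
           format, mapping full scale to roughly [-1, 1); for example the 16-bit
           sample 16384 becomes 16384 / 32768 = 0.5.
        */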
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned and centred on 0x80, so shift
                           them to signed values before scaling by 2^7.
                        */
                        data[channel][sample] = (float(*p++) - 0x80) / (1 << 7);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                DCPOMATIC_ASSERT (_frame->channels <= channels);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
DCPOMATIC_DISABLE_WARNINGS
        return stream->stream (_format_context)->codec->sample_fmt;
DCPOMATIC_ENABLE_WARNINGS
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
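        /* av_seek_frame takes its timestamp in the target stream's time_base
           units, so convert our seconds value; for example, with a time_base of
           1/90000 a 2-second position becomes 180000.
        */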
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        {
                /* Force re-creation of filter graphs to reset them and hence to make sure
                   they don't have any pre-seek frames knocking about.
                */
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
                _filter_graphs.clear ();
        }

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

DCPOMATIC_DISABLE_WARNINGS
        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }
DCPOMATIC_ENABLE_WARNINGS

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;

        BOOST_FOREACH (optional<ContentTime>& i, _next_time) {
                i = optional<ContentTime>();
        }
}

void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;
        int const stream_index = copy_packet.stream_index;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

DCPOMATIC_DISABLE_WARNINGS
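        /* avcodec_decode_audio4 returns the number of bytes it consumed, so keep
           feeding it the remainder of the packet until everything has been used.
        */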
        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        ContentTime ct;
                        if (_frame->pts == AV_NOPTS_VALUE) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
                                if (_next_time[stream_index]) {
                                        ct = *_next_time[stream_index];
                                }
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
                                        av_q2d ((*stream)->stream (_format_context)->time_base))
                                        + _pts_offset;
                        }

                        _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (data->frames() - remove, remove, 0);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (ct < ContentTime()) {
                                LOG_WARNING (
                                        "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                                        to_string(ct),
                                        data->frames(),
                                        copy_packet.stream_index,
                                        copy_packet.pts,
                                        av_frame_get_best_effort_timestamp(_frame),
                                        av_q2d((*stream)->stream(_format_context)->time_base),
                                        to_string(_pts_offset)
                                        );
                        }
DCPOMATIC_ENABLE_WARNINGS

                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
                                audio->emit (film(), *stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
DCPOMATIC_DISABLE_WARNINGS
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }
DCPOMATIC_ENABLE_WARNINGS

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

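        /* Look for an existing filter graph that can take this frame's size and
           pixel format; only set up a new one if none matches.
        */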
        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

                        video->emit (
                                film(),
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
                } else {
                        only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects == 0) {
                /* Nothing new in this subtitle */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime const from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                /* We already know when this subtitle should stop */
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                /* We don't know the stop time yet; it must come from a later packet */
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, from);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1].
           (first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        uint8_t* out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint8_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
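        /* Express the subtitle rectangle as proportions of the video frame; for
           example, x = 192 on a 1920-wide frame becomes 0.1.
        */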
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / target_width,
                static_cast<double> (rect->y) / target_height,
                static_cast<double> (rect->w) / target_width,
                static_cast<double> (rect->h) / target_height
                );

        only_text()->emit_bitmap_start (from, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

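        /* Such a line appears to have nine commas before the text, something like
           "Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world"
           so we skip over nine commas and take everything afterwards; the text
           itself may contain commas, which is why we can't simply split on them.
        */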
        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );

        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
                only_text()->emit_plain_start (from, i);
        }
}